/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RISC-V processor specific defines
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <linux/stringify.h>
#include <asm/csr.h>
#include <asm/vdso/processor.h>
#include "kvm_util.h"

#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

#define INSN_MASK_FUNCT3	0x7000
#define INSN_SHIFT_FUNCT3	12

#define INSN_CSR_MASK		0xfff00000
#define INSN_CSR_SHIFT		20

#define GET_RM(insn)		(((insn) & INSN_MASK_FUNCT3) >> INSN_SHIFT_FUNCT3)
#define GET_CSR_NUM(insn)	(((insn) & INSN_CSR_MASK) >> INSN_CSR_SHIFT)

static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
				    uint64_t idx, uint64_t size)
{
	return KVM_REG_RISCV | type | subtype | idx | size;
}

#if __riscv_xlen == 64
#define KVM_REG_SIZE_ULONG	KVM_REG_SIZE_U64
#else
#define KVM_REG_SIZE_ULONG	KVM_REG_SIZE_U32
#endif

#define RISCV_CONFIG_REG(name)		__kvm_reg_id(KVM_REG_RISCV_CONFIG, 0,		\
						     KVM_REG_RISCV_CONFIG_REG(name),	\
						     KVM_REG_SIZE_ULONG)

#define RISCV_CORE_REG(name)		__kvm_reg_id(KVM_REG_RISCV_CORE, 0,		\
						     KVM_REG_RISCV_CORE_REG(name),	\
						     KVM_REG_SIZE_ULONG)

#define RISCV_GENERAL_CSR_REG(name)	__kvm_reg_id(KVM_REG_RISCV_CSR,			\
						     KVM_REG_RISCV_CSR_GENERAL,		\
						     KVM_REG_RISCV_CSR_REG(name),	\
						     KVM_REG_SIZE_ULONG)

#define RISCV_TIMER_REG(name)		__kvm_reg_id(KVM_REG_RISCV_TIMER, 0,		\
						     KVM_REG_RISCV_TIMER_REG(name),	\
						     KVM_REG_SIZE_U64)

#define RISCV_ISA_EXT_REG(idx)		__kvm_reg_id(KVM_REG_RISCV_ISA_EXT,		\
						     KVM_REG_RISCV_ISA_SINGLE,		\
						     idx, KVM_REG_SIZE_ULONG)

#define RISCV_SBI_EXT_REG(idx)		__kvm_reg_id(KVM_REG_RISCV_SBI_EXT,		\
						     KVM_REG_RISCV_SBI_SINGLE,		\
						     idx, KVM_REG_SIZE_ULONG)

bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext);

static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, uint64_t isa_ext)
{
	return __vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(isa_ext));
}

static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, uint64_t sbi_ext)
{
	return __vcpu_has_ext(vcpu, RISCV_SBI_EXT_REG(sbi_ext));
}

struct pt_regs {
	unsigned long epc;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	/* Supervisor/Machine CSRs */
	unsigned long status;
	unsigned long badaddr;
	unsigned long cause;
	/* a0 value before the syscall */
	unsigned long orig_a0;
};

#define NR_VECTORS	2
#define NR_EXCEPTIONS	32
#define EC_MASK		(NR_EXCEPTIONS - 1)

typedef void(*exception_handler_fn)(struct pt_regs *);

void vm_init_vector_tables(struct kvm_vm *vm);
void vcpu_init_vector_tables(struct kvm_vcpu *vcpu);

void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler);

void vm_install_interrupt_handler(struct kvm_vm *vm,
				  exception_handler_fn handler);
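
/*
 * The index fields below follow the Sv48 layout: four translation levels
 * (L3..L0), each selected by a 9-bit VPN field, so every page table holds
 * 512 PTEs. The *_BLOCK_SIZE values give the mapping granule at each
 * level: 512 GiB, 1 GiB, 2 MiB and 4 KiB respectively.
 */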
/* L3 index Bit[47:39] */
#define PGTBL_L3_INDEX_MASK		0x0000FF8000000000ULL
#define PGTBL_L3_INDEX_SHIFT		39
#define PGTBL_L3_BLOCK_SHIFT		39
#define PGTBL_L3_BLOCK_SIZE		0x0000008000000000ULL
#define PGTBL_L3_MAP_MASK		(~(PGTBL_L3_BLOCK_SIZE - 1))
/* L2 index Bit[38:30] */
#define PGTBL_L2_INDEX_MASK		0x0000007FC0000000ULL
#define PGTBL_L2_INDEX_SHIFT		30
#define PGTBL_L2_BLOCK_SHIFT		30
#define PGTBL_L2_BLOCK_SIZE		0x0000000040000000ULL
#define PGTBL_L2_MAP_MASK		(~(PGTBL_L2_BLOCK_SIZE - 1))
/* L1 index Bit[29:21] */
#define PGTBL_L1_INDEX_MASK		0x000000003FE00000ULL
#define PGTBL_L1_INDEX_SHIFT		21
#define PGTBL_L1_BLOCK_SHIFT		21
#define PGTBL_L1_BLOCK_SIZE		0x0000000000200000ULL
#define PGTBL_L1_MAP_MASK		(~(PGTBL_L1_BLOCK_SIZE - 1))
/* L0 index Bit[20:12] */
#define PGTBL_L0_INDEX_MASK		0x00000000001FF000ULL
#define PGTBL_L0_INDEX_SHIFT		12
#define PGTBL_L0_BLOCK_SHIFT		12
#define PGTBL_L0_BLOCK_SIZE		0x0000000000001000ULL
#define PGTBL_L0_MAP_MASK		(~(PGTBL_L0_BLOCK_SIZE - 1))

#define PGTBL_PTE_ADDR_MASK		0x003FFFFFFFFFFC00ULL
#define PGTBL_PTE_ADDR_SHIFT		10
#define PGTBL_PTE_RSW_MASK		0x0000000000000300ULL
#define PGTBL_PTE_RSW_SHIFT		8
#define PGTBL_PTE_DIRTY_MASK		0x0000000000000080ULL
#define PGTBL_PTE_DIRTY_SHIFT		7
#define PGTBL_PTE_ACCESSED_MASK		0x0000000000000040ULL
#define PGTBL_PTE_ACCESSED_SHIFT	6
#define PGTBL_PTE_GLOBAL_MASK		0x0000000000000020ULL
#define PGTBL_PTE_GLOBAL_SHIFT		5
#define PGTBL_PTE_USER_MASK		0x0000000000000010ULL
#define PGTBL_PTE_USER_SHIFT		4
#define PGTBL_PTE_EXECUTE_MASK		0x0000000000000008ULL
#define PGTBL_PTE_EXECUTE_SHIFT		3
#define PGTBL_PTE_WRITE_MASK		0x0000000000000004ULL
#define PGTBL_PTE_WRITE_SHIFT		2
#define PGTBL_PTE_READ_MASK		0x0000000000000002ULL
#define PGTBL_PTE_READ_SHIFT		1
#define PGTBL_PTE_PERM_MASK		(PGTBL_PTE_ACCESSED_MASK | \
					 PGTBL_PTE_DIRTY_MASK | \
					 PGTBL_PTE_EXECUTE_MASK | \
					 PGTBL_PTE_WRITE_MASK | \
					 PGTBL_PTE_READ_MASK)
#define PGTBL_PTE_VALID_MASK		0x0000000000000001ULL
#define PGTBL_PTE_VALID_SHIFT		0

#define PGTBL_PAGE_SIZE			PGTBL_L0_BLOCK_SIZE
#define PGTBL_PAGE_SIZE_SHIFT		PGTBL_L0_BLOCK_SHIFT

static inline void local_irq_enable(void)
{
	csr_set(CSR_SSTATUS, SR_SIE);
}

static inline void local_irq_disable(void)
{
	csr_clear(CSR_SSTATUS, SR_SIE);
}

#endif /* SELFTEST_KVM_PROCESSOR_H */
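
/*
 * A minimal usage sketch, not part of the upstream header: how a test might
 * combine the ONE_REG helpers above with the generic vcpu_set_reg() wrapper,
 * assuming that wrapper from kvm_util.h and the KVM_RISCV_ISA_EXT_SSTC ID
 * from the uapi <asm/kvm.h>. The function name is illustrative only.
 */
static inline bool example_prep_vcpu(struct kvm_vcpu *vcpu, unsigned long entry)
{
	/*
	 * RISCV_CORE_REG(regs.pc) encodes the guest pc as a core register ID
	 * (offsetof(struct kvm_riscv_core, regs.pc) / sizeof(unsigned long)),
	 * which vcpu_set_reg() hands to KVM_SET_ONE_REG.
	 */
	vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), entry);

	/* Report whether the vCPU exposes the Sstc (stimecmp) extension. */
	return __vcpu_has_isa_ext(vcpu, KVM_RISCV_ISA_EXT_SSTC);
}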