/* SPDX-License-Identifier: GPL-2.0 */
/*
 * RISC-V processor specific defines
 *
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */
#ifndef SELFTEST_KVM_PROCESSOR_H
#define SELFTEST_KVM_PROCESSOR_H

#include <linux/stringify.h>
#include <asm/csr.h>
#include "kvm_util.h"

#define INSN_OPCODE_MASK	0x007c
#define INSN_OPCODE_SHIFT	2
#define INSN_OPCODE_SYSTEM	28

#define INSN_MASK_FUNCT3	0x7000
#define INSN_SHIFT_FUNCT3	12

#define INSN_CSR_MASK		0xfff00000
#define INSN_CSR_SHIFT		20

#define GET_RM(insn)		(((insn) & INSN_MASK_FUNCT3) >> INSN_SHIFT_FUNCT3)
#define GET_CSR_NUM(insn)	(((insn) & INSN_CSR_MASK) >> INSN_CSR_SHIFT)

static inline uint64_t __kvm_reg_id(uint64_t type, uint64_t subtype,
				    uint64_t idx, uint64_t size)
{
	return KVM_REG_RISCV | type | subtype | idx | size;
}

#if __riscv_xlen == 64
#define KVM_REG_SIZE_ULONG	KVM_REG_SIZE_U64
#else
#define KVM_REG_SIZE_ULONG	KVM_REG_SIZE_U32
#endif

#define RISCV_CONFIG_REG(name)		__kvm_reg_id(KVM_REG_RISCV_CONFIG, 0,		\
						     KVM_REG_RISCV_CONFIG_REG(name),	\
						     KVM_REG_SIZE_ULONG)

#define RISCV_CORE_REG(name)		__kvm_reg_id(KVM_REG_RISCV_CORE, 0,		\
						     KVM_REG_RISCV_CORE_REG(name),	\
						     KVM_REG_SIZE_ULONG)

#define RISCV_GENERAL_CSR_REG(name)	__kvm_reg_id(KVM_REG_RISCV_CSR,			\
						     KVM_REG_RISCV_CSR_GENERAL,		\
						     KVM_REG_RISCV_CSR_REG(name),	\
						     KVM_REG_SIZE_ULONG)

#define RISCV_TIMER_REG(name)		__kvm_reg_id(KVM_REG_RISCV_TIMER, 0,		\
						     KVM_REG_RISCV_TIMER_REG(name),	\
						     KVM_REG_SIZE_U64)

#define RISCV_ISA_EXT_REG(idx)		__kvm_reg_id(KVM_REG_RISCV_ISA_EXT,		\
						     KVM_REG_RISCV_ISA_SINGLE,		\
						     idx, KVM_REG_SIZE_ULONG)

#define RISCV_SBI_EXT_REG(idx)		__kvm_reg_id(KVM_REG_RISCV_SBI_EXT,		\
						     KVM_REG_RISCV_SBI_SINGLE,		\
						     idx, KVM_REG_SIZE_ULONG)

bool __vcpu_has_ext(struct kvm_vcpu *vcpu, uint64_t ext);

static inline bool __vcpu_has_isa_ext(struct kvm_vcpu *vcpu, uint64_t isa_ext)
{
	return __vcpu_has_ext(vcpu, RISCV_ISA_EXT_REG(isa_ext));
}

static inline bool __vcpu_has_sbi_ext(struct kvm_vcpu *vcpu, uint64_t sbi_ext)
{
	return __vcpu_has_ext(vcpu, RISCV_SBI_EXT_REG(sbi_ext));
}

struct pt_regs {
	unsigned long epc;
	unsigned long ra;
	unsigned long sp;
	unsigned long gp;
	unsigned long tp;
	unsigned long t0;
	unsigned long t1;
	unsigned long t2;
	unsigned long s0;
	unsigned long s1;
	unsigned long a0;
	unsigned long a1;
	unsigned long a2;
	unsigned long a3;
	unsigned long a4;
	unsigned long a5;
	unsigned long a6;
	unsigned long a7;
	unsigned long s2;
	unsigned long s3;
	unsigned long s4;
	unsigned long s5;
	unsigned long s6;
	unsigned long s7;
	unsigned long s8;
	unsigned long s9;
	unsigned long s10;
	unsigned long s11;
	unsigned long t3;
	unsigned long t4;
	unsigned long t5;
	unsigned long t6;
	/* Supervisor/Machine CSRs */
	unsigned long status;
	unsigned long badaddr;
	unsigned long cause;
	/* a0 value before the syscall */
	unsigned long orig_a0;
};

#define NR_VECTORS 2
#define NR_EXCEPTIONS 32
#define EC_MASK (NR_EXCEPTIONS - 1)

typedef void(*exception_handler_fn)(struct pt_regs *);

void vm_init_vector_tables(struct kvm_vm *vm);
void vcpu_init_vector_tables(struct kvm_vcpu *vcpu);

void vm_install_exception_handler(struct kvm_vm *vm, int vector, exception_handler_fn handler);

void vm_install_interrupt_handler(struct kvm_vm *vm, exception_handler_fn handler);

/* L3 index Bit[47:39] */
#define PGTBL_L3_INDEX_MASK		0x0000FF8000000000ULL
#define PGTBL_L3_INDEX_SHIFT		39
#define PGTBL_L3_BLOCK_SHIFT		39
#define PGTBL_L3_BLOCK_SIZE		0x0000008000000000ULL
#define PGTBL_L3_MAP_MASK		(~(PGTBL_L3_BLOCK_SIZE - 1))
/* L2 index Bit[38:30] */
#define PGTBL_L2_INDEX_MASK		0x0000007FC0000000ULL
#define PGTBL_L2_INDEX_SHIFT		30
#define PGTBL_L2_BLOCK_SHIFT		30
#define PGTBL_L2_BLOCK_SIZE		0x0000000040000000ULL
#define PGTBL_L2_MAP_MASK		(~(PGTBL_L2_BLOCK_SIZE - 1))
/* L1 index Bit[29:21] */
#define PGTBL_L1_INDEX_MASK		0x000000003FE00000ULL
#define PGTBL_L1_INDEX_SHIFT		21
#define PGTBL_L1_BLOCK_SHIFT		21
#define PGTBL_L1_BLOCK_SIZE		0x0000000000200000ULL
#define PGTBL_L1_MAP_MASK		(~(PGTBL_L1_BLOCK_SIZE - 1))
/* L0 index Bit[20:12] */
#define PGTBL_L0_INDEX_MASK		0x00000000001FF000ULL
#define PGTBL_L0_INDEX_SHIFT		12
#define PGTBL_L0_BLOCK_SHIFT		12
#define PGTBL_L0_BLOCK_SIZE		0x0000000000001000ULL
#define PGTBL_L0_MAP_MASK		(~(PGTBL_L0_BLOCK_SIZE - 1))

#define PGTBL_PTE_ADDR_MASK		0x003FFFFFFFFFFC00ULL
#define PGTBL_PTE_ADDR_SHIFT		10
#define PGTBL_PTE_RSW_MASK		0x0000000000000300ULL
#define PGTBL_PTE_RSW_SHIFT		8
#define PGTBL_PTE_DIRTY_MASK		0x0000000000000080ULL
#define PGTBL_PTE_DIRTY_SHIFT		7
#define PGTBL_PTE_ACCESSED_MASK		0x0000000000000040ULL
#define PGTBL_PTE_ACCESSED_SHIFT	6
#define PGTBL_PTE_GLOBAL_MASK		0x0000000000000020ULL
#define PGTBL_PTE_GLOBAL_SHIFT		5
#define PGTBL_PTE_USER_MASK		0x0000000000000010ULL
#define PGTBL_PTE_USER_SHIFT		4
#define PGTBL_PTE_EXECUTE_MASK		0x0000000000000008ULL
#define PGTBL_PTE_EXECUTE_SHIFT		3
#define PGTBL_PTE_WRITE_MASK		0x0000000000000004ULL
#define PGTBL_PTE_WRITE_SHIFT		2
#define PGTBL_PTE_READ_MASK		0x0000000000000002ULL
#define PGTBL_PTE_READ_SHIFT		1
#define PGTBL_PTE_PERM_MASK		(PGTBL_PTE_ACCESSED_MASK | \
					 PGTBL_PTE_DIRTY_MASK | \
					 PGTBL_PTE_EXECUTE_MASK | \
					 PGTBL_PTE_WRITE_MASK | \
					 PGTBL_PTE_READ_MASK)
#define PGTBL_PTE_VALID_MASK		0x0000000000000001ULL
#define PGTBL_PTE_VALID_SHIFT		0

#define PGTBL_PAGE_SIZE		PGTBL_L0_BLOCK_SIZE
#define PGTBL_PAGE_SIZE_SHIFT	PGTBL_L0_BLOCK_SHIFT

static inline void local_irq_enable(void)
{
	csr_set(CSR_SSTATUS, SR_SIE);
}

static inline void local_irq_disable(void)
{
	csr_clear(CSR_SSTATUS, SR_SIE);
}

#endif /* SELFTEST_KVM_PROCESSOR_H */
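
/*
 * Illustrative usage sketch (not part of the upstream header): shows how a
 * selftest might consume the register-id macros and the exception-handler
 * hooks declared above. It assumes the generic kvm_util.h helpers
 * (vm_create_with_one_vcpu(), vcpu_set_reg(), vcpu_run(), kvm_vm_free())
 * and the EXC_INST_ILLEGAL cause number from <asm/csr.h>; guest_code() and
 * handle_illegal_insn() are hypothetical names used only for illustration.
 *
 *	static void handle_illegal_insn(struct pt_regs *regs)
 *	{
 *		// Guest-side handler: skip the faulting instruction using the
 *		// pt_regs layout defined in this header.
 *		regs->epc += 4;
 *	}
 *
 *	static void run_test(void)
 *	{
 *		struct kvm_vcpu *vcpu;
 *		struct kvm_vm *vm;
 *
 *		vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 *
 *		// Route guest illegal-instruction exceptions to the handler.
 *		vm_init_vector_tables(vm);
 *		vcpu_init_vector_tables(vcpu);
 *		vm_install_exception_handler(vm, EXC_INST_ILLEGAL,
 *					     handle_illegal_insn);
 *
 *		// RISCV_CORE_REG() builds a KVM_GET/SET_ONE_REG id; here it
 *		// seeds the guest's a0 register with a test value.
 *		vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.a0), 0xdeadbeef);
 *
 *		vcpu_run(vcpu);
 *		kvm_vm_free(vm);
 *	}
 */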