Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

KVM: riscv: selftests: Add sstc timer test

Add a KVM selftest to validate the Sstc timer functionality.
The test was ported from the arm64 arch timer test.

Signed-off-by: Haibo Xu <haibo1.xu@intel.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Signed-off-by: Anup Patel <anup@brainfault.org>

authored by

Haibo Xu and committed by
Anup Patel
d0b94bcb 812806bd

+210 -10
+1
tools/testing/selftests/kvm/Makefile
··· 185 185 TEST_GEN_PROGS_s390x += set_memory_region_test 186 186 TEST_GEN_PROGS_s390x += kvm_binary_stats_test 187 187 188 + TEST_GEN_PROGS_riscv += arch_timer 188 189 TEST_GEN_PROGS_riscv += demand_paging_test 189 190 TEST_GEN_PROGS_riscv += dirty_log_test 190 191 TEST_GEN_PROGS_riscv += get-reg-list
+8 -4
tools/testing/selftests/kvm/aarch64/arch_timer.c
··· 194 194 vm_init_descriptor_tables(vm); 195 195 vm_install_exception_handler(vm, VECTOR_IRQ_CURRENT, guest_irq_handler); 196 196 197 - if (!test_args.offset.reserved) { 198 - if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET)) 199 - vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &test_args.offset); 200 - else 197 + if (!test_args.reserved) { 198 + if (kvm_has_cap(KVM_CAP_COUNTER_OFFSET)) { 199 + struct kvm_arm_counter_offset offset = { 200 + .counter_offset = test_args.counter_offset, 201 + .reserved = 0, 202 + }; 203 + vm_ioctl(vm, KVM_ARM_SET_COUNTER_OFFSET, &offset); 204 + } else 201 205 TEST_FAIL("no support for global offset"); 202 206 } 203 207
+6 -4
tools/testing/selftests/kvm/arch_timer.c
··· 36 36 .timer_period_ms = TIMER_TEST_PERIOD_MS_DEF, 37 37 .migration_freq_ms = TIMER_TEST_MIGRATION_FREQ_MS, 38 38 .timer_err_margin_us = TIMER_TEST_ERR_MARGIN_US, 39 - .offset = { .reserved = 1 }, 39 + .reserved = 1, 40 40 }; 41 41 42 42 struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; ··· 74 74 default: 75 75 TEST_FAIL("Unexpected guest exit"); 76 76 } 77 + 78 + pr_info("PASS(vCPU-%d).\n", vcpu_idx); 77 79 78 80 return NULL; 79 81 } ··· 192 190 TIMER_TEST_PERIOD_MS_DEF); 193 191 pr_info("\t-m: Frequency (in ms) of vCPUs to migrate to different pCPU. 0 to turn off (default: %u)\n", 194 192 TIMER_TEST_MIGRATION_FREQ_MS); 195 - pr_info("\t-o: Counter offset (in counter cycles, default: 0)\n"); 193 + pr_info("\t-o: Counter offset (in counter cycles, default: 0) [aarch64-only]\n"); 196 194 pr_info("\t-e: Interrupt arrival error margin (in us) of the guest timer (default: %u)\n", 197 195 TIMER_TEST_ERR_MARGIN_US); 198 196 pr_info("\t-h: print this help screen\n"); ··· 225 223 test_args.timer_err_margin_us = atoi_non_negative("Error Margin", optarg); 226 224 break; 227 225 case 'o': 228 - test_args.offset.counter_offset = strtol(optarg, NULL, 0); 229 - test_args.offset.reserved = 0; 226 + test_args.counter_offset = strtol(optarg, NULL, 0); 227 + test_args.reserved = 0; 230 228 break; 231 229 case 'h': 232 230 default:
+71
tools/testing/selftests/kvm/include/riscv/arch_timer.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * RISC-V Arch Timer(sstc) specific interface 4 + * 5 + * Copyright (c) 2024 Intel Corporation 6 + */ 7 + 8 + #ifndef SELFTEST_KVM_ARCH_TIMER_H 9 + #define SELFTEST_KVM_ARCH_TIMER_H 10 + 11 + #include <asm/csr.h> 12 + #include <asm/vdso/processor.h> 13 + 14 + static unsigned long timer_freq; 15 + 16 + #define msec_to_cycles(msec) \ 17 + ((timer_freq) * (uint64_t)(msec) / 1000) 18 + 19 + #define usec_to_cycles(usec) \ 20 + ((timer_freq) * (uint64_t)(usec) / 1000000) 21 + 22 + #define cycles_to_usec(cycles) \ 23 + ((uint64_t)(cycles) * 1000000 / (timer_freq)) 24 + 25 + static inline uint64_t timer_get_cycles(void) 26 + { 27 + return csr_read(CSR_TIME); 28 + } 29 + 30 + static inline void timer_set_cmp(uint64_t cval) 31 + { 32 + csr_write(CSR_STIMECMP, cval); 33 + } 34 + 35 + static inline uint64_t timer_get_cmp(void) 36 + { 37 + return csr_read(CSR_STIMECMP); 38 + } 39 + 40 + static inline void timer_irq_enable(void) 41 + { 42 + csr_set(CSR_SIE, IE_TIE); 43 + } 44 + 45 + static inline void timer_irq_disable(void) 46 + { 47 + csr_clear(CSR_SIE, IE_TIE); 48 + } 49 + 50 + static inline void timer_set_next_cmp_ms(uint32_t msec) 51 + { 52 + uint64_t now_ct = timer_get_cycles(); 53 + uint64_t next_ct = now_ct + msec_to_cycles(msec); 54 + 55 + timer_set_cmp(next_ct); 56 + } 57 + 58 + static inline void __delay(uint64_t cycles) 59 + { 60 + uint64_t start = timer_get_cycles(); 61 + 62 + while ((timer_get_cycles() - start) < cycles) 63 + cpu_relax(); 64 + } 65 + 66 + static inline void udelay(unsigned long usec) 67 + { 68 + __delay(usec_to_cycles(usec)); 69 + } 70 + 71 + #endif /* SELFTEST_KVM_ARCH_TIMER_H */
+10
tools/testing/selftests/kvm/include/riscv/processor.h
··· 193 193 194 194 bool guest_sbi_probe_extension(int extid, long *out_val); 195 195 196 + static inline void local_irq_enable(void) 197 + { 198 + csr_set(CSR_SSTATUS, SR_SIE); 199 + } 200 + 201 + static inline void local_irq_disable(void) 202 + { 203 + csr_clear(CSR_SSTATUS, SR_SIE); 204 + } 205 + 196 206 #endif /* SELFTEST_KVM_PROCESSOR_H */
+3 -2
tools/testing/selftests/kvm/include/timer_test.h
··· 23 23 uint32_t timer_period_ms; 24 24 uint32_t migration_freq_ms; 25 25 uint32_t timer_err_margin_us; 26 - /* TODO: Change arm specific type to a common one */ 27 - struct kvm_arm_counter_offset offset; 26 + /* Members of struct kvm_arm_counter_offset */ 27 + uint64_t counter_offset; 28 + uint64_t reserved; 28 29 }; 29 30 30 31 /* Shared variables between host and guest */
+111
tools/testing/selftests/kvm/riscv/arch_timer.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* 3 + * arch_timer.c - Tests the riscv64 sstc timer IRQ functionality 4 + * 5 + * The test validates the sstc timer IRQs using vstimecmp registers. 6 + * It's ported from the aarch64 arch_timer test. 7 + * 8 + * Copyright (c) 2024, Intel Corporation. 9 + */ 10 + 11 + #define _GNU_SOURCE 12 + 13 + #include "arch_timer.h" 14 + #include "kvm_util.h" 15 + #include "processor.h" 16 + #include "timer_test.h" 17 + 18 + static int timer_irq = IRQ_S_TIMER; 19 + 20 + static void guest_irq_handler(struct ex_regs *regs) 21 + { 22 + uint64_t xcnt, xcnt_diff_us, cmp; 23 + unsigned int intid = regs->cause & ~CAUSE_IRQ_FLAG; 24 + uint32_t cpu = guest_get_vcpuid(); 25 + struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 26 + 27 + timer_irq_disable(); 28 + 29 + xcnt = timer_get_cycles(); 30 + cmp = timer_get_cmp(); 31 + xcnt_diff_us = cycles_to_usec(xcnt - shared_data->xcnt); 32 + 33 + /* Make sure we are dealing with the correct timer IRQ */ 34 + GUEST_ASSERT_EQ(intid, timer_irq); 35 + 36 + __GUEST_ASSERT(xcnt >= cmp, 37 + "xcnt = 0x%"PRIx64", cmp = 0x%"PRIx64", xcnt_diff_us = 0x%" PRIx64, 38 + xcnt, cmp, xcnt_diff_us); 39 + 40 + WRITE_ONCE(shared_data->nr_iter, shared_data->nr_iter + 1); 41 + } 42 + 43 + static void guest_run(struct test_vcpu_shared_data *shared_data) 44 + { 45 + uint32_t irq_iter, config_iter; 46 + 47 + shared_data->nr_iter = 0; 48 + shared_data->guest_stage = 0; 49 + 50 + for (config_iter = 0; config_iter < test_args.nr_iter; config_iter++) { 51 + /* Setup the next interrupt */ 52 + timer_set_next_cmp_ms(test_args.timer_period_ms); 53 + shared_data->xcnt = timer_get_cycles(); 54 + timer_irq_enable(); 55 + 56 + /* Setup a timeout for the interrupt to arrive */ 57 + udelay(msecs_to_usecs(test_args.timer_period_ms) + 58 + test_args.timer_err_margin_us); 59 + 60 + irq_iter = READ_ONCE(shared_data->nr_iter); 61 + __GUEST_ASSERT(config_iter + 1 == irq_iter, 62 + "config_iter + 1 = 0x%x, irq_iter = 
0x%x.\n" 63 + " Guest timer interrupt was not trigged within the specified\n" 64 + " interval, try to increase the error margin by [-e] option.\n", 65 + config_iter + 1, irq_iter); 66 + } 67 + } 68 + 69 + static void guest_code(void) 70 + { 71 + uint32_t cpu = guest_get_vcpuid(); 72 + struct test_vcpu_shared_data *shared_data = &vcpu_shared_data[cpu]; 73 + 74 + timer_irq_disable(); 75 + local_irq_enable(); 76 + 77 + guest_run(shared_data); 78 + 79 + GUEST_DONE(); 80 + } 81 + 82 + struct kvm_vm *test_vm_create(void) 83 + { 84 + struct kvm_vm *vm; 85 + int nr_vcpus = test_args.nr_vcpus; 86 + 87 + vm = vm_create_with_vcpus(nr_vcpus, guest_code, vcpus); 88 + __TEST_REQUIRE(__vcpu_has_ext(vcpus[0], RISCV_ISA_EXT_REG(KVM_RISCV_ISA_EXT_SSTC)), 89 + "SSTC not available, skipping test\n"); 90 + 91 + vm_init_vector_tables(vm); 92 + vm_install_interrupt_handler(vm, guest_irq_handler); 93 + 94 + for (int i = 0; i < nr_vcpus; i++) 95 + vcpu_init_vector_tables(vcpus[i]); 96 + 97 + /* Initialize guest timer frequency. */ 98 + vcpu_get_reg(vcpus[0], RISCV_TIMER_REG(frequency), &timer_freq); 99 + sync_global_to_guest(vm, timer_freq); 100 + pr_debug("timer_freq: %lu\n", timer_freq); 101 + 102 + /* Make all the test's cmdline args visible to the guest */ 103 + sync_global_to_guest(vm, test_args); 104 + 105 + return vm; 106 + } 107 + 108 + void test_vm_cleanup(struct kvm_vm *vm) 109 + { 110 + kvm_vm_free(vm); 111 + }