// SPDX-License-Identifier: GPL-2.0
/*
 * steal/stolen time test
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sched.h>
#include <pthread.h>
#include <linux/kernel.h>
#include <sys/syscall.h>
#include <asm/kvm.h>
#include <asm/kvm_para.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"

#define NR_VCPUS		4
#define ST_GPA_BASE		(1 << 30)
#define MIN_RUN_DELAY_NS	200000UL

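/*
 * st_gva[] holds the identity-mapped guest address of each VCPU's
 * steal-time record; guest_stolen_time[] is written by the guest and
 * read back by the host with sync_global_from_guest().
 */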
static void *st_gva[NR_VCPUS];
static uint64_t guest_stolen_time[NR_VCPUS];

#if defined(__x86_64__)

/* steal_time must have 64-byte alignment */
#define STEAL_TIME_SIZE	((sizeof(struct kvm_steal_time) + 63) & ~63)

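/*
 * The record's version field behaves like a seqcount: the host
 * increments it before and after updating the record, so an odd value
 * means an update is in flight. The test also expects no flags and no
 * pending preemption notification at its checkpoints.
 */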
static void check_status(struct kvm_steal_time *st)
{
	GUEST_ASSERT(!(READ_ONCE(st->version) & 1));
	GUEST_ASSERT(READ_ONCE(st->flags) == 0);
	GUEST_ASSERT(READ_ONCE(st->preempted) == 0);
}

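/*
 * Guest entry point: verify that the steal-time MSR reads back as the
 * record's address with the enable bit set, then report st->steal to
 * the host at each sync point and check that the version counter
 * advanced after the host stole time.
 */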
static void guest_code(int cpu)
{
	struct kvm_steal_time *st = st_gva[cpu];
	uint32_t version;

	GUEST_ASSERT(rdmsr(MSR_KVM_STEAL_TIME) == ((uint64_t)st_gva[cpu] | KVM_MSR_ENABLED));

	memset(st, 0, sizeof(*st));
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	version = READ_ONCE(st->version);
	check_status(st);
	GUEST_SYNC(1);

	check_status(st);
	GUEST_ASSERT(version < READ_ONCE(st->version));
	WRITE_ONCE(guest_stolen_time[cpu], st->steal);
	check_status(st);
	GUEST_DONE();
}

static void steal_time_init(struct kvm_vm *vm)
{
	int i;

	if (!(kvm_get_supported_cpuid_entry(KVM_CPUID_FEATURES)->eax &
	      (1UL << KVM_FEATURE_STEAL_TIME))) {
		print_skip("steal-time not supported");
		exit(KSFT_SKIP);
	}

	for (i = 0; i < NR_VCPUS; ++i) {
		int ret;

		/* ST_GPA_BASE is identity mapped */
		st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
		sync_global_to_guest(vm, st_gva[i]);

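		/*
		 * Enabling steal-time with reserved GPA bits set must be
		 * rejected. _vcpu_set_msr() returns the number of MSRs
		 * written, so 0 here means KVM refused the write.
		 */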
		ret = _vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_STEAL_RESERVED_MASK);
		TEST_ASSERT(ret == 0, "Bad GPA didn't fail");

		vcpu_set_msr(vm, i, MSR_KVM_STEAL_TIME, (ulong)st_gva[i] | KVM_MSR_ENABLED);
	}
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_steal_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);
	int i;

	pr_info("VCPU%d:\n", vcpuid);
	pr_info("    steal:     %lld\n", st->steal);
	pr_info("    version:   %d\n", st->version);
	pr_info("    flags:     %d\n", st->flags);
	pr_info("    preempted: %d\n", st->preempted);
	pr_info("    u8_pad:    ");
	for (i = 0; i < 3; ++i)
		pr_info("%d", st->u8_pad[i]);
	pr_info("\n    pad:       ");
	for (i = 0; i < 11; ++i)
		pr_info("%d", st->pad[i]);
	pr_info("\n");
}

#elif defined(__aarch64__)

/* PV_TIME_ST must have 64-byte alignment */
#define STEAL_TIME_SIZE	((sizeof(struct st_time) + 63) & ~63)

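/*
 * SMCCC_ARCH_FEATURES is the standard Arm SMCCC feature query; the
 * 0xc5000020/0xc5000021 IDs are the paravirtualized time calls from
 * the Arm DEN0057 spec (SMC64 standard hypervisor service range).
 */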
#define SMCCC_ARCH_FEATURES	0x80000001
#define PV_TIME_FEATURES	0xc5000020
#define PV_TIME_ST		0xc5000021

struct st_time {
	uint32_t rev;
	uint32_t attr;
	uint64_t st_time;
};

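/*
 * Minimal SMCCC fast call: the function ID goes in w0, the first
 * argument in x1, and the result comes back in x0. Guests under KVM
 * use HVC as the conduit.
 */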
static int64_t smccc(uint32_t func, uint64_t arg)
{
	unsigned long ret;

	asm volatile(
		"mov	w0, %w1\n"
		"mov	x1, %2\n"
		"hvc	#0\n"
		"mov	%0, x0\n"
	: "=r" (ret) : "r" (func), "r" (arg) :
	  "x0", "x1", "x2", "x3");

	return ret;
}

static void check_status(struct st_time *st)
{
	GUEST_ASSERT(READ_ONCE(st->rev) == 0);
	GUEST_ASSERT(READ_ONCE(st->attr) == 0);
}

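/*
 * Discovery chain: SMCCC_ARCH_FEATURES confirms that PV_TIME_FEATURES
 * is implemented, PV_TIME_FEATURES confirms itself and PV_TIME_ST, and
 * PV_TIME_ST then returns the IPA of this VCPU's stolen-time record,
 * which must match what the host configured below.
 */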
static void guest_code(int cpu)
{
	struct st_time *st;
	int64_t status;

	status = smccc(SMCCC_ARCH_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT(status == 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_FEATURES);
	GUEST_ASSERT(status == 0);
	status = smccc(PV_TIME_FEATURES, PV_TIME_ST);
	GUEST_ASSERT(status == 0);

	status = smccc(PV_TIME_ST, 0);
	GUEST_ASSERT(status != -1);
	GUEST_ASSERT(status == (ulong)st_gva[cpu]);

	st = (struct st_time *)status;
	GUEST_SYNC(0);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_SYNC(1);

	check_status(st);
	WRITE_ONCE(guest_stolen_time[cpu], st->st_time);
	GUEST_DONE();
}

static void steal_time_init(struct kvm_vm *vm)
{
	struct kvm_device_attr dev = {
		.group = KVM_ARM_VCPU_PVTIME_CTRL,
		.attr = KVM_ARM_VCPU_PVTIME_IPA,
	};
	int i, ret;

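	/*
	 * Probe the PV time attribute on VCPU0 first; ENXIO from
	 * KVM_HAS_DEVICE_ATTR means the host has no steal-time support,
	 * which is a skip rather than a failure.
	 */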
	ret = _vcpu_ioctl(vm, 0, KVM_HAS_DEVICE_ATTR, &dev);
	if (ret != 0 && errno == ENXIO) {
		print_skip("steal-time not supported");
		exit(KSFT_SKIP);
	}

	for (i = 0; i < NR_VCPUS; ++i) {
		uint64_t st_ipa;

		vcpu_ioctl(vm, i, KVM_HAS_DEVICE_ATTR, &dev);

		dev.addr = (uint64_t)&st_ipa;

		/* ST_GPA_BASE is identity mapped */
		st_gva[i] = (void *)(ST_GPA_BASE + i * STEAL_TIME_SIZE);
		sync_global_to_guest(vm, st_gva[i]);

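		/*
		 * The stolen-time IPA must be 64-byte aligned; forcing the
		 * low bit on must make KVM_SET_DEVICE_ATTR fail with EINVAL.
		 */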
		st_ipa = (ulong)st_gva[i] | 1;
		ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
		TEST_ASSERT(ret == -1 && errno == EINVAL, "Bad IPA didn't report EINVAL");

		st_ipa = (ulong)st_gva[i];
		vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);

		ret = _vcpu_ioctl(vm, i, KVM_SET_DEVICE_ATTR, &dev);
		TEST_ASSERT(ret == -1 && errno == EEXIST, "Set IPA twice without EEXIST");
	}
}

static void steal_time_dump(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct st_time *st = addr_gva2hva(vm, (ulong)st_gva[vcpuid]);

	pr_info("VCPU%d:\n", vcpuid);
	pr_info("    rev:     %d\n", st->rev);
	pr_info("    attr:    %d\n", st->attr);
	pr_info("    st_time: %ld\n", st->st_time);
}

#endif

static long get_run_delay(void)
{
	char path[64];
	long val[2];
	FILE *fp;

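	/*
	 * /proc/<tid>/schedstat: the first field is time spent on the CPU,
	 * the second is run-queue wait time (run delay), in nanoseconds.
	 */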
	sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid));
	fp = fopen(path, "r");
	fscanf(fp, "%ld %ld ", &val[0], &val[1]);
	fclose(fp);

	return val[1];
}

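/*
 * Busy-loop on the pinned CPU for at least MIN_RUN_DELAY_NS of wall
 * time, so the main (VCPU) thread sharing that CPU racks up run delay.
 */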
static void *do_steal_time(void *arg)
{
	struct timespec ts, stop;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	stop = timespec_add_ns(ts, MIN_RUN_DELAY_NS);

	while (1) {
		clock_gettime(CLOCK_MONOTONIC, &ts);
		if (timespec_to_ns(timespec_sub(ts, stop)) >= 0)
			break;
	}

	return NULL;
}

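/* Enter the guest and return on its next ucall (sync, done, or abort). */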
static void run_vcpu(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct ucall uc;

	vcpu_args_set(vm, vcpuid, 1, vcpuid);

	vcpu_ioctl(vm, vcpuid, KVM_RUN, NULL);

	switch (get_ucall(vm, vcpuid, &uc)) {
	case UCALL_SYNC:
	case UCALL_DONE:
		break;
	case UCALL_ABORT:
		TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
			    __FILE__, uc.args[1]);
	default:
		TEST_ASSERT(false, "Unexpected exit: %s",
			    exit_reason_str(vcpu_state(vm, vcpuid)->exit_reason));
	}
}

int main(int ac, char **av)
{
	struct kvm_vm *vm;
	pthread_attr_t attr;
	pthread_t thread;
	cpu_set_t cpuset;
	unsigned int gpages;
	long stolen_time;
	long run_delay;
	bool verbose;
	int i;

	verbose = ac > 1 && (!strncmp(av[1], "-v", 3) || !strncmp(av[1], "--verbose", 10));

	/* Set CPU affinity so we can force preemption of the VCPU */
	CPU_ZERO(&cpuset);
	CPU_SET(0, &cpuset);
	pthread_attr_init(&attr);
	pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset);
	pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset);

	/* Create a one-VCPU VM and an identity-mapped memslot for the steal-time structures */
	vm = vm_create_default(0, 0, guest_code);
	gpages = vm_calc_num_guest_pages(VM_MODE_DEFAULT, STEAL_TIME_SIZE * NR_VCPUS);
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, ST_GPA_BASE, 1, gpages, 0);
	virt_map(vm, ST_GPA_BASE, ST_GPA_BASE, gpages);
	ucall_init(vm, NULL);

	/* Add the rest of the VCPUs */
	for (i = 1; i < NR_VCPUS; ++i)
		vm_vcpu_add_default(vm, i, guest_code);

	steal_time_init(vm);

	/* Run test on each VCPU */
	for (i = 0; i < NR_VCPUS; ++i) {
		/* First VCPU run initializes steal-time */
		run_vcpu(vm, i);

		/* Second VCPU run, expect guest stolen time to be <= run_delay */
		run_vcpu(vm, i);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i];
		run_delay = get_run_delay();
		TEST_ASSERT(stolen_time <= run_delay,
			    "Expected stolen time <= %ld, got %ld",
			    run_delay, stolen_time);

		/* Steal time from the VCPU. The steal time thread has the same CPU affinity as the VCPUs. */
		run_delay = get_run_delay();
		pthread_create(&thread, &attr, do_steal_time, NULL);
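		/*
		 * Yield until this thread has accumulated MIN_RUN_DELAY_NS of
		 * run delay; the VCPU runs on this same thread, so its steal
		 * counter must grow by at least as much on the next run.
		 */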
		do
			sched_yield();
		while (get_run_delay() - run_delay < MIN_RUN_DELAY_NS);
		pthread_join(thread, NULL);
		run_delay = get_run_delay() - run_delay;
		TEST_ASSERT(run_delay >= MIN_RUN_DELAY_NS,
			    "Expected run_delay >= %ld, got %ld",
			    MIN_RUN_DELAY_NS, run_delay);

		/* Run VCPU again to confirm stolen time is consistent with run_delay */
		run_vcpu(vm, i);
		sync_global_from_guest(vm, guest_stolen_time[i]);
		stolen_time = guest_stolen_time[i] - stolen_time;
		TEST_ASSERT(stolen_time >= run_delay,
			    "Expected stolen time >= %ld, got %ld",
			    run_delay, stolen_time);

		if (verbose) {
			pr_info("VCPU%d: total-stolen-time=%ld test-stolen-time=%ld", i,
				guest_stolen_time[i], stolen_time);
			if (stolen_time == run_delay)
				pr_info(" (BONUS: guest test-stolen-time even exactly matches test-run_delay)");
			pr_info("\n");
			steal_time_dump(vm, i);
		}
	}

	return 0;
}