Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/* SPDX-License-Identifier: GPL-2.0 */
2#if !defined(_TRACE_KVM_MAIN_H) || defined(TRACE_HEADER_MULTI_READ)
3#define _TRACE_KVM_MAIN_H
4
5#include <linux/tracepoint.h>
6
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm

/*
 * Expand a KVM_EXIT_* suffix into a { value, "name" } pair for
 * __print_symbolic(), e.g. ERSN(IO) -> { KVM_EXIT_IO, "KVM_EXIT_IO" }.
 */
#define ERSN(x) { KVM_EXIT_##x, "KVM_EXIT_" #x }

/*
 * Symbolic decode table for userspace exit reasons, consumed by the
 * kvm_userspace_exit tracepoint below.
 */
#define kvm_trace_exit_reason						\
	ERSN(UNKNOWN), ERSN(EXCEPTION), ERSN(IO), ERSN(HYPERCALL),	\
	ERSN(DEBUG), ERSN(HLT), ERSN(MMIO), ERSN(IRQ_WINDOW_OPEN),	\
	ERSN(SHUTDOWN), ERSN(FAIL_ENTRY), ERSN(INTR), ERSN(SET_TPR),	\
	ERSN(TPR_ACCESS), ERSN(S390_SIEIC), ERSN(S390_RESET), ERSN(DCR),\
	ERSN(NMI), ERSN(INTERNAL_ERROR), ERSN(OSI), ERSN(PAPR_HCALL),	\
	ERSN(S390_UCONTROL), ERSN(WATCHDOG), ERSN(S390_TSCH), ERSN(EPR),\
	ERSN(SYSTEM_EVENT), ERSN(S390_STSI), ERSN(IOAPIC_EOI),		\
	ERSN(HYPERV), ERSN(ARM_NISV), ERSN(X86_RDMSR), ERSN(X86_WRMSR)
/*
 * Tracepoint for a vcpu exiting from the kernel back to userspace.
 *
 * @reason: KVM_EXIT_* exit reason handed to userspace
 * @errno:  ioctl return value; negative when the exit was caused by an
 *          error or a pending signal rather than by @reason
 */
TRACE_EVENT(kvm_userspace_exit,
	    TP_PROTO(__u32 reason, int errno),
	    TP_ARGS(reason, errno),

	TP_STRUCT__entry(
		__field(	__u32,		reason		)
		__field(	int,		errno		)
	),

	TP_fast_assign(
		__entry->reason		= reason;
		__entry->errno		= errno;
	),

	/*
	 * On error, print "restart" for -EINTR (signal pending) or "error"
	 * otherwise, along with the positive errno value; on success decode
	 * the exit reason symbolically and append its raw numeric value.
	 */
	TP_printk("reason %s (%d)",
		  __entry->errno < 0 ?
		  (__entry->errno == -EINTR ? "restart" : "error") :
		  __print_symbolic(__entry->reason, kvm_trace_exit_reason),
		  __entry->errno < 0 ? -__entry->errno : __entry->reason)
);
42
/*
 * Tracepoint for a vcpu waking up from halt.
 *
 * @ns:     time spent, in nanoseconds (printed after "wait"/"poll")
 * @waited: true if the vcpu actually blocked ("wait"), false if it only
 *          polled ("poll")
 * @valid:  whether the poll interval counted as valid for halt-polling
 *          statistics -- NOTE(review): inferred from the "polling
 *          %s(valid/invalid)" output; confirm against the caller
 */
TRACE_EVENT(kvm_vcpu_wakeup,
	    TP_PROTO(__u64 ns, bool waited, bool valid),
	    TP_ARGS(ns, waited, valid),

	TP_STRUCT__entry(
		__field(	__u64,		ns		)
		__field(	bool,		waited		)
		__field(	bool,		valid		)
	),

	TP_fast_assign(
		__entry->ns		= ns;
		__entry->waited		= waited;
		__entry->valid		= valid;
	),

	TP_printk("%s time %lld ns, polling %s",
		  __entry->waited ? "wait" : "poll",
		  __entry->ns,
		  __entry->valid ? "valid" : "invalid")
);
64
#if defined(CONFIG_HAVE_KVM_IRQCHIP)
/*
 * Tracepoint for setting the level of an in-kernel irqchip line.
 *
 * @gsi:           global system interrupt number being driven
 * @level:         new line level
 * @irq_source_id: id of the source driving the line
 */
TRACE_EVENT(kvm_set_irq,
	TP_PROTO(unsigned int gsi, int level, int irq_source_id),
	TP_ARGS(gsi, level, irq_source_id),

	TP_STRUCT__entry(
		__field(	unsigned int,	gsi		)
		__field(	int,		level		)
		__field(	int,		irq_source_id	)
	),

	TP_fast_assign(
		__entry->gsi		= gsi;
		__entry->level		= level;
		__entry->irq_source_id	= irq_source_id;
	),

	TP_printk("gsi %u level %d source %d",
		  __entry->gsi, __entry->level, __entry->irq_source_id)
);
85
#ifdef CONFIG_KVM_IOAPIC

/*
 * Symbolic decode table for the KVM_IRQCHIP_* identifiers; only defined
 * when the emulated PIC/IOAPIC exists (x86-style irqchips).
 */
#define kvm_irqchips						\
	{KVM_IRQCHIP_PIC_MASTER,	"PIC master"},		\
	{KVM_IRQCHIP_PIC_SLAVE,		"PIC slave"},		\
	{KVM_IRQCHIP_IOAPIC,		"IOAPIC"}

#endif /* CONFIG_KVM_IOAPIC */

/*
 * kvm_ack_irq prints the irqchip symbolically when the table above is
 * available, and falls back to the raw numeric id otherwise.
 */
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
#define kvm_ack_irq_parm  __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
#else
#define kvm_ack_irq_string "irqchip %d pin %u"
#define kvm_ack_irq_parm  __entry->irqchip, __entry->pin
#endif
102
/*
 * Tracepoint for an interrupt acknowledged on an in-kernel irqchip.
 *
 * @irqchip: irqchip id (decoded via kvm_ack_irq_string/parm above)
 * @pin:     pin number on that irqchip
 */
TRACE_EVENT(kvm_ack_irq,
	TP_PROTO(unsigned int irqchip, unsigned int pin),
	TP_ARGS(irqchip, pin),

	TP_STRUCT__entry(
		__field(	unsigned int,	irqchip		)
		__field(	unsigned int,	pin		)
	),

	TP_fast_assign(
		__entry->irqchip	= irqchip;
		__entry->pin		= pin;
	),

	TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);
119
120#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
121
122
123
/* MMIO access types for the kvm_mmio tracepoint. */
#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

#define kvm_trace_symbol_mmio \
	{ KVM_TRACE_MMIO_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_MMIO_READ, "read" }, \
	{ KVM_TRACE_MMIO_WRITE, "write" }

/*
 * Tracepoint for an MMIO access emulated by KVM.
 *
 * @type: one of the KVM_TRACE_MMIO_* values above
 * @len:  access size in bytes
 * @gpa:  guest physical address of the access
 * @val:  pointer to the data, or NULL when the value is not yet known
 *        (e.g. an unsatisfied read)
 */
TRACE_EVENT(kvm_mmio,
	TP_PROTO(int type, int len, u64 gpa, void *val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= 0;
		/* copy at most 8 bytes; val == NULL records 0 */
		if (val)
			memcpy(&__entry->val, val,
			       min_t(u32, sizeof(__entry->val), len));
	),

	TP_printk("mmio %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_mmio),
		  __entry->len, __entry->gpa, __entry->val)
);
158
/* IOCSR access types for the kvm_iocsr tracepoint. */
#define KVM_TRACE_IOCSR_READ_UNSATISFIED 0
#define KVM_TRACE_IOCSR_READ 1
#define KVM_TRACE_IOCSR_WRITE 2

#define kvm_trace_symbol_iocsr \
	{ KVM_TRACE_IOCSR_READ_UNSATISFIED, "unsatisfied-read" }, \
	{ KVM_TRACE_IOCSR_READ, "read" }, \
	{ KVM_TRACE_IOCSR_WRITE, "write" }

/*
 * Tracepoint for an IOCSR access emulated by KVM; mirrors kvm_mmio.
 * (IOCSR is presumably the LoongArch I/O CSR space -- confirm with the
 * architecture code that emits this event.)
 *
 * @type: one of the KVM_TRACE_IOCSR_* values above
 * @len:  access size in bytes
 * @gpa:  guest physical address of the access
 * @val:  pointer to the data, or NULL when the value is not yet known
 */
TRACE_EVENT(kvm_iocsr,
	TP_PROTO(int type, int len, u64 gpa, void *val),
	TP_ARGS(type, len, gpa, val),

	TP_STRUCT__entry(
		__field(	u32,	type		)
		__field(	u32,	len		)
		__field(	u64,	gpa		)
		__field(	u64,	val		)
	),

	TP_fast_assign(
		__entry->type		= type;
		__entry->len		= len;
		__entry->gpa		= gpa;
		__entry->val		= 0;
		/* copy at most 8 bytes; val == NULL records 0 */
		if (val)
			memcpy(&__entry->val, val,
			       min_t(u32, sizeof(__entry->val), len));
	),

	TP_printk("iocsr %s len %u gpa 0x%llx val 0x%llx",
		  __print_symbolic(__entry->type, kvm_trace_symbol_iocsr),
		  __entry->len, __entry->gpa, __entry->val)
);
193
#define kvm_fpu_load_symbol	\
	{0, "unload"},		\
	{1, "load"}

/*
 * Tracepoint for guest FPU state being loaded (1) or unloaded (0)
 * on the host CPU.
 *
 * @load: nonzero when the guest FPU is being loaded
 */
TRACE_EVENT(kvm_fpu,
	TP_PROTO(int load),
	TP_ARGS(load),

	TP_STRUCT__entry(
		__field(	u32,	        load		)
	),

	TP_fast_assign(
		__entry->load		= load;
	),

	TP_printk("%s", __print_symbolic(__entry->load, kvm_fpu_load_symbol))
);
212
213#ifdef CONFIG_KVM_ASYNC_PF
/*
 * Event class for async page fault events keyed by guest virtual
 * address and guest frame number.
 *
 * @gva: faulting guest virtual address
 * @gfn: guest frame number being resolved
 */
DECLARE_EVENT_CLASS(kvm_async_get_page_class,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn),

	TP_STRUCT__entry(
		__field(__u64, gva)
		__field(u64, gfn)
	),

	TP_fast_assign(
		__entry->gva = gva;
		__entry->gfn = gfn;
	),

	TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
);
232
/* Async page fault initiated for (gva, gfn). */
DEFINE_EVENT(kvm_async_get_page_class, kvm_try_async_get_page,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);

/* The same (gva, gfn) faulted again while an async fault was pending. */
DEFINE_EVENT(kvm_async_get_page_class, kvm_async_pf_repeated_fault,

	TP_PROTO(u64 gva, u64 gfn),

	TP_ARGS(gva, gfn)
);
246
/*
 * Event class for async page fault notifications delivered to the guest,
 * identified by the token shared with the guest.
 *
 * @token: token identifying this async page fault to the guest
 * @gva:   guest virtual address the fault refers to
 */
DECLARE_EVENT_CLASS(kvm_async_pf_nopresent_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva),

	TP_STRUCT__entry(
		__field(__u64, token)
		__field(__u64, gva)
	),

	TP_fast_assign(
		__entry->token = token;
		__entry->gva = gva;
	),

	TP_printk("token %#llx gva %#llx", __entry->token, __entry->gva)

);
266
/* Guest notified that the page for @token is not yet present. */
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_not_present,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);

/* Guest notified that the page for @token is now ready. */
DEFINE_EVENT(kvm_async_pf_nopresent_ready, kvm_async_pf_ready,

	TP_PROTO(u64 token, u64 gva),

	TP_ARGS(token, gva)
);
280
/*
 * Tracepoint for completion of an async page fault work item.
 *
 * @address: host virtual address that was faulted in
 * @gva:     corresponding guest virtual address
 */
TRACE_EVENT(
	kvm_async_pf_completed,
	TP_PROTO(unsigned long address, u64 gva),
	TP_ARGS(address, gva),

	TP_STRUCT__entry(
		__field(unsigned long, address)
		__field(u64, gva)
		),

	TP_fast_assign(
		__entry->address = address;
		__entry->gva = gva;
		),

	TP_printk("gva %#llx address %#lx",  __entry->gva,
		  __entry->address)
);
299
300#endif
301
/*
 * Tracepoint for adjustment of a vcpu's halt-polling window.
 *
 * @grow:    true when the window grew, false when it shrank
 * @vcpu_id: id of the vcpu whose window changed
 * @new:     new halt_poll_ns value
 * @old:     previous halt_poll_ns value
 */
TRACE_EVENT(kvm_halt_poll_ns,
	TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
		 unsigned int old),
	TP_ARGS(grow, vcpu_id, new, old),

	TP_STRUCT__entry(
		__field(bool, grow)
		__field(unsigned int, vcpu_id)
		__field(unsigned int, new)
		__field(unsigned int, old)
	),

	TP_fast_assign(
		__entry->grow           = grow;
		__entry->vcpu_id        = vcpu_id;
		__entry->new            = new;
		__entry->old            = old;
	),

	TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
			__entry->vcpu_id,
			__entry->new,
			__entry->grow ? "grow" : "shrink",
			__entry->old)
);

/* Convenience wrappers selecting the grow/shrink direction. */
#define trace_kvm_halt_poll_ns_grow(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(true, vcpu_id, new, old)
#define trace_kvm_halt_poll_ns_shrink(vcpu_id, new, old) \
	trace_kvm_halt_poll_ns(false, vcpu_id, new, old)
332
/*
 * Tracepoint for pushing one dirty-page entry onto a dirty ring.
 *
 * @ring:   the per-vcpu dirty ring being pushed to; its index,
 *          dirty_index and reset_index are snapshotted into the entry
 * @slot:   memslot id of the dirtied page
 * @offset: page offset within the memslot
 *
 * "used" in the output is dirty_index - reset_index, i.e. entries not
 * yet harvested by userspace.
 */
TRACE_EVENT(kvm_dirty_ring_push,
	TP_PROTO(struct kvm_dirty_ring *ring, u32 slot, u64 offset),
	TP_ARGS(ring, slot, offset),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
		__field(u32, slot)
		__field(u64, offset)
	),

	TP_fast_assign(
		__entry->index          = ring->index;
		__entry->dirty_index    = ring->dirty_index;
		__entry->reset_index    = ring->reset_index;
		__entry->slot           = slot;
		__entry->offset         = offset;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x "
		  "slot %u offset 0x%llx (used %u)",
		  __entry->index, __entry->dirty_index,
		  __entry->reset_index,  __entry->slot, __entry->offset,
		  __entry->dirty_index - __entry->reset_index)
);
359
/*
 * Tracepoint for resetting (harvesting) a dirty ring; snapshots the
 * ring's index, dirty_index and reset_index.  "used" is
 * dirty_index - reset_index at the time of the reset.
 */
TRACE_EVENT(kvm_dirty_ring_reset,
	TP_PROTO(struct kvm_dirty_ring *ring),
	TP_ARGS(ring),

	TP_STRUCT__entry(
		__field(int, index)
		__field(u32, dirty_index)
		__field(u32, reset_index)
	),

	TP_fast_assign(
		__entry->index          = ring->index;
		__entry->dirty_index    = ring->dirty_index;
		__entry->reset_index    = ring->reset_index;
	),

	TP_printk("ring %d: dirty 0x%x reset 0x%x (used %u)",
		  __entry->index, __entry->dirty_index, __entry->reset_index,
		  __entry->dirty_index - __entry->reset_index)
);
380
/*
 * Tracepoint for a vcpu exiting to userspace because its dirty ring
 * needs to be harvested; records only the vcpu id.
 */
TRACE_EVENT(kvm_dirty_ring_exit,
	TP_PROTO(struct kvm_vcpu *vcpu),
	TP_ARGS(vcpu),

	TP_STRUCT__entry(
	    __field(int, vcpu_id)
	),

	TP_fast_assign(
	    __entry->vcpu_id = vcpu->vcpu_id;
	),

	TP_printk("vcpu %d", __entry->vcpu_id)
);
395
396#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
397/*
398 * @start: Starting address of guest memory range
399 * @end: End address of guest memory range
400 * @attr: The value of the attribute being set.
401 */
/*
 * Tracepoint for setting memory attributes on a guest memory range.
 *
 * @start: starting gfn of the range (inclusive)
 * @end:   ending gfn of the range
 * @attr:  the attribute value being applied
 */
TRACE_EVENT(kvm_vm_set_mem_attributes,
	TP_PROTO(gfn_t start, gfn_t end, unsigned long attr),
	TP_ARGS(start, end, attr),

	TP_STRUCT__entry(
		__field(gfn_t, start)
		__field(gfn_t, end)
		__field(unsigned long, attr)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
		__entry->attr		= attr;
	),

	TP_printk("%#016llx -- %#016llx [0x%lx]",
		  __entry->start, __entry->end, __entry->attr)
);
421#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
422
/*
 * Tracepoint for an mmu-notifier driven unmap of a host virtual
 * address range.
 *
 * @start: start of the hva range (inclusive)
 * @end:   end of the hva range
 */
TRACE_EVENT(kvm_unmap_hva_range,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start		)
		__field(	unsigned long,	end		)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
	),

	TP_printk("mmu notifier unmap range: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);
440
/*
 * Tracepoint for an mmu-notifier age (accessed-bit clear) request over
 * a host virtual address range.
 *
 * @start: start of the hva range (inclusive)
 * @end:   end of the hva range
 */
TRACE_EVENT(kvm_age_hva,
	TP_PROTO(unsigned long start, unsigned long end),
	TP_ARGS(start, end),

	TP_STRUCT__entry(
		__field(	unsigned long,	start		)
		__field(	unsigned long,	end		)
	),

	TP_fast_assign(
		__entry->start		= start;
		__entry->end		= end;
	),

	TP_printk("mmu notifier age hva: %#016lx -- %#016lx",
		  __entry->start, __entry->end)
);
458
/*
 * Tracepoint for an mmu-notifier test-age (non-clearing accessed-bit
 * check) on a single host virtual address.
 *
 * @hva: host virtual address being tested
 */
TRACE_EVENT(kvm_test_age_hva,
	TP_PROTO(unsigned long hva),
	TP_ARGS(hva),

	TP_STRUCT__entry(
		__field(	unsigned long,	hva		)
	),

	TP_fast_assign(
		__entry->hva		= hva;
	),

	TP_printk("mmu notifier test age hva: %#016lx", __entry->hva)
);
473
474#endif /* _TRACE_KVM_MAIN_H */
475
476/* This part must be outside protection */
477#include <trace/define_trace.h>