Linux kernel mirror (for testing): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.8 · 291 lines · 6.3 kB · view raw
1#if !defined(_TRACE_KVMMMU_H) || defined(TRACE_HEADER_MULTI_READ) 2#define _TRACE_KVMMMU_H 3 4#include <linux/tracepoint.h> 5#include <linux/ftrace_event.h> 6 7#undef TRACE_SYSTEM 8#define TRACE_SYSTEM kvmmmu 9 10#define KVM_MMU_PAGE_FIELDS \ 11 __field(__u64, gfn) \ 12 __field(__u32, role) \ 13 __field(__u32, root_count) \ 14 __field(bool, unsync) 15 16#define KVM_MMU_PAGE_ASSIGN(sp) \ 17 __entry->gfn = sp->gfn; \ 18 __entry->role = sp->role.word; \ 19 __entry->root_count = sp->root_count; \ 20 __entry->unsync = sp->unsync; 21 22#define KVM_MMU_PAGE_PRINTK() ({ \ 23 const char *ret = p->buffer + p->len; \ 24 static const char *access_str[] = { \ 25 "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ 26 }; \ 27 union kvm_mmu_page_role role; \ 28 \ 29 role.word = __entry->role; \ 30 \ 31 trace_seq_printf(p, "sp gfn %llx %u%s q%u%s %s%s" \ 32 " %snxe root %u %s%c", \ 33 __entry->gfn, role.level, \ 34 role.cr4_pae ? " pae" : "", \ 35 role.quadrant, \ 36 role.direct ? " direct" : "", \ 37 access_str[role.access], \ 38 role.invalid ? " invalid" : "", \ 39 role.nxe ? "" : "!", \ 40 __entry->root_count, \ 41 __entry->unsync ? 
"unsync" : "sync", 0); \ 42 ret; \ 43 }) 44 45#define kvm_mmu_trace_pferr_flags \ 46 { PFERR_PRESENT_MASK, "P" }, \ 47 { PFERR_WRITE_MASK, "W" }, \ 48 { PFERR_USER_MASK, "U" }, \ 49 { PFERR_RSVD_MASK, "RSVD" }, \ 50 { PFERR_FETCH_MASK, "F" } 51 52/* 53 * A pagetable walk has started 54 */ 55TRACE_EVENT( 56 kvm_mmu_pagetable_walk, 57 TP_PROTO(u64 addr, u32 pferr), 58 TP_ARGS(addr, pferr), 59 60 TP_STRUCT__entry( 61 __field(__u64, addr) 62 __field(__u32, pferr) 63 ), 64 65 TP_fast_assign( 66 __entry->addr = addr; 67 __entry->pferr = pferr; 68 ), 69 70 TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr, 71 __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags)) 72); 73 74 75/* We just walked a paging element */ 76TRACE_EVENT( 77 kvm_mmu_paging_element, 78 TP_PROTO(u64 pte, int level), 79 TP_ARGS(pte, level), 80 81 TP_STRUCT__entry( 82 __field(__u64, pte) 83 __field(__u32, level) 84 ), 85 86 TP_fast_assign( 87 __entry->pte = pte; 88 __entry->level = level; 89 ), 90 91 TP_printk("pte %llx level %u", __entry->pte, __entry->level) 92); 93 94DECLARE_EVENT_CLASS(kvm_mmu_set_bit_class, 95 96 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size), 97 98 TP_ARGS(table_gfn, index, size), 99 100 TP_STRUCT__entry( 101 __field(__u64, gpa) 102 ), 103 104 TP_fast_assign( 105 __entry->gpa = ((u64)table_gfn << PAGE_SHIFT) 106 + index * size; 107 ), 108 109 TP_printk("gpa %llx", __entry->gpa) 110); 111 112/* We set a pte accessed bit */ 113DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_accessed_bit, 114 115 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size), 116 117 TP_ARGS(table_gfn, index, size) 118); 119 120/* We set a pte dirty bit */ 121DEFINE_EVENT(kvm_mmu_set_bit_class, kvm_mmu_set_dirty_bit, 122 123 TP_PROTO(unsigned long table_gfn, unsigned index, unsigned size), 124 125 TP_ARGS(table_gfn, index, size) 126); 127 128TRACE_EVENT( 129 kvm_mmu_walker_error, 130 TP_PROTO(u32 pferr), 131 TP_ARGS(pferr), 132 133 TP_STRUCT__entry( 134 
__field(__u32, pferr) 135 ), 136 137 TP_fast_assign( 138 __entry->pferr = pferr; 139 ), 140 141 TP_printk("pferr %x %s", __entry->pferr, 142 __print_flags(__entry->pferr, "|", kvm_mmu_trace_pferr_flags)) 143); 144 145TRACE_EVENT( 146 kvm_mmu_get_page, 147 TP_PROTO(struct kvm_mmu_page *sp, bool created), 148 TP_ARGS(sp, created), 149 150 TP_STRUCT__entry( 151 KVM_MMU_PAGE_FIELDS 152 __field(bool, created) 153 ), 154 155 TP_fast_assign( 156 KVM_MMU_PAGE_ASSIGN(sp) 157 __entry->created = created; 158 ), 159 160 TP_printk("%s %s", KVM_MMU_PAGE_PRINTK(), 161 __entry->created ? "new" : "existing") 162); 163 164DECLARE_EVENT_CLASS(kvm_mmu_page_class, 165 166 TP_PROTO(struct kvm_mmu_page *sp), 167 TP_ARGS(sp), 168 169 TP_STRUCT__entry( 170 KVM_MMU_PAGE_FIELDS 171 ), 172 173 TP_fast_assign( 174 KVM_MMU_PAGE_ASSIGN(sp) 175 ), 176 177 TP_printk("%s", KVM_MMU_PAGE_PRINTK()) 178); 179 180DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_sync_page, 181 TP_PROTO(struct kvm_mmu_page *sp), 182 183 TP_ARGS(sp) 184); 185 186DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_unsync_page, 187 TP_PROTO(struct kvm_mmu_page *sp), 188 189 TP_ARGS(sp) 190); 191 192DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_prepare_zap_page, 193 TP_PROTO(struct kvm_mmu_page *sp), 194 195 TP_ARGS(sp) 196); 197 198DEFINE_EVENT(kvm_mmu_page_class, kvm_mmu_delay_free_pages, 199 TP_PROTO(struct kvm_mmu_page *sp), 200 201 TP_ARGS(sp) 202); 203 204TRACE_EVENT( 205 mark_mmio_spte, 206 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access), 207 TP_ARGS(sptep, gfn, access), 208 209 TP_STRUCT__entry( 210 __field(void *, sptep) 211 __field(gfn_t, gfn) 212 __field(unsigned, access) 213 ), 214 215 TP_fast_assign( 216 __entry->sptep = sptep; 217 __entry->gfn = gfn; 218 __entry->access = access; 219 ), 220 221 TP_printk("sptep:%p gfn %llx access %x", __entry->sptep, __entry->gfn, 222 __entry->access) 223); 224 225TRACE_EVENT( 226 handle_mmio_page_fault, 227 TP_PROTO(u64 addr, gfn_t gfn, unsigned access), 228 TP_ARGS(addr, gfn, access), 229 230 
TP_STRUCT__entry( 231 __field(u64, addr) 232 __field(gfn_t, gfn) 233 __field(unsigned, access) 234 ), 235 236 TP_fast_assign( 237 __entry->addr = addr; 238 __entry->gfn = gfn; 239 __entry->access = access; 240 ), 241 242 TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn, 243 __entry->access) 244); 245 246#define __spte_satisfied(__spte) \ 247 (__entry->retry && is_writable_pte(__entry->__spte)) 248 249TRACE_EVENT( 250 fast_page_fault, 251 TP_PROTO(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code, 252 u64 *sptep, u64 old_spte, bool retry), 253 TP_ARGS(vcpu, gva, error_code, sptep, old_spte, retry), 254 255 TP_STRUCT__entry( 256 __field(int, vcpu_id) 257 __field(gva_t, gva) 258 __field(u32, error_code) 259 __field(u64 *, sptep) 260 __field(u64, old_spte) 261 __field(u64, new_spte) 262 __field(bool, retry) 263 ), 264 265 TP_fast_assign( 266 __entry->vcpu_id = vcpu->vcpu_id; 267 __entry->gva = gva; 268 __entry->error_code = error_code; 269 __entry->sptep = sptep; 270 __entry->old_spte = old_spte; 271 __entry->new_spte = *sptep; 272 __entry->retry = retry; 273 ), 274 275 TP_printk("vcpu %d gva %lx error_code %s sptep %p old %#llx" 276 " new %llx spurious %d fixed %d", __entry->vcpu_id, 277 __entry->gva, __print_flags(__entry->error_code, "|", 278 kvm_mmu_trace_pferr_flags), __entry->sptep, 279 __entry->old_spte, __entry->new_spte, 280 __spte_satisfied(old_spte), __spte_satisfied(new_spte) 281 ) 282); 283#endif /* _TRACE_KVMMMU_H */ 284 285#undef TRACE_INCLUDE_PATH 286#define TRACE_INCLUDE_PATH . 287#undef TRACE_INCLUDE_FILE 288#define TRACE_INCLUDE_FILE mmutrace 289 290/* This part must be outside protection */ 291#include <trace/define_trace.h>