Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at commit a0b6218037b5cf50737a7dc0fc5464ea3f8781cd (629 lines, 16 kB)
#ifndef __KVM_H
#define __KVM_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>

#include "vmx.h"
#include <linux/kvm.h>

#define CR0_PE_MASK (1ULL << 0)
#define CR0_TS_MASK (1ULL << 3)
#define CR0_NE_MASK (1ULL << 5)
#define CR0_WP_MASK (1ULL << 16)
#define CR0_NW_MASK (1ULL << 29)
#define CR0_CD_MASK (1ULL << 30)
#define CR0_PG_MASK (1ULL << 31)

#define CR3_WPT_MASK (1ULL << 3)
#define CR3_PCD_MASK (1ULL << 4)

#define CR3_RESEVED_BITS 0x07ULL
#define CR3_L_MODE_RESEVED_BITS (~((1ULL << 40) - 1) | 0x0fe7ULL)
#define CR3_FLAGS_MASK ((1ULL << 5) - 1)

#define CR4_VME_MASK (1ULL << 0)
#define CR4_PSE_MASK (1ULL << 4)
#define CR4_PAE_MASK (1ULL << 5)
#define CR4_PGE_MASK (1ULL << 7)
#define CR4_VMXE_MASK (1ULL << 13)

#define KVM_GUEST_CR0_MASK \
        (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK \
         | CR0_NW_MASK | CR0_CD_MASK)
#define KVM_VM_CR0_ALWAYS_ON \
        (CR0_PG_MASK | CR0_PE_MASK | CR0_WP_MASK | CR0_NE_MASK)
#define KVM_GUEST_CR4_MASK \
        (CR4_PSE_MASK | CR4_PAE_MASK | CR4_PGE_MASK | CR4_VMXE_MASK | CR4_VME_MASK)
#define KVM_PMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (CR4_VMXE_MASK | CR4_PAE_MASK | CR4_VME_MASK)

#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)

#define KVM_MAX_VCPUS 1
#define KVM_MEMORY_SLOTS 4
#define KVM_NUM_MMU_PAGES 256
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25

#define FX_IMAGE_SIZE 512
#define FX_IMAGE_ALIGN 16
#define FX_BUF_SIZE (2 * FX_IMAGE_SIZE + FX_IMAGE_ALIGN)

#define DE_VECTOR 0
#define DF_VECTOR 8
#define TS_VECTOR 10
#define NP_VECTOR 11
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14

#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03

#define IOPL_SHIFT 12

/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long gva_t;
typedef u64           gpa_t;
typedef unsigned long gfn_t;

typedef unsigned long hva_t;
typedef u64           hpa_t;
typedef unsigned long hfn_t;

#define NR_PTE_CHAIN_ENTRIES 5

struct kvm_pte_chain {
        u64 *parent_ptes[NR_PTE_CHAIN_ENTRIES];
        struct hlist_node link;
};

/*
 * kvm_mmu_page_role, below, is defined as:
 *
 *   bits 0:3 - total guest paging levels (2-4, or zero for real mode)
 *   bits 4:7 - page table level for this shadow (1-4)
 *   bits 8:9 - page table quadrant for 2-level guests
 *   bit   16 - "metaphysical" - gfn is not a real page (huge page/real mode)
 */
union kvm_mmu_page_role {
        unsigned word;
        struct {
                unsigned glevels : 4;
                unsigned level : 4;
                unsigned quadrant : 2;
                unsigned pad_for_nice_hex_output : 6;
                unsigned metaphysical : 1;
        };
};

struct kvm_mmu_page {
        struct list_head link;
        struct hlist_node hash_link;

        /*
         * The following two entries are used to key the shadow page in the
         * hash table.
         */
        gfn_t gfn;
        union kvm_mmu_page_role role;

        hpa_t page_hpa;
        unsigned long slot_bitmap; /* One bit set per slot which has memory
                                    * in this shadow page.
                                    */
        int global;      /* Set if all ptes in this page are global */
        int multimapped; /* More than one parent_pte? */
        int root_count;  /* Currently serving as active root */
        union {
                u64 *parent_pte;               /* !multimapped */
                struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
        };
};
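/*
 * A minimal sketch (not part of the original header; the function name and
 * values are assumptions) of how a shadow page role packs into one word.
 * For a shadow of a 2-level (non-PAE 32-bit) guest page table, glevels is 2,
 * and quadrant selects which quarter of the guest table this shadow covers.
 */
static inline union kvm_mmu_page_role example_mmu_page_role(void)
{
        union kvm_mmu_page_role role;

        role.word = 0;         /* zero the pad bits for readable hex dumps */
        role.glevels = 2;      /* guest uses 2-level 32-bit paging */
        role.level = 3;        /* this shadow sits at page table level 3 */
        role.quadrant = 1;     /* second quadrant of the guest table */
        role.metaphysical = 0; /* gfn names a real guest page */
        return role;           /* role.word keys the shadow page hash */
}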
struct vmcs {
        u32 revision_id;
        u32 abort;
        char data[0];
};

#define vmx_msr_entry kvm_msr_entry

struct kvm_vcpu;

/*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
 * mode.
 */
struct kvm_mmu {
        void (*new_cr3)(struct kvm_vcpu *vcpu);
        int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
        void (*free)(struct kvm_vcpu *vcpu);
        gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva);
        hpa_t root_hpa;
        int root_level;
        int shadow_root_level;

        u64 *pae_root;
};

#define KVM_NR_MEM_OBJS 20

/*
 * We don't want allocation failures within the mmu code, so we preallocate
 * enough memory for a single page fault in a cache.
 */
struct kvm_mmu_memory_cache {
        int nobjs;
        void *objects[KVM_NR_MEM_OBJS];
};

struct kvm_guest_debug {
        int enabled;
        unsigned long bp[4];
        int singlestep;
};

enum {
        VCPU_REGS_RAX = 0,
        VCPU_REGS_RCX = 1,
        VCPU_REGS_RDX = 2,
        VCPU_REGS_RBX = 3,
        VCPU_REGS_RSP = 4,
        VCPU_REGS_RBP = 5,
        VCPU_REGS_RSI = 6,
        VCPU_REGS_RDI = 7,
#ifdef CONFIG_X86_64
        VCPU_REGS_R8 = 8,
        VCPU_REGS_R9 = 9,
        VCPU_REGS_R10 = 10,
        VCPU_REGS_R11 = 11,
        VCPU_REGS_R12 = 12,
        VCPU_REGS_R13 = 13,
        VCPU_REGS_R14 = 14,
        VCPU_REGS_R15 = 15,
#endif
        NR_VCPU_REGS
};

enum {
        VCPU_SREG_CS,
        VCPU_SREG_DS,
        VCPU_SREG_ES,
        VCPU_SREG_FS,
        VCPU_SREG_GS,
        VCPU_SREG_SS,
        VCPU_SREG_TR,
        VCPU_SREG_LDTR,
};

struct kvm_vcpu {
        struct kvm *kvm;
        union {
                struct vmcs *vmcs;
                struct vcpu_svm *svm;
        };
        struct mutex mutex;
        int cpu;
        int launched;
        int interrupt_window_open;
        unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
#define NR_IRQ_WORDS KVM_IRQ_BITMAP_SIZE(unsigned long)
        unsigned long irq_pending[NR_IRQ_WORDS];
        unsigned long regs[NR_VCPU_REGS]; /* for rsp: vcpu_load_rsp_rip() */
        unsigned long rip;                /* needs vcpu_load_rsp_rip() */

        unsigned long cr0;
        unsigned long cr2;
        unsigned long cr3;
        unsigned long cr4;
        unsigned long cr8;
        u64 pdptrs[4]; /* pae */
        u64 shadow_efer;
        u64 apic_base;
        int nmsrs;
        struct vmx_msr_entry *guest_msrs;
        struct vmx_msr_entry *host_msrs;

        struct list_head free_pages;
        struct kvm_mmu_page page_header_buf[KVM_NUM_MMU_PAGES];
        struct kvm_mmu mmu;

        struct kvm_mmu_memory_cache mmu_pte_chain_cache;
        struct kvm_mmu_memory_cache mmu_rmap_desc_cache;

        gfn_t last_pt_write_gfn;
        int last_pt_write_count;

        struct kvm_guest_debug guest_debug;

        char fx_buf[FX_BUF_SIZE];
        char *host_fx_image;
        char *guest_fx_image;

        int mmio_needed;
        int mmio_read_completed;
        int mmio_is_write;
        int mmio_size;
        unsigned char mmio_data[8];
        gpa_t mmio_phys_addr;

        struct {
                int active;
                u8 save_iopl;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
                        u32 limit;
                        u32 ar;
                } tr, es, ds, fs, gs;
        } rmode;
};
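/*
 * Minimal sketch (assumed helper, not part of this header) of how a
 * preallocated kvm_mmu_memory_cache would be consumed: objects are popped
 * LIFO from a cache that is topped up before the mmu code runs, so an
 * allocation cannot fail in the middle of handling a page fault.
 */
static inline void *mmu_memory_cache_alloc_sketch(struct kvm_mmu_memory_cache *mc)
{
        BUG_ON(!mc->nobjs); /* the cache is refilled ahead of time */
        return mc->objects[--mc->nobjs];
}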
struct kvm_memory_slot {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long flags;
        struct page **phys_mem;
        unsigned long *dirty_bitmap;
};

struct kvm {
        spinlock_t lock; /* protects everything except vcpus */
        int nmemslots;
        struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS];
        struct list_head active_mmu_pages;
        int n_free_mmu_pages;
        /*
         * Hash table of struct kvm_mmu_page.
         */
        struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
        struct kvm_vcpu vcpus[KVM_MAX_VCPUS];
        int memory_config_version;
        int busy;
        unsigned long rmap_overflow;
};

struct kvm_stat {
        u32 pf_fixed;
        u32 pf_guest;
        u32 tlb_flush;
        u32 invlpg;

        u32 exits;
        u32 io_exits;
        u32 mmio_exits;
        u32 signal_exits;
        u32 irq_window_exits;
        u32 halt_exits;
        u32 request_irq_exits;
        u32 irq_exits;
};

struct descriptor_table {
        u16 limit;
        unsigned long base;
} __attribute__((packed));

struct kvm_arch_ops {
        int (*cpu_has_kvm_support)(void);     /* __init */
        int (*disabled_by_bios)(void);        /* __init */
        void (*hardware_enable)(void *dummy); /* __init */
        void (*hardware_disable)(void *dummy);
        int (*hardware_setup)(void);          /* __init */
        void (*hardware_unsetup)(void);       /* __exit */

        int (*vcpu_create)(struct kvm_vcpu *vcpu);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);

        struct kvm_vcpu *(*vcpu_load)(struct kvm_vcpu *vcpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);

        int (*set_guest_debug)(struct kvm_vcpu *vcpu,
                               struct kvm_debug_guest *dbg);
        int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
        int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
        u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
        void (*get_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*set_segment)(struct kvm_vcpu *vcpu,
                            struct kvm_segment *var, int seg);
        void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
        void (*decache_cr0_cr4_guest_bits)(struct kvm_vcpu *vcpu);
        void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0);
        void (*set_cr0_no_modeswitch)(struct kvm_vcpu *vcpu,
                                      unsigned long cr0);
        void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
        void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4);
        void (*set_efer)(struct kvm_vcpu *vcpu, u64 efer);
        void (*get_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_idt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*get_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        void (*set_gdt)(struct kvm_vcpu *vcpu, struct descriptor_table *dt);
        unsigned long (*get_dr)(struct kvm_vcpu *vcpu, int dr);
        void (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value,
                       int *exception);
        void (*cache_regs)(struct kvm_vcpu *vcpu);
        void (*decache_regs)(struct kvm_vcpu *vcpu);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);

        void (*invlpg)(struct kvm_vcpu *vcpu, gva_t addr);
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
        void (*inject_page_fault)(struct kvm_vcpu *vcpu,
                                  unsigned long addr, u32 err_code);

        void (*inject_gp)(struct kvm_vcpu *vcpu, unsigned err_code);

        int (*run)(struct kvm_vcpu *vcpu, struct kvm_run *run);
        int (*vcpu_setup)(struct kvm_vcpu *vcpu);
        void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
};
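/*
 * Sketch of a memory slot lookup (an assumption about the shape of
 * gfn_to_memslot(), which is declared below with its body in the .c files):
 * linearly scan the slot array for the range covering gfn.
 */
static inline struct kvm_memory_slot *
gfn_to_memslot_sketch(struct kvm *kvm, gfn_t gfn)
{
        int i;

        for (i = 0; i < kvm->nmemslots; ++i) {
                struct kvm_memory_slot *slot = &kvm->memslots[i];

                if (gfn >= slot->base_gfn
                    && gfn < slot->base_gfn + slot->npages)
                        return slot;
        }
        return NULL; /* gfn is not backed by any registered slot */
}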
extern struct kvm_stat kvm_stat;
extern struct kvm_arch_ops *kvm_arch_ops;

#define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt)
#define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt)

int kvm_init_arch(struct kvm_arch_ops *ops, struct module *module);
void kvm_exit_arch(void);

void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
int kvm_mmu_setup(struct kvm_vcpu *vcpu);

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm_vcpu *vcpu, int slot);

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa);
#define HPA_MSB ((sizeof(hpa_t) * 8) - 1)
#define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB)
static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; }
hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva);

void kvm_emulator_want_group7_invlpg(void);

extern hpa_t bad_page_address;

static inline struct page *gfn_to_page(struct kvm_memory_slot *slot, gfn_t gfn)
{
        return slot->phys_mem[gfn - slot->base_gfn];
}

struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

enum emulation_result {
        EMULATE_DONE,    /* no further processing */
        EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
        EMULATE_FAIL,    /* can't emulate this instruction */
};

int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
                        unsigned long cr2, u16 error_code);
void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
                   unsigned long *rflags);

unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
                     unsigned long *rflags);

struct x86_emulate_ctxt;

int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address);
int emulate_clts(struct kvm_vcpu *vcpu);
int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long *dest);
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
                    unsigned long value);

void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);

int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

void fx_init(struct kvm_vcpu *vcpu);

void load_msrs(struct vmx_msr_entry *e, int n);
void save_msrs(struct vmx_msr_entry *e, int n);
void kvm_resched(struct kvm_vcpu *vcpu);

int kvm_read_guest(struct kvm_vcpu *vcpu,
                   gva_t addr,
                   unsigned long size,
                   void *dest);

int kvm_write_guest(struct kvm_vcpu *vcpu,
                    gva_t addr,
                    unsigned long size,
                    void *data);

unsigned long segment_base(u16 selector);

void kvm_mmu_pre_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
void kvm_mmu_post_write(struct kvm_vcpu *vcpu, gpa_t gpa, int bytes);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
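/*
 * Sketch of arch backend registration (an assumed demo module, not in the
 * tree): the real VMX and SVM backends fill every kvm_arch_ops callback
 * before handing the table to kvm_init_arch() from their module_init.
 */
#include <linux/module.h>

static struct kvm_arch_ops demo_arch_ops; /* callbacks must all be set for real use */

static int __init demo_init(void)
{
        return kvm_init_arch(&demo_arch_ops, THIS_MODULE);
}

static void __exit demo_exit(void)
{
        kvm_exit_arch();
}

module_init(demo_init);
module_exit(demo_exit);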
static inline int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                     u32 error_code)
{
        if (unlikely(vcpu->kvm->n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
                kvm_mmu_free_some_pages(vcpu);
        return vcpu->mmu.page_fault(vcpu, gva, error_code);
}

static inline struct page *_gfn_to_page(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
        return (slot) ? slot->phys_mem[gfn - slot->base_gfn] : NULL;
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
        return vcpu->shadow_efer & EFER_LME;
#else
        return 0;
#endif
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PAE_MASK;
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
        return vcpu->cr4 & CR4_PSE_MASK;
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & CR0_PG_MASK;
}

static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        return slot - kvm->memslots;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

        return (struct kvm_mmu_page *)page->private;
}

static inline u16 read_fs(void)
{
        u16 seg;
        asm ("mov %%fs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_gs(void)
{
        u16 seg;
        asm ("mov %%gs, %0" : "=g"(seg));
        return seg;
}

static inline u16 read_ldt(void)
{
        u16 ldt;
        asm ("sldt %0" : "=g"(ldt));
        return ldt;
}

static inline void load_fs(u16 sel)
{
        asm ("mov %0, %%fs" : : "rm"(sel));
}

static inline void load_gs(u16 sel)
{
        asm ("mov %0, %%gs" : : "rm"(sel));
}

#ifndef load_ldt
static inline void load_ldt(u16 sel)
{
        asm ("lldt %0" : : "g"(sel));
}
#endif

static inline void get_idt(struct descriptor_table *table)
{
        asm ("sidt %0" : "=m"(*table));
}

static inline void get_gdt(struct descriptor_table *table)
{
        asm ("sgdt %0" : "=m"(*table));
}

static inline unsigned long read_tr_base(void)
{
        u16 tr;
        asm ("str %0" : "=g"(tr));
        return segment_base(tr);
}

#ifdef CONFIG_X86_64
static inline unsigned long read_msr(unsigned long msr)
{
        u64 value;

        rdmsrl(msr, value);
        return value;
}
#endif

static inline void fx_save(void *image)
{
        asm ("fxsave (%0)" : : "r"(image));
}

static inline void fx_restore(void *image)
{
        asm ("fxrstor (%0)" : : "r"(image));
}

static inline void fpu_init(void)
{
        asm ("finit");
}

static inline u32 get_rdx_init_val(void)
{
        return 0x600; /* P6 family */
}

#define ASM_VMX_VMCLEAR_RAX     ".byte 0x66, 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMLAUNCH        ".byte 0x0f, 0x01, 0xc2"
#define ASM_VMX_VMRESUME        ".byte 0x0f, 0x01, 0xc3"
#define ASM_VMX_VMPTRLD_RAX     ".byte 0x0f, 0xc7, 0x30"
#define ASM_VMX_VMREAD_RDX_RAX  ".byte 0x0f, 0x78, 0xd0"
#define ASM_VMX_VMWRITE_RAX_RDX ".byte 0x0f, 0x79, 0xd0"
#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF          ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX       ".byte 0xf3, 0x0f, 0xc7, 0x30"

#define MSR_IA32_TIME_STAMP_COUNTER 0x010

#define TSS_IOPB_BASE_OFFSET 0x66
#define TSS_BASE_SIZE 0x68
#define TSS_IOPB_SIZE (65536 / 8)
#define TSS_REDIRECTION_SIZE (256 / 8)
#define RMODE_TSS_SIZE (TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)

#endif
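For orientation, a minimal sketch of how the declarations above might combine on a guest page-fault VM exit; the function name and control flow are assumptions modeled loosely on the exit handling of the early arch backends, not code from this header.

/*
 * Hypothetical caller: try to resolve the fault through the shadow mmu,
 * falling back to the instruction emulator. Returning 1 resumes the guest;
 * returning 0 exits to userspace.
 */
static int handle_guest_page_fault(struct kvm_vcpu *vcpu,
                                   struct kvm_run *kvm_run,
                                   gva_t cr2, u32 error_code)
{
        if (!kvm_mmu_page_fault(vcpu, cr2, error_code))
                return 1; /* shadow page tables fixed up; re-enter the guest */

        switch (emulate_instruction(vcpu, kvm_run, cr2, error_code)) {
        case EMULATE_DONE:
                return 1; /* emulated in the kernel, nothing else to do */
        case EMULATE_DO_MMIO:
                return 0; /* kvm_run now carries an mmio request */
        case EMULATE_FAIL:
        default:
                kvm_arch_ops->inject_gp(vcpu, 0); /* assumed error policy */
                return 1;
        }
}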