Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
"The main changes are the PCID fixes from Andy, but there's also two
hyperv fixes and two paravirt updates"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/hyper-v: Remove duplicated HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED definition
x86/hyper-V: Allocate the IDT entry early in boot
paravirt: Switch maintainer
x86/paravirt: Remove no longer used paravirt functions
x86/mm/64: Initialize CR4.PCIDE early
x86/hibernate/64: Mask off CR3's PCID bits in the saved CR3
x86/mm: Get rid of VM_BUG_ON in switch_tlb_irqs_off()

+107 -150
+2 -2
MAINTAINERS
···
 F: Documentation/parport*.txt
 
 PARAVIRT_OPS INTERFACE
-M: Jeremy Fitzhardinge <jeremy@goop.org>
+M: Juergen Gross <jgross@suse.com>
 M: Chris Wright <chrisw@sous-sol.org>
 M: Alok Kataria <akataria@vmware.com>
 M: Rusty Russell <rusty@rustcorp.com.au>
···
 S: Supported
 F: Documentation/virtual/paravirt_ops.txt
 F: arch/*/kernel/paravirt*
-F: arch/*/include/asm/paravirt.h
+F: arch/*/include/asm/paravirt*.h
 F: include/linux/hypervisor.h
 
 PARIDE DRIVERS FOR PARALLEL PORT IDE DEVICES
+1 -2
arch/x86/include/asm/desc.h
···
 #define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt))
 
 #define store_gdt(dtr) native_store_gdt(dtr)
-#define store_idt(dtr) native_store_idt(dtr)
 #define store_tr(tr) (tr = native_store_tr())
 
 #define load_TLS(t, cpu) native_load_tls(t, cpu)
···
         asm volatile("sgdt %0":"=m" (*dtr));
 }
 
-static inline void native_store_idt(struct desc_ptr *dtr)
+static inline void store_idt(struct desc_ptr *dtr)
 {
         asm volatile("sidt %0":"=m" (*dtr));
 }
-37
arch/x86/include/asm/paravirt.h
···
         PVOP_VCALL1(pv_mmu_ops.write_cr3, x);
 }
 
-static inline unsigned long __read_cr4(void)
-{
-        return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4);
-}
-
 static inline void __write_cr4(unsigned long x)
 {
         PVOP_VCALL1(pv_cpu_ops.write_cr4, x);
···
 {
         PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries);
 }
-static inline void store_idt(struct desc_ptr *dtr)
-{
-        PVOP_VCALL1(pv_cpu_ops.store_idt, dtr);
-}
 static inline unsigned long paravirt_store_tr(void)
 {
         return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr);
···
         PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn);
 }
 
-static inline void pte_update(struct mm_struct *mm, unsigned long addr,
-                              pte_t *ptep)
-{
-        PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep);
-}
-
 static inline pte_t __pte(pteval_t val)
 {
         pteval_t ret;
···
                 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte);
         else
                 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte);
-}
-
-static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                              pmd_t *pmdp, pmd_t pmd)
-{
-        if (sizeof(pmdval_t) > sizeof(long))
-                /* 5 arg words */
-                pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd);
-        else
-                PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp,
-                            native_pmd_val(pmd));
-}
-
-static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
-                              pud_t *pudp, pud_t pud)
-{
-        if (sizeof(pudval_t) > sizeof(long))
-                /* 5 arg words */
-                pv_mmu_ops.set_pud_at(mm, addr, pudp, pud);
-        else
-                PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp,
-                            native_pud_val(pud));
 }
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
-9
arch/x86/include/asm/paravirt_types.h
···
         unsigned long (*read_cr0)(void);
         void (*write_cr0)(unsigned long);
 
-        unsigned long (*read_cr4)(void);
         void (*write_cr4)(unsigned long);
 
 #ifdef CONFIG_X86_64
···
         void (*load_tr_desc)(void);
         void (*load_gdt)(const struct desc_ptr *);
         void (*load_idt)(const struct desc_ptr *);
-        /* store_gdt has been removed. */
-        void (*store_idt)(struct desc_ptr *);
         void (*set_ldt)(const void *desc, unsigned entries);
         unsigned long (*store_tr)(void);
         void (*load_tls)(struct thread_struct *t, unsigned int cpu);
···
         void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep, pte_t pteval);
         void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
-        void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr,
-                           pmd_t *pmdp, pmd_t pmdval);
-        void (*set_pud_at)(struct mm_struct *mm, unsigned long addr,
-                           pud_t *pudp, pud_t pudval);
-        void (*pte_update)(struct mm_struct *mm, unsigned long addr,
-                           pte_t *ptep);
 
         pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep);
+4 -23
arch/x86/include/asm/pgtable.h
···
 #else /* !CONFIG_PARAVIRT */
 #define set_pte(ptep, pte) native_set_pte(ptep, pte)
 #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
-#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)
-#define set_pud_at(mm, addr, pudp, pud) native_set_pud_at(mm, addr, pudp, pud)
 
 #define set_pte_atomic(ptep, pte) \
         native_set_pte_atomic(ptep, pte)
···
 
 #define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep)
 #define pmd_clear(pmd) native_pmd_clear(pmd)
-
-#define pte_update(mm, addr, ptep) do { } while (0)
 
 #define pgd_val(x) native_pgd_val(x)
 #define __pgd(x) native_make_pgd(x)
···
         native_set_pte(ptep, pte);
 }
 
-static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                                     pmd_t *pmdp , pmd_t pmd)
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+                              pmd_t *pmdp, pmd_t pmd)
 {
         native_set_pmd(pmdp, pmd);
 }
 
-static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr,
-                                     pud_t *pudp, pud_t pud)
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+                              pud_t *pudp, pud_t pud)
 {
         native_set_pud(pudp, pud);
 }
-
-#ifndef CONFIG_PARAVIRT
-/*
- * Rules for using pte_update - it must be called after any PTE update which
- * has not been done using the set_pte / clear_pte interfaces. It is used by
- * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
- * updates should either be sets, clears, or set_pte_atomic for P->P
- * transitions, which means this hook should only be called for user PTEs.
- * This hook implies a P->P protection or access change has taken place, which
- * requires a subsequent TLB flush.
- */
-#define pte_update(mm, addr, ptep) do { } while (0)
-#endif
 
 /*
  * We only update the dirty/accessed state if we set
···
                                        pte_t *ptep)
 {
         pte_t pte = native_ptep_get_and_clear(ptep);
-        pte_update(mm, addr, ptep);
         return pte;
 }
···
                                      unsigned long addr, pte_t *ptep)
 {
         clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
-        pte_update(mm, addr, ptep);
 }
 
 #define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+5 -5
arch/x86/include/asm/special_insns.h
···
 
 extern asmlinkage void native_load_gs_index(unsigned);
 
+static inline unsigned long __read_cr4(void)
+{
+        return native_read_cr4();
+}
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else
···
 static inline void write_cr3(unsigned long x)
 {
         native_write_cr3(x);
-}
-
-static inline unsigned long __read_cr4(void)
-{
-        return native_read_cr4();
 }
 
 static inline void __write_cr4(unsigned long x)
-6
arch/x86/include/uapi/asm/hyperv.h
···
 #define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
 
 /*
- * HV_VP_SET available
- */
-#define HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED (1 << 11)
-
-
-/*
  * Crash notification flag.
  */
 #define HV_CRASH_CTL_CRASH_NOTIFY (1ULL << 63)
+7 -42
arch/x86/kernel/cpu/common.c
···
 __setup("nompx", x86_mpx_setup);
 
 #ifdef CONFIG_X86_64
-static int __init x86_pcid_setup(char *s)
+static int __init x86_nopcid_setup(char *s)
 {
-        /* require an exact match without trailing characters */
-        if (strlen(s))
-                return 0;
+        /* nopcid doesn't accept parameters */
+        if (s)
+                return -EINVAL;
 
         /* do not emit a message if the feature is not present */
         if (!boot_cpu_has(X86_FEATURE_PCID))
-                return 1;
+                return 0;
 
         setup_clear_cpu_cap(X86_FEATURE_PCID);
         pr_info("nopcid: PCID feature disabled\n");
-        return 1;
+        return 0;
 }
-__setup("nopcid", x86_pcid_setup);
+early_param("nopcid", x86_nopcid_setup);
 #endif
 
 static int __init x86_noinvpcid_setup(char *s)
···
 #else
         cr4_clear_bits(X86_CR4_SMAP);
 #endif
-        }
-}
-
-static void setup_pcid(struct cpuinfo_x86 *c)
-{
-        if (cpu_has(c, X86_FEATURE_PCID)) {
-                if (cpu_has(c, X86_FEATURE_PGE)) {
-                        /*
-                         * We'd like to use cr4_set_bits_and_update_boot(),
-                         * but we can't. CR4.PCIDE is special and can only
-                         * be set in long mode, and the early CPU init code
-                         * doesn't know this and would try to restore CR4.PCIDE
-                         * prior to entering long mode.
-                         *
-                         * Instead, we rely on the fact that hotplug, resume,
-                         * etc all fully restore CR4 before they write anything
-                         * that could have nonzero PCID bits to CR3. CR4.PCIDE
-                         * has no effect on the page tables themselves, so we
-                         * don't need it to be restored early.
-                         */
-                        cr4_set_bits(X86_CR4_PCIDE);
-                } else {
-                        /*
-                         * flush_tlb_all(), as currently implemented, won't
-                         * work if PCID is on but PGE is not. Since that
-                         * combination doesn't exist on real hardware, there's
-                         * no reason to try to fully support it, but it's
-                         * polite to avoid corrupting data if we're on
-                         * an improperly configured VM.
-                         */
-                        clear_cpu_cap(c, X86_FEATURE_PCID);
-                }
         }
 }
···
         /* Set up SMEP/SMAP */
         setup_smep(c);
         setup_smap(c);
-
-        /* Set up PCID */
-        setup_pcid(c);
 
         /*
          * The vendor-specific functions might have changed features.
+2 -2
arch/x86/kernel/cpu/mshyperv.c
···
 void hv_setup_vmbus_irq(void (*handler)(void))
 {
         vmbus_handler = handler;
-        /* Setup the IDT for hypervisor callback */
-        alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
 }
 
 void hv_remove_vmbus_irq(void)
···
          */
         x86_platform.apic_post_init = hyperv_init;
         hyperv_setup_mmu_ops();
+        /* Setup the IDT for hypervisor callback */
+        alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
 #endif
 }
 
-5
arch/x86/kernel/paravirt.c
···
         .set_debugreg = native_set_debugreg,
         .read_cr0 = native_read_cr0,
         .write_cr0 = native_write_cr0,
-        .read_cr4 = native_read_cr4,
         .write_cr4 = native_write_cr4,
 #ifdef CONFIG_X86_64
         .read_cr8 = native_read_cr8,
···
         .set_ldt = native_set_ldt,
         .load_gdt = native_load_gdt,
         .load_idt = native_load_idt,
-        .store_idt = native_store_idt,
         .store_tr = native_store_tr,
         .load_tls = native_load_tls,
 #ifdef CONFIG_X86_64
···
         .set_pte = native_set_pte,
         .set_pte_at = native_set_pte_at,
         .set_pmd = native_set_pmd,
-        .set_pmd_at = native_set_pmd_at,
-        .pte_update = paravirt_nop,
 
         .ptep_modify_prot_start = __ptep_modify_prot_start,
         .ptep_modify_prot_commit = __ptep_modify_prot_commit,
···
         .pmd_clear = native_pmd_clear,
 #endif
         .set_pud = native_set_pud,
-        .set_pud_at = native_set_pud_at,
 
         .pmd_val = PTE_IDENT,
         .make_pmd = PTE_IDENT,
+4 -1
arch/x86/kernel/setup.c
···
          * with the current CR4 value. This may not be necessary, but
          * auditing all the early-boot CR4 manipulation would be needed to
          * rule it out.
+         *
+         * Mask off features that don't work outside long mode (just
+         * PCIDE for now).
          */
-        mmu_cr4_features = __read_cr4();
+        mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;
 
         memblock_set_current_limit(get_max_mapped());
+5 -3
arch/x86/kernel/smpboot.c
···
 static void notrace start_secondary(void *unused)
 {
         /*
-         * Don't put *anything* before cpu_init(), SMP booting is too
-         * fragile that we want to limit the things done here to the
-         * most necessary things.
+         * Don't put *anything* except direct CPU state initialization
+         * before cpu_init(), SMP booting is too fragile that we want to
+         * limit the things done here to the most necessary things.
          */
+        if (boot_cpu_has(X86_FEATURE_PCID))
+                __write_cr4(__read_cr4() | X86_CR4_PCIDE);
         cpu_init();
         x86_cpuinit.early_percpu_clock_init();
         preempt_disable();
+1 -1
arch/x86/kvm/vmx.c
···
         vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
         vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
 
-        native_store_idt(&dt);
+        store_idt(&dt);
         vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
         vmx->host_idt_base = dt.address;
 
+34
arch/x86/mm/init.c
···
 #include <asm/microcode.h>
 #include <asm/kaslr.h>
 #include <asm/hypervisor.h>
+#include <asm/cpufeature.h>
 
 /*
  * We need to define the tracepoints somewhere, and tlb.c
···
         } else {
                 direct_gbpages = 0;
         }
+}
+
+static void setup_pcid(void)
+{
+#ifdef CONFIG_X86_64
+        if (boot_cpu_has(X86_FEATURE_PCID)) {
+                if (boot_cpu_has(X86_FEATURE_PGE)) {
+                        /*
+                         * This can't be cr4_set_bits_and_update_boot() --
+                         * the trampoline code can't handle CR4.PCIDE and
+                         * it wouldn't do any good anyway. Despite the name,
+                         * cr4_set_bits_and_update_boot() doesn't actually
+                         * cause the bits in question to remain set all the
+                         * way through the secondary boot asm.
+                         *
+                         * Instead, we brute-force it and set CR4.PCIDE
+                         * manually in start_secondary().
+                         */
+                        cr4_set_bits(X86_CR4_PCIDE);
+                } else {
+                        /*
+                         * flush_tlb_all(), as currently implemented, won't
+                         * work if PCID is on but PGE is not. Since that
+                         * combination doesn't exist on real hardware, there's
+                         * no reason to try to fully support it, but it's
+                         * polite to avoid corrupting data if we're on
+                         * an improperly configured VM.
+                         */
+                        setup_clear_cpu_cap(X86_FEATURE_PCID);
+                }
+        }
+#endif
 }
···
         unsigned long end;
 
         probe_page_size_mask();
+        setup_pcid();
 
 #ifdef CONFIG_X86_64
         end = max_pfn << PAGE_SHIFT;
+1 -6
arch/x86/mm/pgtable.c
···
 {
         int changed = !pte_same(*ptep, entry);
 
-        if (changed && dirty) {
+        if (changed && dirty)
                 *ptep = entry;
-                pte_update(vma->vm_mm, address, ptep);
-        }
 
         return changed;
 }
···
         if (pte_young(*ptep))
                 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
                                          (unsigned long *) &ptep->pte);
-
-        if (ret)
-                pte_update(vma->vm_mm, addr, ptep);
 
         return ret;
 }
+21 -1
arch/x86/mm/tlb.c
···
          * hypothetical buggy code that directly switches to swapper_pg_dir
          * without going through leave_mm() / switch_mm_irqs_off() or that
          * does something like write_cr3(read_cr3_pa()).
+         *
+         * Only do this check if CONFIG_DEBUG_VM=y because __read_cr3()
+         * isn't free.
          */
-        VM_BUG_ON(__read_cr3() != (__sme_pa(real_prev->pgd) | prev_asid));
+#ifdef CONFIG_DEBUG_VM
+        if (WARN_ON_ONCE(__read_cr3() !=
+                         (__sme_pa(real_prev->pgd) | prev_asid))) {
+                /*
+                 * If we were to BUG here, we'd be very likely to kill
+                 * the system so hard that we don't see the call trace.
+                 * Try to recover instead by ignoring the error and doing
+                 * a global flush to minimize the chance of corruption.
+                 *
+                 * (This is far from being a fully correct recovery.
+                 * Architecturally, the CPU could prefetch something
+                 * back into an incorrect ASID slot and leave it there
+                 * to cause trouble down the road. It's better than
+                 * nothing, though.)
+                 */
+                __flush_tlb_all();
+        }
+#endif
 
         if (real_prev == next) {
                 VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) !=
+20 -1
arch/x86/power/hibernate_64.c
···
                 return -EOVERFLOW;
         rdr->jump_address = (unsigned long)restore_registers;
         rdr->jump_address_phys = __pa_symbol(restore_registers);
-        rdr->cr3 = restore_cr3;
+
+        /*
+         * The restore code fixes up CR3 and CR4 in the following sequence:
+         *
+         * [in hibernation asm]
+         * 1. CR3 <= temporary page tables
+         * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
+         * 3. CR3 <= rdr->cr3
+         * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
+         * [in restore_processor_state()]
+         * 5. CR4 <= saved CR4
+         * 6. CR3 <= saved CR3
+         *
+         * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
+         * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
+         * rdr->cr3 needs to point to valid page tables but must not
+         * have any of the PCID bits set.
+         */
+        rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;
+
         rdr->magic = RESTORE_MAGIC;
 
         hibernation_e820_save(rdr->e820_digest);
-2
arch/x86/xen/enlighten_pv.c
···
         .read_cr0 = xen_read_cr0,
         .write_cr0 = xen_write_cr0,
 
-        .read_cr4 = native_read_cr4,
         .write_cr4 = xen_write_cr4,
 
 #ifdef CONFIG_X86_64
···
         .alloc_ldt = xen_alloc_ldt,
         .free_ldt = xen_free_ldt,
 
-        .store_idt = native_store_idt,
         .store_tr = xen_store_tr,
 
         .write_ldt_entry = xen_write_ldt_entry,
-2
arch/x86/xen/mmu_pv.c
···
         .flush_tlb_single = xen_flush_tlb_single,
         .flush_tlb_others = xen_flush_tlb_others,
 
-        .pte_update = paravirt_nop,
-
         .pgd_alloc = xen_pgd_alloc,
         .pgd_free = xen_pgd_free,
 