x86/paravirt: Remove no longer used paravirt functions

With removal of lguest some of the paravirt functions are no longer
needed:

->read_cr4()
->store_idt()
->set_pmd_at()
->set_pud_at()
->pte_update()

Remove them.

Signed-off-by: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akataria@vmware.com
Cc: boris.ostrovsky@oracle.com
Cc: chrisw@sous-sol.org
Cc: jeremy@goop.org
Cc: rusty@rustcorp.com.au
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/20170904102527.25409-1-jgross@suse.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Authored by Juergen Gross and committed by Ingo Molnar.
87930019 c7ad5ad2

+12 -92
+1 -2
arch/x86/include/asm/desc.h
··· 121 #define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) 122 123 #define store_gdt(dtr) native_store_gdt(dtr) 124 - #define store_idt(dtr) native_store_idt(dtr) 125 #define store_tr(tr) (tr = native_store_tr()) 126 127 #define load_TLS(t, cpu) native_load_tls(t, cpu) ··· 227 asm volatile("sgdt %0":"=m" (*dtr)); 228 } 229 230 - static inline void native_store_idt(struct desc_ptr *dtr) 231 { 232 asm volatile("sidt %0":"=m" (*dtr)); 233 }
··· 121 #define load_ldt(ldt) asm volatile("lldt %0"::"m" (ldt)) 122 123 #define store_gdt(dtr) native_store_gdt(dtr) 124 #define store_tr(tr) (tr = native_store_tr()) 125 126 #define load_TLS(t, cpu) native_load_tls(t, cpu) ··· 228 asm volatile("sgdt %0":"=m" (*dtr)); 229 } 230 231 + static inline void store_idt(struct desc_ptr *dtr) 232 { 233 asm volatile("sidt %0":"=m" (*dtr)); 234 }
-37
arch/x86/include/asm/paravirt.h
··· 71 PVOP_VCALL1(pv_mmu_ops.write_cr3, x); 72 } 73 74 - static inline unsigned long __read_cr4(void) 75 - { 76 - return PVOP_CALL0(unsigned long, pv_cpu_ops.read_cr4); 77 - } 78 - 79 static inline void __write_cr4(unsigned long x) 80 { 81 PVOP_VCALL1(pv_cpu_ops.write_cr4, x); ··· 223 { 224 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); 225 } 226 - static inline void store_idt(struct desc_ptr *dtr) 227 - { 228 - PVOP_VCALL1(pv_cpu_ops.store_idt, dtr); 229 - } 230 static inline unsigned long paravirt_store_tr(void) 231 { 232 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr); ··· 356 PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn); 357 } 358 359 - static inline void pte_update(struct mm_struct *mm, unsigned long addr, 360 - pte_t *ptep) 361 - { 362 - PVOP_VCALL3(pv_mmu_ops.pte_update, mm, addr, ptep); 363 - } 364 - 365 static inline pte_t __pte(pteval_t val) 366 { 367 pteval_t ret; ··· 455 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte); 456 else 457 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte); 458 - } 459 - 460 - static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 461 - pmd_t *pmdp, pmd_t pmd) 462 - { 463 - if (sizeof(pmdval_t) > sizeof(long)) 464 - /* 5 arg words */ 465 - pv_mmu_ops.set_pmd_at(mm, addr, pmdp, pmd); 466 - else 467 - PVOP_VCALL4(pv_mmu_ops.set_pmd_at, mm, addr, pmdp, 468 - native_pmd_val(pmd)); 469 - } 470 - 471 - static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, 472 - pud_t *pudp, pud_t pud) 473 - { 474 - if (sizeof(pudval_t) > sizeof(long)) 475 - /* 5 arg words */ 476 - pv_mmu_ops.set_pud_at(mm, addr, pudp, pud); 477 - else 478 - PVOP_VCALL4(pv_mmu_ops.set_pud_at, mm, addr, pudp, 479 - native_pud_val(pud)); 480 } 481 482 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
··· 71 PVOP_VCALL1(pv_mmu_ops.write_cr3, x); 72 } 73 74 static inline void __write_cr4(unsigned long x) 75 { 76 PVOP_VCALL1(pv_cpu_ops.write_cr4, x); ··· 228 { 229 PVOP_VCALL2(pv_cpu_ops.set_ldt, addr, entries); 230 } 231 static inline unsigned long paravirt_store_tr(void) 232 { 233 return PVOP_CALL0(unsigned long, pv_cpu_ops.store_tr); ··· 365 PVOP_VCALL1(pv_mmu_ops.release_p4d, pfn); 366 } 367 368 static inline pte_t __pte(pteval_t val) 369 { 370 pteval_t ret; ··· 470 pv_mmu_ops.set_pte_at(mm, addr, ptep, pte); 471 else 472 PVOP_VCALL4(pv_mmu_ops.set_pte_at, mm, addr, ptep, pte.pte); 473 } 474 475 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
-9
arch/x86/include/asm/paravirt_types.h
··· 107 unsigned long (*read_cr0)(void); 108 void (*write_cr0)(unsigned long); 109 110 - unsigned long (*read_cr4)(void); 111 void (*write_cr4)(unsigned long); 112 113 #ifdef CONFIG_X86_64 ··· 118 void (*load_tr_desc)(void); 119 void (*load_gdt)(const struct desc_ptr *); 120 void (*load_idt)(const struct desc_ptr *); 121 - /* store_gdt has been removed. */ 122 - void (*store_idt)(struct desc_ptr *); 123 void (*set_ldt)(const void *desc, unsigned entries); 124 unsigned long (*store_tr)(void); 125 void (*load_tls)(struct thread_struct *t, unsigned int cpu); ··· 242 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, 243 pte_t *ptep, pte_t pteval); 244 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); 245 - void (*set_pmd_at)(struct mm_struct *mm, unsigned long addr, 246 - pmd_t *pmdp, pmd_t pmdval); 247 - void (*set_pud_at)(struct mm_struct *mm, unsigned long addr, 248 - pud_t *pudp, pud_t pudval); 249 - void (*pte_update)(struct mm_struct *mm, unsigned long addr, 250 - pte_t *ptep); 251 252 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, 253 pte_t *ptep);
··· 107 unsigned long (*read_cr0)(void); 108 void (*write_cr0)(unsigned long); 109 110 void (*write_cr4)(unsigned long); 111 112 #ifdef CONFIG_X86_64 ··· 119 void (*load_tr_desc)(void); 120 void (*load_gdt)(const struct desc_ptr *); 121 void (*load_idt)(const struct desc_ptr *); 122 void (*set_ldt)(const void *desc, unsigned entries); 123 unsigned long (*store_tr)(void); 124 void (*load_tls)(struct thread_struct *t, unsigned int cpu); ··· 245 void (*set_pte_at)(struct mm_struct *mm, unsigned long addr, 246 pte_t *ptep, pte_t pteval); 247 void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval); 248 249 pte_t (*ptep_modify_prot_start)(struct mm_struct *mm, unsigned long addr, 250 pte_t *ptep);
+4 -23
arch/x86/include/asm/pgtable.h
··· 55 #else /* !CONFIG_PARAVIRT */ 56 #define set_pte(ptep, pte) native_set_pte(ptep, pte) 57 #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) 58 - #define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd) 59 - #define set_pud_at(mm, addr, pudp, pud) native_set_pud_at(mm, addr, pudp, pud) 60 61 #define set_pte_atomic(ptep, pte) \ 62 native_set_pte_atomic(ptep, pte) ··· 84 85 #define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) 86 #define pmd_clear(pmd) native_pmd_clear(pmd) 87 - 88 - #define pte_update(mm, addr, ptep) do { } while (0) 89 90 #define pgd_val(x) native_pgd_val(x) 91 #define __pgd(x) native_make_pgd(x) ··· 975 native_set_pte(ptep, pte); 976 } 977 978 - static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr, 979 - pmd_t *pmdp , pmd_t pmd) 980 { 981 native_set_pmd(pmdp, pmd); 982 } 983 984 - static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr, 985 - pud_t *pudp, pud_t pud) 986 { 987 native_set_pud(pudp, pud); 988 } 989 - 990 - #ifndef CONFIG_PARAVIRT 991 - /* 992 - * Rules for using pte_update - it must be called after any PTE update which 993 - * has not been done using the set_pte / clear_pte interfaces. It is used by 994 - * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE 995 - * updates should either be sets, clears, or set_pte_atomic for P->P 996 - * transitions, which means this hook should only be called for user PTEs. 997 - * This hook implies a P->P protection or access change has taken place, which 998 - * requires a subsequent TLB flush. 
999 - */ 1000 - #define pte_update(mm, addr, ptep) do { } while (0) 1001 - #endif 1002 1003 /* 1004 * We only update the dirty/accessed state if we set ··· 1014 pte_t *ptep) 1015 { 1016 pte_t pte = native_ptep_get_and_clear(ptep); 1017 - pte_update(mm, addr, ptep); 1018 return pte; 1019 } 1020 ··· 1040 unsigned long addr, pte_t *ptep) 1041 { 1042 clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); 1043 - pte_update(mm, addr, ptep); 1044 } 1045 1046 #define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
··· 55 #else /* !CONFIG_PARAVIRT */ 56 #define set_pte(ptep, pte) native_set_pte(ptep, pte) 57 #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) 58 59 #define set_pte_atomic(ptep, pte) \ 60 native_set_pte_atomic(ptep, pte) ··· 86 87 #define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) 88 #define pmd_clear(pmd) native_pmd_clear(pmd) 89 90 #define pgd_val(x) native_pgd_val(x) 91 #define __pgd(x) native_make_pgd(x) ··· 979 native_set_pte(ptep, pte); 980 } 981 982 + static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, 983 + pmd_t *pmdp, pmd_t pmd) 984 { 985 native_set_pmd(pmdp, pmd); 986 } 987 988 + static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, 989 + pud_t *pudp, pud_t pud) 990 { 991 native_set_pud(pudp, pud); 992 } 993 994 /* 995 * We only update the dirty/accessed state if we set ··· 1031 pte_t *ptep) 1032 { 1033 pte_t pte = native_ptep_get_and_clear(ptep); 1034 return pte; 1035 } 1036 ··· 1058 unsigned long addr, pte_t *ptep) 1059 { 1060 clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); 1061 } 1062 1063 #define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
+5 -5
arch/x86/include/asm/special_insns.h
··· 135 136 extern asmlinkage void native_load_gs_index(unsigned); 137 138 #ifdef CONFIG_PARAVIRT 139 #include <asm/paravirt.h> 140 #else ··· 176 static inline void write_cr3(unsigned long x) 177 { 178 native_write_cr3(x); 179 - } 180 - 181 - static inline unsigned long __read_cr4(void) 182 - { 183 - return native_read_cr4(); 184 } 185 186 static inline void __write_cr4(unsigned long x)
··· 135 136 extern asmlinkage void native_load_gs_index(unsigned); 137 138 + static inline unsigned long __read_cr4(void) 139 + { 140 + return native_read_cr4(); 141 + } 142 + 143 #ifdef CONFIG_PARAVIRT 144 #include <asm/paravirt.h> 145 #else ··· 171 static inline void write_cr3(unsigned long x) 172 { 173 native_write_cr3(x); 174 } 175 176 static inline void __write_cr4(unsigned long x)
-5
arch/x86/kernel/paravirt.c
··· 327 .set_debugreg = native_set_debugreg, 328 .read_cr0 = native_read_cr0, 329 .write_cr0 = native_write_cr0, 330 - .read_cr4 = native_read_cr4, 331 .write_cr4 = native_write_cr4, 332 #ifdef CONFIG_X86_64 333 .read_cr8 = native_read_cr8, ··· 342 .set_ldt = native_set_ldt, 343 .load_gdt = native_load_gdt, 344 .load_idt = native_load_idt, 345 - .store_idt = native_store_idt, 346 .store_tr = native_store_tr, 347 .load_tls = native_load_tls, 348 #ifdef CONFIG_X86_64 ··· 409 .set_pte = native_set_pte, 410 .set_pte_at = native_set_pte_at, 411 .set_pmd = native_set_pmd, 412 - .set_pmd_at = native_set_pmd_at, 413 - .pte_update = paravirt_nop, 414 415 .ptep_modify_prot_start = __ptep_modify_prot_start, 416 .ptep_modify_prot_commit = __ptep_modify_prot_commit, ··· 420 .pmd_clear = native_pmd_clear, 421 #endif 422 .set_pud = native_set_pud, 423 - .set_pud_at = native_set_pud_at, 424 425 .pmd_val = PTE_IDENT, 426 .make_pmd = PTE_IDENT,
··· 327 .set_debugreg = native_set_debugreg, 328 .read_cr0 = native_read_cr0, 329 .write_cr0 = native_write_cr0, 330 .write_cr4 = native_write_cr4, 331 #ifdef CONFIG_X86_64 332 .read_cr8 = native_read_cr8, ··· 343 .set_ldt = native_set_ldt, 344 .load_gdt = native_load_gdt, 345 .load_idt = native_load_idt, 346 .store_tr = native_store_tr, 347 .load_tls = native_load_tls, 348 #ifdef CONFIG_X86_64 ··· 411 .set_pte = native_set_pte, 412 .set_pte_at = native_set_pte_at, 413 .set_pmd = native_set_pmd, 414 415 .ptep_modify_prot_start = __ptep_modify_prot_start, 416 .ptep_modify_prot_commit = __ptep_modify_prot_commit, ··· 424 .pmd_clear = native_pmd_clear, 425 #endif 426 .set_pud = native_set_pud, 427 428 .pmd_val = PTE_IDENT, 429 .make_pmd = PTE_IDENT,
+1 -1
arch/x86/kvm/vmx.c
··· 5192 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 5193 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 5194 5195 - native_store_idt(&dt); 5196 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ 5197 vmx->host_idt_base = dt.address; 5198
··· 5192 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 5193 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 5194 5195 + store_idt(&dt); 5196 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ 5197 vmx->host_idt_base = dt.address; 5198
+1 -6
arch/x86/mm/pgtable.c
··· 426 { 427 int changed = !pte_same(*ptep, entry); 428 429 - if (changed && dirty) { 430 *ptep = entry; 431 - pte_update(vma->vm_mm, address, ptep); 432 - } 433 434 return changed; 435 } ··· 483 if (pte_young(*ptep)) 484 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, 485 (unsigned long *) &ptep->pte); 486 - 487 - if (ret) 488 - pte_update(vma->vm_mm, addr, ptep); 489 490 return ret; 491 }
··· 426 { 427 int changed = !pte_same(*ptep, entry); 428 429 + if (changed && dirty) 430 *ptep = entry; 431 432 return changed; 433 } ··· 485 if (pte_young(*ptep)) 486 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, 487 (unsigned long *) &ptep->pte); 488 489 return ret; 490 }
-2
arch/x86/xen/enlighten_pv.c
··· 1038 .read_cr0 = xen_read_cr0, 1039 .write_cr0 = xen_write_cr0, 1040 1041 - .read_cr4 = native_read_cr4, 1042 .write_cr4 = xen_write_cr4, 1043 1044 #ifdef CONFIG_X86_64 ··· 1072 .alloc_ldt = xen_alloc_ldt, 1073 .free_ldt = xen_free_ldt, 1074 1075 - .store_idt = native_store_idt, 1076 .store_tr = xen_store_tr, 1077 1078 .write_ldt_entry = xen_write_ldt_entry,
··· 1038 .read_cr0 = xen_read_cr0, 1039 .write_cr0 = xen_write_cr0, 1040 1041 .write_cr4 = xen_write_cr4, 1042 1043 #ifdef CONFIG_X86_64 ··· 1073 .alloc_ldt = xen_alloc_ldt, 1074 .free_ldt = xen_free_ldt, 1075 1076 .store_tr = xen_store_tr, 1077 1078 .write_ldt_entry = xen_write_ldt_entry,
-2
arch/x86/xen/mmu_pv.c
··· 2409 .flush_tlb_single = xen_flush_tlb_single, 2410 .flush_tlb_others = xen_flush_tlb_others, 2411 2412 - .pte_update = paravirt_nop, 2413 - 2414 .pgd_alloc = xen_pgd_alloc, 2415 .pgd_free = xen_pgd_free, 2416
··· 2409 .flush_tlb_single = xen_flush_tlb_single, 2410 .flush_tlb_others = xen_flush_tlb_others, 2411 2412 .pgd_alloc = xen_pgd_alloc, 2413 .pgd_free = xen_pgd_free, 2414