Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'dat-enhancement-1'

Heiko Carstens says:

====================

Add the Dat-Enhancement facility 1 to the list of facilities which are
required to start the kernel. The facility provides the CSPG and IDTE
instructions. In particular the CSPG instruction can be used to replace a
valid page table entry with a different page table entry, which also
differs in the page frame real address.

Without the CSPG instruction it is possible to use the CSP instruction to
change valid page table entries, however it only allows changing the lower
or higher 32 bits of such entries, which means it cannot be used to change
the page frame real address of valid page table entries.

Given that there is code around (e.g. HugeTLB vmemmap optimization) which
requires to change valid page table entries of the kernel mapping, without
the detour over an invalid page table entry, make the CSPG instruction
unconditionally available.

The Dat-Enhancement facility 1 is available since z990, which is older than
the currently supported minimum architecture (z10). Therefore adding this to
the architecture level set shouldn't cause any problems.

====================

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>

+16 -62
-1
arch/s390/include/asm/cpufeature.h
··· 27 27 #define cpu_has_edat1() test_facility(8) 28 28 #define cpu_has_edat2() test_facility(78) 29 29 #define cpu_has_gs() test_facility(133) 30 - #define cpu_has_idte() test_facility(3) 31 30 #define cpu_has_nx() test_facility(130) 32 31 #define cpu_has_rdp() test_facility(194) 33 32 #define cpu_has_seq_insn() test_facility(85)
+3 -16
arch/s390/include/asm/pgtable.h
··· 648 648 return 0; 649 649 } 650 650 651 - static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new) 652 - { 653 - union register_pair r1 = { .even = old, .odd = new, }; 654 - unsigned long address = (unsigned long)ptr | 1; 655 - 656 - asm volatile( 657 - " csp %[r1],%[address]" 658 - : [r1] "+&d" (r1.pair), "+m" (*ptr) 659 - : [address] "d" (address) 660 - : "cc"); 661 - } 662 - 663 651 /** 664 652 * cspg() - Compare and Swap and Purge (CSPG) 665 653 * @ptr: Pointer to the value to be exchanged ··· 1390 1402 int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep); 1391 1403 int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc, 1392 1404 unsigned long *oldpte, unsigned long *oldpgste); 1393 - void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr); 1394 1405 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr); 1395 1406 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr); 1396 1407 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr); ··· 1679 1692 1680 1693 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ 1681 1694 1682 - static inline void __pmdp_csp(pmd_t *pmdp) 1695 + static inline void __pmdp_cspg(pmd_t *pmdp) 1683 1696 { 1684 - csp((unsigned int *)pmdp + 1, pmd_val(*pmdp), 1685 - pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); 1697 + cspg((unsigned long *)pmdp, pmd_val(*pmdp), 1698 + pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID); 1686 1699 } 1687 1700 1688 1701 #define IDTE_GLOBAL 0
+4 -7
arch/s390/include/asm/tlbflush.h
··· 35 35 */ 36 36 static inline void __tlb_flush_global(void) 37 37 { 38 - unsigned int dummy = 0; 38 + unsigned long dummy = 0; 39 39 40 - csp(&dummy, 0, 0); 40 + cspg(&dummy, 0, 0); 41 41 } 42 42 43 43 /* ··· 54 54 cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask); 55 55 barrier(); 56 56 gmap_asce = READ_ONCE(mm->context.gmap_asce); 57 - if (cpu_has_idte() && gmap_asce != -1UL) { 57 + if (gmap_asce != -1UL) { 58 58 if (gmap_asce) 59 59 __tlb_flush_idte(gmap_asce); 60 60 __tlb_flush_idte(mm->context.asce); ··· 68 68 69 69 static inline void __tlb_flush_kernel(void) 70 70 { 71 - if (cpu_has_idte()) 72 - __tlb_flush_idte(init_mm.context.asce); 73 - else 74 - __tlb_flush_global(); 71 + __tlb_flush_idte(init_mm.context.asce); 75 72 } 76 73 77 74 static inline void __tlb_flush_mm_lazy(struct mm_struct * mm)
+5 -23
arch/s390/mm/gmap.c
··· 138 138 139 139 static void gmap_flush_tlb(struct gmap *gmap) 140 140 { 141 - if (cpu_has_idte()) 142 - __tlb_flush_idte(gmap->asce); 143 - else 144 - __tlb_flush_global(); 141 + __tlb_flush_idte(gmap->asce); 145 142 } 146 143 147 144 static void gmap_radix_tree_free(struct radix_tree_root *root) ··· 1985 1988 if (machine_has_tlb_guest()) 1986 1989 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce, 1987 1990 IDTE_GLOBAL); 1988 - else if (cpu_has_idte()) 1989 - __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL); 1990 1991 else 1991 - __pmdp_csp(pmdp); 1992 + __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL); 1992 1993 set_pmd(pmdp, new); 1993 1994 } 1994 1995 ··· 2007 2012 _SEGMENT_ENTRY_GMAP_UC | 2008 2013 _SEGMENT_ENTRY)); 2009 2014 if (purge) 2010 - __pmdp_csp(pmdp); 2015 + __pmdp_cspg(pmdp); 2011 2016 set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY)); 2012 2017 } 2013 2018 spin_unlock(&gmap->guest_table_lock); ··· 2026 2031 gmap_pmdp_clear(mm, vmaddr, 0); 2027 2032 } 2028 2033 EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate); 2029 - 2030 - /** 2031 - * gmap_pmdp_csp - csp all affected guest pmd entries 2032 - * @mm: pointer to the process mm_struct 2033 - * @vmaddr: virtual address in the process address space 2034 - */ 2035 - void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr) 2036 - { 2037 - gmap_pmdp_clear(mm, vmaddr, 1); 2038 - } 2039 - EXPORT_SYMBOL_GPL(gmap_pmdp_csp); 2040 2034 2041 2035 /** 2042 2036 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry ··· 2050 2066 if (machine_has_tlb_guest()) 2051 2067 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, 2052 2068 gmap->asce, IDTE_LOCAL); 2053 - else if (cpu_has_idte()) 2069 + else 2054 2070 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL); 2055 2071 *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); 2056 2072 } ··· 2083 2099 if (machine_has_tlb_guest()) 2084 2100 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE, 2085 2101 gmap->asce, IDTE_GLOBAL); 2086 - else if (cpu_has_idte()) 2087 - __pmdp_idte(gaddr, pmdp, 0, 
0, IDTE_GLOBAL); 2088 2102 else 2089 - __pmdp_csp(pmdp); 2103 + __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL); 2090 2104 *pmdp = __pmd(_SEGMENT_ENTRY_EMPTY); 2091 2105 } 2092 2106 spin_unlock(&gmap->guest_table_lock);
+1 -3
arch/s390/mm/pageattr.c
··· 78 78 } 79 79 table = (unsigned long *)((unsigned long)old & mask); 80 80 crdte(*old, new, table, dtt, addr, get_lowcore()->kernel_asce.val); 81 - } else if (cpu_has_idte()) { 82 - cspg(old, *old, new); 83 81 } else { 84 - csp((unsigned int *)old + 1, *old, new); 82 + cspg(old, *old, new); 85 83 } 86 84 } 87 85
+2 -12
arch/s390/mm/pgtable.c
··· 360 360 mm->context.asce, IDTE_GLOBAL); 361 361 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) 362 362 gmap_pmdp_idte_global(mm, addr); 363 - } else if (cpu_has_idte()) { 363 + } else { 364 364 __pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL); 365 365 if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) 366 366 gmap_pmdp_idte_global(mm, addr); 367 - } else { 368 - __pmdp_csp(pmdp); 369 - if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m) 370 - gmap_pmdp_csp(mm, addr); 371 367 } 372 368 } 373 369 ··· 483 487 if (machine_has_tlb_guest()) 484 488 __pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE, 485 489 mm->context.asce, IDTE_GLOBAL); 486 - else if (cpu_has_idte()) 487 - __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL); 488 490 else 489 - /* 490 - * Invalid bit position is the same for pmd and pud, so we can 491 - * reuse _pmd_csp() here 492 - */ 493 - __pmdp_csp((pmd_t *) pudp); 491 + __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL); 494 492 } 495 493 496 494 static inline pud_t pudp_flush_direct(struct mm_struct *mm,
+1
arch/s390/tools/gen_facilities.c
··· 29 29 .bits = (int[]){ 30 30 0, /* N3 instructions */ 31 31 1, /* z/Arch mode installed */ 32 + 3, /* dat-enhancement 1 */ 32 33 18, /* long displacement facility */ 33 34 21, /* extended-immediate facility */ 34 35 25, /* store clock fast */