Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc: drop _PAGE_FILE and pte_file()-related helpers

We've replaced the remap_file_pages(2) implementation with emulation. Nobody
creates non-linear mappings anymore.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Kirill A. Shutemov and committed by Linus Torvalds
780fc564 160cc266

+4 -26
+2 -7
arch/powerpc/include/asm/pgtable-ppc32.h
··· 333 333 /* 334 334 * Encode and decode a swap entry. 335 335 * Note that the bits we use in a PTE for representing a swap entry 336 - * must not include the _PAGE_PRESENT bit, the _PAGE_FILE bit, or the 337 - *_PAGE_HASHPTE bit (if used). -- paulus 336 + * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used). 337 + * -- paulus 338 338 */ 339 339 #define __swp_type(entry) ((entry).val & 0x1f) 340 340 #define __swp_offset(entry) ((entry).val >> 5) 341 341 #define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) 342 342 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) 343 343 #define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) 344 - 345 - /* Encode and decode a nonlinear file mapping entry */ 346 - #define PTE_FILE_MAX_BITS 29 347 - #define pte_to_pgoff(pte) (pte_val(pte) >> 3) 348 - #define pgoff_to_pte(off) ((pte_t) { ((off) << 3) | _PAGE_FILE }) 349 344 350 345 #ifndef CONFIG_PPC_4K_PAGES 351 346 void pgtable_cache_init(void);
+1 -4
arch/powerpc/include/asm/pgtable-ppc64.h
··· 352 352 #define __swp_entry(type, offset) ((swp_entry_t){((type)<< 1)|((offset)<<8)}) 353 353 #define __pte_to_swp_entry(pte) ((swp_entry_t){pte_val(pte) >> PTE_RPN_SHIFT}) 354 354 #define __swp_entry_to_pte(x) ((pte_t) { (x).val << PTE_RPN_SHIFT }) 355 - #define pte_to_pgoff(pte) (pte_val(pte) >> PTE_RPN_SHIFT) 356 - #define pgoff_to_pte(off) ((pte_t) {((off) << PTE_RPN_SHIFT)|_PAGE_FILE}) 357 - #define PTE_FILE_MAX_BITS (BITS_PER_LONG - PTE_RPN_SHIFT) 358 355 359 356 void pgtable_cache_add(unsigned shift, void (*ctor)(void *)); 360 357 void pgtable_cache_init(void); ··· 386 389 * The last three bits are intentionally left to zero. This memory location 387 390 * are also used as normal page PTE pointers. So if we have any pointers 388 391 * left around while we collapse a hugepage, we need to make sure 389 - * _PAGE_PRESENT and _PAGE_FILE bits of that are zero when we look at them 392 + * _PAGE_PRESENT bit of that is zero when we look at them 390 393 */ 391 394 static inline unsigned int hpte_valid(unsigned char *hpte_slot_array, int index) 392 395 {
-1
arch/powerpc/include/asm/pgtable.h
··· 34 34 { return (pte_val(pte) & (_PAGE_RW | _PAGE_RO)) != _PAGE_RO; } 35 35 static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } 36 36 static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED; } 37 - static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; } 38 37 static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } 39 38 static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } 40 39 static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); }
-1
arch/powerpc/include/asm/pte-40x.h
··· 38 38 */ 39 39 40 40 #define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ 41 - #define _PAGE_FILE 0x001 /* when !present: nonlinear file mapping */ 42 41 #define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ 43 42 #define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ 44 43 #define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */
-5
arch/powerpc/include/asm/pte-44x.h
··· 44 44 * - PRESENT *must* be in the bottom three bits because swap cache 45 45 * entries use the top 29 bits for TLB2. 46 46 * 47 - * - FILE *must* be in the bottom three bits because swap cache 48 - * entries use the top 29 bits for TLB2. 49 - * 50 47 * - CACHE COHERENT bit (M) has no effect on original PPC440 cores, 51 48 * because it doesn't support SMP. However, some later 460 variants 52 49 * have -some- form of SMP support and so I keep the bit there for ··· 65 68 * 66 69 * There are three protection bits available for SWAP entry: 67 70 * _PAGE_PRESENT 68 - * _PAGE_FILE 69 71 * _PAGE_HASHPTE (if HW has) 70 72 * 71 73 * So those three bits have to be inside of 0-2nd LSB of PTE. ··· 73 77 74 78 #define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ 75 79 #define _PAGE_RW 0x00000002 /* S: Write permission */ 76 - #define _PAGE_FILE 0x00000004 /* S: nonlinear file mapping */ 77 80 #define _PAGE_EXEC 0x00000004 /* H: Execute permission */ 78 81 #define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ 79 82 #define _PAGE_DIRTY 0x00000010 /* S: Page dirty */
-1
arch/powerpc/include/asm/pte-8xx.h
··· 29 29 30 30 /* Definitions for 8xx embedded chips. */ 31 31 #define _PAGE_PRESENT 0x0001 /* Page is valid */ 32 - #define _PAGE_FILE 0x0002 /* when !present: nonlinear file mapping */ 33 32 #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */ 34 33 #define _PAGE_SHARED 0x0004 /* No ASID (context) compare */ 35 34 #define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
-1
arch/powerpc/include/asm/pte-book3e.h
··· 10 10 11 11 /* Architected bits */ 12 12 #define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */ 13 - #define _PAGE_FILE 0x000002 /* (!present only) software: pte holds file offset */ 14 13 #define _PAGE_SW1 0x000002 15 14 #define _PAGE_BAP_SR 0x000004 16 15 #define _PAGE_BAP_UR 0x000008
-3
arch/powerpc/include/asm/pte-fsl-booke.h
··· 13 13 - PRESENT *must* be in the bottom three bits because swap cache 14 14 entries use the top 29 bits. 15 15 16 - - FILE *must* be in the bottom three bits because swap cache 17 - entries use the top 29 bits. 18 16 */ 19 17 20 18 /* Definitions for FSL Book-E Cores */ 21 19 #define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */ 22 20 #define _PAGE_USER 0x00002 /* S: User page (maps to UR) */ 23 - #define _PAGE_FILE 0x00002 /* S: when !present: nonlinear file mapping */ 24 21 #define _PAGE_RW 0x00004 /* S: Write permission (SW) */ 25 22 #define _PAGE_DIRTY 0x00008 /* S: Page dirty */ 26 23 #define _PAGE_EXEC 0x00010 /* H: SX permission */
-1
arch/powerpc/include/asm/pte-hash32.h
··· 18 18 19 19 #define _PAGE_PRESENT 0x001 /* software: pte contains a translation */ 20 20 #define _PAGE_HASHPTE 0x002 /* hash_page has made an HPTE for this pte */ 21 - #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ 22 21 #define _PAGE_USER 0x004 /* usermode access allowed */ 23 22 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ 24 23 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
-1
arch/powerpc/include/asm/pte-hash64.h
··· 16 16 */ 17 17 #define _PAGE_PRESENT 0x0001 /* software: pte contains a translation */ 18 18 #define _PAGE_USER 0x0002 /* matches one of the PP bits */ 19 - #define _PAGE_FILE 0x0002 /* (!present only) software: pte holds file offset */ 20 19 #define _PAGE_EXEC 0x0004 /* No execute on POWER4 and newer (we invert) */ 21 20 #define _PAGE_GUARDED 0x0008 22 21 /* We can derive Memory coherence from _PAGE_NO_CACHE */
+1 -1
arch/powerpc/mm/pgtable_64.c
··· 782 782 { 783 783 pmd_t pmd; 784 784 /* 785 - * For a valid pte, we would have _PAGE_PRESENT or _PAGE_FILE always 785 + * For a valid pte, we would have _PAGE_PRESENT always 786 786 * set. We use this to check THP page at pmd level. 787 787 * leaf pte for huge page, bottom two bits != 00 788 788 */