Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sparc: drop pte_file()-related helpers

We've replaced the remap_file_pages(2) implementation with emulation. Nobody
creates non-linear mappings anymore.

This patch also increases the number of bits available for the swap offset.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Kirill A. Shutemov and committed by
Linus Torvalds
6a8c4820 8b70beac

+5 -73
-24
arch/sparc/include/asm/pgtable_32.h
··· 221 221 return pte_val(pte) & SRMMU_REF; 222 222 } 223 223 224 - /* 225 - * The following only work if pte_present() is not true. 226 - */ 227 - static inline int pte_file(pte_t pte) 228 - { 229 - return pte_val(pte) & SRMMU_FILE; 230 - } 231 - 232 224 static inline int pte_special(pte_t pte) 233 225 { 234 226 return 0; ··· 366 374 367 375 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 368 376 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 369 - 370 - /* file-offset-in-pte helpers */ 371 - static inline unsigned long pte_to_pgoff(pte_t pte) 372 - { 373 - return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT; 374 - } 375 - 376 - static inline pte_t pgoff_to_pte(unsigned long pgoff) 377 - { 378 - return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE); 379 - } 380 - 381 - /* 382 - * This is made a constant because mm/fremap.c required a constant. 383 - */ 384 - #define PTE_FILE_MAX_BITS 24 385 377 386 378 static inline unsigned long 387 379 __get_phys (unsigned long addr)
-40
arch/sparc/include/asm/pgtable_64.h
··· 137 137 #define _PAGE_SOFT_4U _AC(0x0000000000001F80,UL) /* Software bits: */ 138 138 #define _PAGE_EXEC_4U _AC(0x0000000000001000,UL) /* Executable SW bit */ 139 139 #define _PAGE_MODIFIED_4U _AC(0x0000000000000800,UL) /* Modified (dirty) */ 140 - #define _PAGE_FILE_4U _AC(0x0000000000000800,UL) /* Pagecache page */ 141 140 #define _PAGE_ACCESSED_4U _AC(0x0000000000000400,UL) /* Accessed (ref'd) */ 142 141 #define _PAGE_READ_4U _AC(0x0000000000000200,UL) /* Readable SW Bit */ 143 142 #define _PAGE_WRITE_4U _AC(0x0000000000000100,UL) /* Writable SW Bit */ ··· 166 167 #define _PAGE_EXEC_4V _AC(0x0000000000000080,UL) /* Executable Page */ 167 168 #define _PAGE_W_4V _AC(0x0000000000000040,UL) /* Writable */ 168 169 #define _PAGE_SOFT_4V _AC(0x0000000000000030,UL) /* Software bits */ 169 - #define _PAGE_FILE_4V _AC(0x0000000000000020,UL) /* Pagecache page */ 170 170 #define _PAGE_PRESENT_4V _AC(0x0000000000000010,UL) /* Present */ 171 171 #define _PAGE_RESV_4V _AC(0x0000000000000008,UL) /* Reserved */ 172 172 #define _PAGE_SZ16GB_4V _AC(0x0000000000000007,UL) /* 16GB Page */ ··· 329 331 return __pmd(pte_val(pte)); 330 332 } 331 333 #endif 332 - 333 - static inline pte_t pgoff_to_pte(unsigned long off) 334 - { 335 - off <<= PAGE_SHIFT; 336 - 337 - __asm__ __volatile__( 338 - "\n661: or %0, %2, %0\n" 339 - " .section .sun4v_1insn_patch, \"ax\"\n" 340 - " .word 661b\n" 341 - " or %0, %3, %0\n" 342 - " .previous\n" 343 - : "=r" (off) 344 - : "0" (off), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V)); 345 - 346 - return __pte(off); 347 - } 348 334 349 335 static inline pgprot_t pgprot_noncached(pgprot_t prot) 350 336 { ··· 589 607 : "i" (_PAGE_EXEC_4U), "i" (_PAGE_EXEC_4V)); 590 608 591 609 return (pte_val(pte) & mask); 592 - } 593 - 594 - static inline unsigned long pte_file(pte_t pte) 595 - { 596 - unsigned long val = pte_val(pte); 597 - 598 - __asm__ __volatile__( 599 - "\n661: and %0, %2, %0\n" 600 - " .section .sun4v_1insn_patch, \"ax\"\n" 601 - " .word 661b\n" 602 - " and %0, %3, %0\n" 603 - " .previous\n" 604 - : "=r" (val) 605 - : "0" (val), "i" (_PAGE_FILE_4U), "i" (_PAGE_FILE_4V)); 606 - 607 - return val; 608 610 } 609 611 610 612 static inline unsigned long pte_present(pte_t pte) ··· 936 970 } ) 937 971 #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 938 972 #define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 939 - 940 - /* File offset in PTE support. */ 941 - unsigned long pte_file(pte_t); 942 - #define pte_to_pgoff(pte) (pte_val(pte) >> PAGE_SHIFT) 943 - pte_t pgoff_to_pte(unsigned long); 944 - #define PTE_FILE_MAX_BITS (64UL - PAGE_SHIFT - 1UL) 945 973 946 974 int page_in_phys_avail(unsigned long paddr); 947 975
+5 -9
arch/sparc/include/asm/pgtsrmmu.h
··· 80 80 #define SRMMU_PRIV 0x1c 81 81 #define SRMMU_PRIV_RDONLY 0x18 82 82 83 - #define SRMMU_FILE 0x40 /* Implemented in software */ 84 - 85 - #define SRMMU_PTE_FILE_SHIFT 8 /* == 32-PTE_FILE_MAX_BITS */ 86 - 87 83 #define SRMMU_CHG_MASK (0xffffff00 | SRMMU_REF | SRMMU_DIRTY) 88 84 89 85 /* SRMMU swap entry encoding ··· 90 94 * oooooooooooooooooootttttRRRRRRRR 91 95 * fedcba9876543210fedcba9876543210 92 96 * 93 - * The bottom 8 bits are reserved for protection and status bits, especially 94 - * FILE and PRESENT. 97 + * The bottom 7 bits are reserved for protection and status bits, especially 98 + * PRESENT. 95 99 */ 96 100 #define SRMMU_SWP_TYPE_MASK 0x1f 97 - #define SRMMU_SWP_TYPE_SHIFT SRMMU_PTE_FILE_SHIFT 98 - #define SRMMU_SWP_OFF_MASK 0x7ffff 99 - #define SRMMU_SWP_OFF_SHIFT (SRMMU_PTE_FILE_SHIFT + 5) 101 + #define SRMMU_SWP_TYPE_SHIFT 7 102 + #define SRMMU_SWP_OFF_MASK 0xfffff 103 + #define SRMMU_SWP_OFF_SHIFT (SRMMU_SWP_TYPE_SHIFT + 5) 100 104 101 105 /* Some day I will implement true fine grained access bits for 102 106 * user pages because the SRMMU gives us the capabilities to