Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm: Add new "set" flag argument to pte/pmd update function

pte_update() is a powerpc-ism used to change the bits of a PTE
when the access permission is being restricted (a flush is
potentially needed).

It uses atomic operations when needed and handles the hash
synchronization on hash based processors.

It is currently only used to clear PTE bits and so the current
implementation doesn't provide a way to also set PTE bits.

The new _PAGE_NUMA bit, when set, is actually restricting access,
so it must use that function too. This change adds the ability
for pte_update() to also set PTE bits.

We will use this later to set the _PAGE_NUMA bit.

Acked-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>

authored by

Aneesh Kumar K.V and committed by
Benjamin Herrenschmidt
88247e8d 49d9684a

+24 -18
+1 -1
arch/powerpc/include/asm/hugetlb.h
··· 127 127 unsigned long addr, pte_t *ptep) 128 128 { 129 129 #ifdef CONFIG_PPC64 130 - return __pte(pte_update(mm, addr, ptep, ~0UL, 1)); 130 + return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1)); 131 131 #else 132 132 return __pte(pte_update(ptep, ~0UL, 0)); 133 133 #endif
+15 -11
arch/powerpc/include/asm/pgtable-ppc64.h
··· 195 195 static inline unsigned long pte_update(struct mm_struct *mm, 196 196 unsigned long addr, 197 197 pte_t *ptep, unsigned long clr, 198 + unsigned long set, 198 199 int huge) 199 200 { 200 201 #ifdef PTE_ATOMIC_UPDATES ··· 206 205 andi. %1,%0,%6\n\ 207 206 bne- 1b \n\ 208 207 andc %1,%0,%4 \n\ 208 + or %1,%1,%7\n\ 209 209 stdcx. %1,0,%3 \n\ 210 210 bne- 1b" 211 211 : "=&r" (old), "=&r" (tmp), "=m" (*ptep) 212 - : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY) 212 + : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set) 213 213 : "cc" ); 214 214 #else 215 215 unsigned long old = pte_val(*ptep); 216 - *ptep = __pte(old & ~clr); 216 + *ptep = __pte((old & ~clr) | set); 217 217 #endif 218 218 /* huge pages use the old page table lock */ 219 219 if (!huge) ··· 233 231 { 234 232 unsigned long old; 235 233 236 - if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 234 + if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 237 235 return 0; 238 - old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0); 236 + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); 239 237 return (old & _PAGE_ACCESSED) != 0; 240 238 } 241 239 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG ··· 254 252 if ((pte_val(*ptep) & _PAGE_RW) == 0) 255 253 return; 256 254 257 - pte_update(mm, addr, ptep, _PAGE_RW, 0); 255 + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); 258 256 } 259 257 260 258 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, ··· 263 261 if ((pte_val(*ptep) & _PAGE_RW) == 0) 264 262 return; 265 263 266 - pte_update(mm, addr, ptep, _PAGE_RW, 1); 264 + pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); 267 265 } 268 266 269 267 /* ··· 286 284 static inline pte_t ptep_get_and_clear(struct mm_struct *mm, 287 285 unsigned long addr, pte_t *ptep) 288 286 { 289 - unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0); 287 + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); 290 288 return __pte(old); 291 289 } 292 290 293 291 static 
inline void pte_clear(struct mm_struct *mm, unsigned long addr, 294 292 pte_t * ptep) 295 293 { 296 - pte_update(mm, addr, ptep, ~0UL, 0); 294 + pte_update(mm, addr, ptep, ~0UL, 0, 0); 297 295 } 298 296 299 297 ··· 508 506 509 507 extern unsigned long pmd_hugepage_update(struct mm_struct *mm, 510 508 unsigned long addr, 511 - pmd_t *pmdp, unsigned long clr); 509 + pmd_t *pmdp, 510 + unsigned long clr, 511 + unsigned long set); 512 512 513 513 static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, 514 514 unsigned long addr, pmd_t *pmdp) ··· 519 515 520 516 if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0) 521 517 return 0; 522 - old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED); 518 + old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0); 523 519 return ((old & _PAGE_ACCESSED) != 0); 524 520 } 525 521 ··· 546 542 if ((pmd_val(*pmdp) & _PAGE_RW) == 0) 547 543 return; 548 544 549 - pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW); 545 + pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0); 550 546 } 551 547 552 548 #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
+7 -5
arch/powerpc/mm/pgtable_64.c
··· 510 510 } 511 511 512 512 unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr, 513 - pmd_t *pmdp, unsigned long clr) 513 + pmd_t *pmdp, unsigned long clr, 514 + unsigned long set) 514 515 { 515 516 516 517 unsigned long old, tmp; ··· 527 526 andi. %1,%0,%6\n\ 528 527 bne- 1b \n\ 529 528 andc %1,%0,%4 \n\ 529 + or %1,%1,%7\n\ 530 530 stdcx. %1,0,%3 \n\ 531 531 bne- 1b" 532 532 : "=&r" (old), "=&r" (tmp), "=m" (*pmdp) 533 - : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY) 533 + : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set) 534 534 : "cc" ); 535 535 #else 536 536 old = pmd_val(*pmdp); 537 - *pmdp = __pmd(old & ~clr); 537 + *pmdp = __pmd((old & ~clr) | set); 538 538 #endif 539 539 if (old & _PAGE_HASHPTE) 540 540 hpte_do_hugepage_flush(mm, addr, pmdp); ··· 710 708 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 711 709 pmd_t *pmdp) 712 710 { 713 - pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT); 711 + pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0); 714 712 } 715 713 716 714 /* ··· 837 835 unsigned long old; 838 836 pgtable_t *pgtable_slot; 839 837 840 - old = pmd_hugepage_update(mm, addr, pmdp, ~0UL); 838 + old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0); 841 839 old_pmd = __pmd(old); 842 840 /* 843 841 * We have pmd == none and we are holding page_table_lock.
+1 -1
arch/powerpc/mm/subpage-prot.c
··· 78 78 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 79 79 arch_enter_lazy_mmu_mode(); 80 80 for (; npages > 0; --npages) { 81 - pte_update(mm, addr, pte, 0, 0); 81 + pte_update(mm, addr, pte, 0, 0, 0); 82 82 addr += PAGE_SIZE; 83 83 ++pte; 84 84 }