Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc/mm: Don't use pmd_val, pud_val and pgd_val as lvalue

We convert them to static inline functions here, as we did with pte_val in
the previous patch.

Acked-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>

Authored by Aneesh Kumar K.V and committed by Michael Ellerman
f281b5d5 10bd3808

+154 -57
+5 -1
arch/powerpc/include/asm/book3s/32/pgtable.h
··· 105 105 #define pmd_none(pmd) (!pmd_val(pmd)) 106 106 #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) 107 107 #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) 108 - #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) 108 + static inline void pmd_clear(pmd_t *pmdp) 109 + { 110 + *pmdp = __pmd(0); 111 + } 112 + 109 113 110 114 /* 111 115 * When flushing the tlb entry for a page, we also need to flush the hash
+5 -1
arch/powerpc/include/asm/book3s/64/hash-4k.h
··· 71 71 #define pgd_none(pgd) (!pgd_val(pgd)) 72 72 #define pgd_bad(pgd) (pgd_val(pgd) == 0) 73 73 #define pgd_present(pgd) (pgd_val(pgd) != 0) 74 - #define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) 75 74 #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) 75 + 76 + static inline void pgd_clear(pgd_t *pgdp) 77 + { 78 + *pgdp = __pgd(0); 79 + } 76 80 77 81 static inline pte_t pgd_pte(pgd_t pgd) 78 82 {
+27 -9
arch/powerpc/include/asm/book3s/64/pgtable.h
··· 236 236 #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) 237 237 #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) 238 238 239 - #define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) 239 + static inline void pmd_set(pmd_t *pmdp, unsigned long val) 240 + { 241 + *pmdp = __pmd(val); 242 + } 243 + 244 + static inline void pmd_clear(pmd_t *pmdp) 245 + { 246 + *pmdp = __pmd(0); 247 + } 248 + 249 + 240 250 #define pmd_none(pmd) (!pmd_val(pmd)) 241 251 #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ 242 252 || (pmd_val(pmd) & PMD_BAD_BITS)) 243 253 #define pmd_present(pmd) (!pmd_none(pmd)) 244 - #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) 245 254 #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) 246 255 extern struct page *pmd_page(pmd_t pmd); 247 256 248 - #define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) 257 + static inline void pud_set(pud_t *pudp, unsigned long val) 258 + { 259 + *pudp = __pud(val); 260 + } 261 + 262 + static inline void pud_clear(pud_t *pudp) 263 + { 264 + *pudp = __pud(0); 265 + } 266 + 249 267 #define pud_none(pud) (!pud_val(pud)) 250 268 #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ 251 269 || (pud_val(pud) & PUD_BAD_BITS)) 252 270 #define pud_present(pud) (pud_val(pud) != 0) 253 - #define pud_clear(pudp) (pud_val(*(pudp)) = 0) 254 271 #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) 255 272 256 273 extern struct page *pud_page(pud_t pud); ··· 282 265 return __pud(pte_val(pte)); 283 266 } 284 267 #define pud_write(pud) pte_write(pud_pte(pud)) 285 - #define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) 286 268 #define pgd_write(pgd) pte_write(pgd_pte(pgd)) 269 + static inline void pgd_set(pgd_t *pgdp, unsigned long val) 270 + { 271 + *pgdp = __pgd(val); 272 + } 287 273 288 274 /* 289 275 * Find an entry in a page-table-directory. 
We combine the address region ··· 608 588 609 589 static inline pmd_t pmd_mknotpresent(pmd_t pmd) 610 590 { 611 - pmd_val(pmd) &= ~_PAGE_PRESENT; 612 - return pmd; 591 + return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT); 613 592 } 614 593 615 594 static inline pmd_t pmd_mksplitting(pmd_t pmd) 616 595 { 617 - pmd_val(pmd) |= _PAGE_SPLITTING; 618 - return pmd; 596 + return __pmd(pmd_val(pmd) | _PAGE_SPLITTING); 619 597 } 620 598 621 599 #define __HAVE_ARCH_PMD_SAME
+26 -8
arch/powerpc/include/asm/page.h
··· 304 304 /* PMD level */ 305 305 #ifdef CONFIG_PPC64 306 306 typedef struct { unsigned long pmd; } pmd_t; 307 - #define pmd_val(x) ((x).pmd) 308 307 #define __pmd(x) ((pmd_t) { (x) }) 308 + static inline unsigned long pmd_val(pmd_t x) 309 + { 310 + return x.pmd; 311 + } 309 312 310 313 /* PUD level exusts only on 4k pages */ 311 314 #ifndef CONFIG_PPC_64K_PAGES 312 315 typedef struct { unsigned long pud; } pud_t; 313 - #define pud_val(x) ((x).pud) 314 316 #define __pud(x) ((pud_t) { (x) }) 317 + static inline unsigned long pud_val(pud_t x) 318 + { 319 + return x.pud; 320 + } 315 321 #endif /* !CONFIG_PPC_64K_PAGES */ 316 322 #endif /* CONFIG_PPC64 */ 317 323 318 324 /* PGD level */ 319 325 typedef struct { unsigned long pgd; } pgd_t; 320 - #define pgd_val(x) ((x).pgd) 321 326 #define __pgd(x) ((pgd_t) { (x) }) 327 + static inline unsigned long pgd_val(pgd_t x) 328 + { 329 + return x.pgd; 330 + } 322 331 323 332 /* Page protection bits */ 324 333 typedef struct { unsigned long pgprot; } pgprot_t; ··· 356 347 357 348 #ifdef CONFIG_PPC64 358 349 typedef unsigned long pmd_t; 359 - #define pmd_val(x) (x) 360 350 #define __pmd(x) (x) 351 + static inline unsigned long pmd_val(pmd_t pmd) 352 + { 353 + return pmd; 354 + } 361 355 362 356 #ifndef CONFIG_PPC_64K_PAGES 363 357 typedef unsigned long pud_t; 364 - #define pud_val(x) (x) 365 358 #define __pud(x) (x) 359 + static inline unsigned long pud_val(pud_t pud) 360 + { 361 + return pud; 362 + } 366 363 #endif /* !CONFIG_PPC_64K_PAGES */ 367 364 #endif /* CONFIG_PPC64 */ 368 365 369 366 typedef unsigned long pgd_t; 370 - #define pgd_val(x) (x) 371 - #define pgprot_val(x) (x) 367 + #define __pgd(x) (x) 368 + static inline unsigned long pgd_val(pgd_t pgd) 369 + { 370 + return pgd; 371 + } 372 372 373 373 typedef unsigned long pgprot_t; 374 - #define __pgd(x) (x) 374 + #define pgprot_val(x) (x) 375 375 #define __pgprot(x) (x) 376 376 377 377 #endif
+26 -8
arch/powerpc/include/asm/pgalloc-32.h
··· 21 21 /* #define pgd_populate(mm, pmd, pte) BUG() */ 22 22 23 23 #ifndef CONFIG_BOOKE 24 - #define pmd_populate_kernel(mm, pmd, pte) \ 25 - (pmd_val(*(pmd)) = __pa(pte) | _PMD_PRESENT) 26 - #define pmd_populate(mm, pmd, pte) \ 27 - (pmd_val(*(pmd)) = (page_to_pfn(pte) << PAGE_SHIFT) | _PMD_PRESENT) 24 + 25 + static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, 26 + pte_t *pte) 27 + { 28 + *pmdp = __pmd(__pa(pte) | _PMD_PRESENT); 29 + } 30 + 31 + static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, 32 + pgtable_t pte_page) 33 + { 34 + *pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT); 35 + } 36 + 28 37 #define pmd_pgtable(pmd) pmd_page(pmd) 29 38 #else 30 - #define pmd_populate_kernel(mm, pmd, pte) \ 31 - (pmd_val(*(pmd)) = (unsigned long)pte | _PMD_PRESENT) 32 - #define pmd_populate(mm, pmd, pte) \ 33 - (pmd_val(*(pmd)) = (unsigned long)lowmem_page_address(pte) | _PMD_PRESENT) 39 + 40 + static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, 41 + pte_t *pte) 42 + { 43 + *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT); 44 + } 45 + 46 + static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, 47 + pgtable_t pte_page) 48 + { 49 + *pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT); 50 + } 51 + 34 52 #define pmd_pgtable(pmd) pmd_page(pmd) 35 53 #endif 36 54
+13 -4
arch/powerpc/include/asm/pgalloc-64.h
··· 53 53 54 54 #ifndef CONFIG_PPC_64K_PAGES 55 55 56 - #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD) 56 + #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, (unsigned long)PUD) 57 57 58 58 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 59 59 { ··· 71 71 pud_set(pud, (unsigned long)pmd); 72 72 } 73 73 74 - #define pmd_populate(mm, pmd, pte_page) \ 75 - pmd_populate_kernel(mm, pmd, page_address(pte_page)) 76 - #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte)) 74 + static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, 75 + pte_t *pte) 76 + { 77 + pmd_set(pmd, (unsigned long)pte); 78 + } 79 + 80 + static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, 81 + pgtable_t pte_page) 82 + { 83 + pmd_set(pmd, (unsigned long)page_address(pte_page)); 84 + } 85 + 77 86 #define pmd_pgtable(pmd) pmd_page(pmd) 78 87 79 88 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+6 -1
arch/powerpc/include/asm/pgtable-ppc32.h
··· 128 128 #define pmd_none(pmd) (!pmd_val(pmd)) 129 129 #define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) 130 130 #define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) 131 - #define pmd_clear(pmdp) do { pmd_val(*(pmdp)) = 0; } while (0) 131 + static inline void pmd_clear(pmd_t *pmdp) 132 + { 133 + *pmdp = __pmd(0); 134 + } 135 + 136 + 132 137 133 138 /* 134 139 * When flushing the tlb entry for a page, we also need to flush the hash
+5 -1
arch/powerpc/include/asm/pgtable-ppc64-4k.h
··· 55 55 #define pgd_none(pgd) (!pgd_val(pgd)) 56 56 #define pgd_bad(pgd) (pgd_val(pgd) == 0) 57 57 #define pgd_present(pgd) (pgd_val(pgd) != 0) 58 - #define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) 59 58 #define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) 60 59 61 60 #ifndef __ASSEMBLY__ 61 + 62 + static inline void pgd_clear(pgd_t *pgdp) 63 + { 64 + *pgdp = __pgd(0); 65 + } 62 66 63 67 static inline pte_t pgd_pte(pgd_t pgd) 64 68 {
+27 -9
arch/powerpc/include/asm/pgtable-ppc64.h
··· 144 144 #define PMD_BAD_BITS (PTE_TABLE_SIZE-1) 145 145 #define PUD_BAD_BITS (PMD_TABLE_SIZE-1) 146 146 147 - #define pmd_set(pmdp, pmdval) (pmd_val(*(pmdp)) = (pmdval)) 147 + static inline void pmd_set(pmd_t *pmdp, unsigned long val) 148 + { 149 + *pmdp = __pmd(val); 150 + } 151 + 152 + static inline void pmd_clear(pmd_t *pmdp) 153 + { 154 + *pmdp = __pmd(0); 155 + } 156 + 148 157 #define pmd_none(pmd) (!pmd_val(pmd)) 149 158 #define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ 150 159 || (pmd_val(pmd) & PMD_BAD_BITS)) 151 160 #define pmd_present(pmd) (!pmd_none(pmd)) 152 - #define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) 153 161 #define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) 154 162 extern struct page *pmd_page(pmd_t pmd); 155 163 156 - #define pud_set(pudp, pudval) (pud_val(*(pudp)) = (pudval)) 164 + static inline void pud_set(pud_t *pudp, unsigned long val) 165 + { 166 + *pudp = __pud(val); 167 + } 168 + 169 + static inline void pud_clear(pud_t *pudp) 170 + { 171 + *pudp = __pud(0); 172 + } 173 + 157 174 #define pud_none(pud) (!pud_val(pud)) 158 175 #define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ 159 176 || (pud_val(pud) & PUD_BAD_BITS)) 160 177 #define pud_present(pud) (pud_val(pud) != 0) 161 - #define pud_clear(pudp) (pud_val(*(pudp)) = 0) 162 178 #define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) 163 179 164 180 extern struct page *pud_page(pud_t pud); ··· 189 173 return __pud(pte_val(pte)); 190 174 } 191 175 #define pud_write(pud) pte_write(pud_pte(pud)) 192 - #define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) 193 176 #define pgd_write(pgd) pte_write(pgd_pte(pgd)) 177 + 178 + static inline void pgd_set(pgd_t *pgdp, unsigned long val) 179 + { 180 + *pgdp = __pgd(val); 181 + } 194 182 195 183 /* 196 184 * Find an entry in a page-table-directory. 
We combine the address region ··· 548 528 549 529 static inline pmd_t pmd_mknotpresent(pmd_t pmd) 550 530 { 551 - pmd_val(pmd) &= ~_PAGE_PRESENT; 552 - return pmd; 531 + return __pmd(pmd_val(pmd) & ~_PAGE_PRESENT); 553 532 } 554 533 555 534 static inline pmd_t pmd_mksplitting(pmd_t pmd) 556 535 { 557 - pmd_val(pmd) |= _PAGE_SPLITTING; 558 - return pmd; 536 + return __pmd(pmd_val(pmd) | _PAGE_SPLITTING); 559 537 } 560 538 561 539 #define __HAVE_ARCH_PMD_SAME
+5 -5
arch/powerpc/mm/40x_mmu.c
··· 110 110 unsigned long val = p | _PMD_SIZE_16M | _PAGE_EXEC | _PAGE_HWWRITE; 111 111 112 112 pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); 113 - pmd_val(*pmdp++) = val; 114 - pmd_val(*pmdp++) = val; 115 - pmd_val(*pmdp++) = val; 116 - pmd_val(*pmdp++) = val; 113 + *pmdp++ = __pmd(val); 114 + *pmdp++ = __pmd(val); 115 + *pmdp++ = __pmd(val); 116 + *pmdp++ = __pmd(val); 117 117 118 118 v += LARGE_PAGE_SIZE_16M; 119 119 p += LARGE_PAGE_SIZE_16M; ··· 125 125 unsigned long val = p | _PMD_SIZE_4M | _PAGE_EXEC | _PAGE_HWWRITE; 126 126 127 127 pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v); 128 - pmd_val(*pmdp) = val; 128 + *pmdp = __pmd(val); 129 129 130 130 v += LARGE_PAGE_SIZE_4M; 131 131 p += LARGE_PAGE_SIZE_4M;
+9 -10
arch/powerpc/mm/pgtable_64.c
··· 759 759 760 760 static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) 761 761 { 762 - pmd_val(pmd) |= pgprot_val(pgprot); 763 - return pmd; 762 + return __pmd(pmd_val(pmd) | pgprot_val(pgprot)); 764 763 } 765 764 766 765 pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot) 767 766 { 768 - pmd_t pmd; 767 + unsigned long pmdv; 769 768 /* 770 769 * For a valid pte, we would have _PAGE_PRESENT always 771 770 * set. We use this to check THP page at pmd level. 772 771 * leaf pte for huge page, bottom two bits != 00 773 772 */ 774 - pmd_val(pmd) = pfn << PTE_RPN_SHIFT; 775 - pmd_val(pmd) |= _PAGE_THP_HUGE; 776 - pmd = pmd_set_protbits(pmd, pgprot); 777 - return pmd; 773 + pmdv = pfn << PTE_RPN_SHIFT; 774 + pmdv |= _PAGE_THP_HUGE; 775 + return pmd_set_protbits(__pmd(pmdv), pgprot); 778 776 } 779 777 780 778 pmd_t mk_pmd(struct page *page, pgprot_t pgprot) ··· 782 784 783 785 pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) 784 786 { 787 + unsigned long pmdv; 785 788 786 - pmd_val(pmd) &= _HPAGE_CHG_MASK; 787 - pmd = pmd_set_protbits(pmd, newprot); 788 - return pmd; 789 + pmdv = pmd_val(pmd); 790 + pmdv &= _HPAGE_CHG_MASK; 791 + return pmd_set_protbits(__pmd(pmdv), newprot); 789 792 } 790 793 791 794 /*