Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tile: add virt_to_kpte() API and clean up and document behavior

We use virt_to_pte(NULL, va) a lot, which isn't very obvious.
I added virt_to_kpte(va) as a more obvious wrapper function,
that also validates the va as being a kernel address.

And, I fixed the semantics of virt_to_pte() so that we handle
the pud and pmd the same way, and we now document the fact that
we handle the final pte level differently.

Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>

+30 -11
+1 -1
arch/tile/include/asm/mmu_context.h
··· 45 45 46 46 static inline void install_page_table(pgd_t *pgdir, int asid) 47 47 { 48 - pte_t *ptep = virt_to_pte(NULL, (unsigned long)pgdir); 48 + pte_t *ptep = virt_to_kpte((unsigned long)pgdir); 49 49 __install_page_table(pgdir, asid, *ptep); 50 50 } 51 51
+1
arch/tile/include/asm/page.h
··· 328 328 329 329 struct mm_struct; 330 330 extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr); 331 + extern pte_t *virt_to_kpte(unsigned long kaddr); 331 332 332 333 #endif /* !__ASSEMBLY__ */ 333 334
+3 -3
arch/tile/kernel/setup.c
··· 1600 1600 1601 1601 /* Update the vmalloc mapping and page home. */ 1602 1602 unsigned long addr = (unsigned long)ptr + i; 1603 - pte_t *ptep = virt_to_pte(NULL, addr); 1603 + pte_t *ptep = virt_to_kpte(addr); 1604 1604 pte_t pte = *ptep; 1605 1605 BUG_ON(pfn != pte_pfn(pte)); 1606 1606 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); ··· 1609 1609 1610 1610 /* Update the lowmem mapping for consistency. */ 1611 1611 lowmem_va = (unsigned long)pfn_to_kaddr(pfn); 1612 - ptep = virt_to_pte(NULL, lowmem_va); 1612 + ptep = virt_to_kpte(lowmem_va); 1613 1613 if (pte_huge(*ptep)) { 1614 1614 printk(KERN_DEBUG "early shatter of huge page" 1615 1615 " at %#lx\n", lowmem_va); 1616 1616 shatter_pmd((pmd_t *)ptep); 1617 - ptep = virt_to_pte(NULL, lowmem_va); 1617 + ptep = virt_to_kpte(lowmem_va); 1618 1618 BUG_ON(pte_huge(*ptep)); 1619 1619 } 1620 1620 BUG_ON(pfn != pte_pfn(*ptep));
+3 -3
arch/tile/mm/homecache.c
··· 200 200 #else 201 201 va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id()); 202 202 #endif 203 - ptep = virt_to_pte(NULL, (unsigned long)va); 203 + ptep = virt_to_kpte(va); 204 204 pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL); 205 205 __set_pte(ptep, pte_set_home(pte, home)); 206 206 homecache_finv_page_va((void *)va, home); ··· 385 385 return initial_page_home(); 386 386 } else { 387 387 unsigned long kva = (unsigned long)page_address(page); 388 - return pte_to_home(*virt_to_pte(NULL, kva)); 388 + return pte_to_home(*virt_to_kpte(kva)); 389 389 } 390 390 } 391 391 EXPORT_SYMBOL(page_home); ··· 404 404 NULL, 0); 405 405 406 406 for (i = 0; i < pages; ++i, kva += PAGE_SIZE) { 407 - pte_t *ptep = virt_to_pte(NULL, kva); 407 + pte_t *ptep = virt_to_kpte(kva); 408 408 pte_t pteval = *ptep; 409 409 BUG_ON(!pte_present(pteval) || pte_huge(pteval)); 410 410 __set_pte(ptep, pte_set_home(pteval, home));
+2 -2
arch/tile/mm/init.c
··· 951 951 BUG_ON((addr & (PAGE_SIZE-1)) != 0); 952 952 for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) { 953 953 unsigned long pfn = kaddr_to_pfn((void *)addr); 954 - pte_t *ptep = virt_to_pte(NULL, addr); 954 + pte_t *ptep = virt_to_kpte(addr); 955 955 BUG_ON(pte_huge(*ptep)); /* not relevant for kdata_huge */ 956 956 set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO)); 957 957 } ··· 997 997 */ 998 998 int pfn = kaddr_to_pfn((void *)addr); 999 999 struct page *page = pfn_to_page(pfn); 1000 - pte_t *ptep = virt_to_pte(NULL, addr); 1000 + pte_t *ptep = virt_to_kpte(addr); 1001 1001 if (!initfree) { 1002 1002 /* 1003 1003 * If debugging page accesses then do not free
+20 -2
arch/tile/mm/pgtable.c
··· 325 325 326 326 #endif 327 327 328 + /* 329 + * Return a pointer to the PTE that corresponds to the given 330 + * address in the given page table. A NULL page table just uses 331 + * the standard kernel page table; the preferred API in this case 332 + * is virt_to_kpte(). 333 + * 334 + * The returned pointer can point to a huge page in other levels 335 + * of the page table than the bottom, if the huge page is present 336 + * in the page table. For bottom-level PTEs, the returned pointer 337 + * can point to a PTE that is either present or not. 338 + */ 328 339 pte_t *virt_to_pte(struct mm_struct* mm, unsigned long addr) 329 340 { 330 341 pgd_t *pgd; ··· 352 341 if (pud_huge_page(*pud)) 353 342 return (pte_t *)pud; 354 343 pmd = pmd_offset(pud, addr); 355 - if (pmd_huge_page(*pmd)) 356 - return (pte_t *)pmd; 357 344 if (!pmd_present(*pmd)) 358 345 return NULL; 346 + if (pmd_huge_page(*pmd)) 347 + return (pte_t *)pmd; 359 348 return pte_offset_kernel(pmd, addr); 360 349 } 361 350 EXPORT_SYMBOL(virt_to_pte); 351 + 352 + pte_t *virt_to_kpte(unsigned long kaddr) 353 + { 354 + BUG_ON(kaddr < PAGE_OFFSET); 355 + return virt_to_pte(NULL, kaddr); 356 + } 357 + EXPORT_SYMBOL(virt_to_kpte); 362 358 363 359 pgprot_t set_remote_cache_cpu(pgprot_t prot, int cpu) 364 360 {