Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

m68k: Pass a pointer to virt_to_pfn()/virt_to_page()

Functions that work on a pointer to virtual memory, such as
virt_to_pfn(), and users of that function, such as
virt_to_page(), are supposed to be passed a pointer to virtual
memory, ideally a (void *) or other pointer. However since
many architectures implement virt_to_pfn() as a macro,
this function becomes polymorphic and accepts both an
(unsigned long) and a (void *).

Fix up the offending calls in arch/m68k with explicit casts.

The page table header <asm/pgtable.h> will include different
variants of the defines depending on whether you build for
classic m68k, ColdFire or Sun3, so fix all variants.

Delete ColdFire pte_pagenr() which was using unsigned long
semantics from __pte_page().

Tested-by: Geert Uytterhoeven <geert@linux-m68k.org>
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>

+10 -10
+1 -2
arch/m68k/include/asm/mcf_pgtable.h
··· 115 115 pgd_val(*pgdp) = virt_to_phys(pmdp); 116 116 } 117 117 118 - #define __pte_page(pte) ((unsigned long) (pte_val(pte) & PAGE_MASK)) 118 + #define __pte_page(pte) ((void *) (pte_val(pte) & PAGE_MASK)) 119 119 #define pmd_page_vaddr(pmd) ((unsigned long) (pmd_val(pmd))) 120 120 121 121 static inline int pte_none(pte_t pte) ··· 134 134 pte_val(*ptep) = 0; 135 135 } 136 136 137 - #define pte_pagenr(pte) ((__pte_page(pte) - PAGE_OFFSET) >> PAGE_SHIFT) 138 137 #define pte_page(pte) virt_to_page(__pte_page(pte)) 139 138 140 139 static inline int pmd_none2(pmd_t *pmd) { return !pmd_val(*pmd); }
+2 -2
arch/m68k/include/asm/sun3_pgtable.h
··· 91 91 #define pmd_set(pmdp,ptep) do {} while (0) 92 92 93 93 #define __pte_page(pte) \ 94 - ((unsigned long) __va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT)) 94 + (__va ((pte_val (pte) & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT)) 95 95 96 96 static inline unsigned long pmd_page_vaddr(pmd_t pmd) 97 97 { ··· 111 111 112 112 #define pte_page(pte) virt_to_page(__pte_page(pte)) 113 113 #define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT) 114 - #define pmd_page(pmd) virt_to_page(pmd_page_vaddr(pmd)) 114 + #define pmd_page(pmd) virt_to_page((void *)pmd_page_vaddr(pmd)) 115 115 116 116 117 117 static inline int pmd_none2 (pmd_t *pmd) { return !pmd_val (*pmd); }
+2 -1
arch/m68k/mm/mcfmmu.c
··· 69 69 70 70 /* now change pg_table to kernel virtual addresses */ 71 71 for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) { 72 - pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT); 72 + pte_t pte = pfn_pte(virt_to_pfn((void *)address), 73 + PAGE_INIT); 73 74 if (address >= (unsigned long) high_memory) 74 75 pte_val(pte) = 0; 75 76
+2 -2
arch/m68k/mm/motorola.c
··· 102 102 LIST_HEAD_INIT(ptable_list[1]), 103 103 }; 104 104 105 - #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru)) 105 + #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru)) 106 106 #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru)) 107 107 #define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index) 108 108 ··· 201 201 list_del(dp); 202 202 mmu_page_dtor((void *)page); 203 203 if (type == TABLE_PTE) 204 - pgtable_pte_page_dtor(virt_to_page(page)); 204 + pgtable_pte_page_dtor(virt_to_page((void *)page)); 205 205 free_page (page); 206 206 return 1; 207 207 } else if (ptable_list[type].next != dp) {
+1 -1
arch/m68k/mm/sun3mmu.c
··· 75 75 /* now change pg_table to kernel virtual addresses */ 76 76 pg_table = (pte_t *) __va ((unsigned long) pg_table); 77 77 for (i=0; i<PTRS_PER_PTE; ++i, ++pg_table) { 78 - pte_t pte = pfn_pte(virt_to_pfn(address), PAGE_INIT); 78 + pte_t pte = pfn_pte(virt_to_pfn((void *)address), PAGE_INIT); 79 79 if (address >= (unsigned long)high_memory) 80 80 pte_val (pte) = 0; 81 81 set_pte (pg_table, pte);
+1 -1
arch/m68k/sun3/dvma.c
··· 29 29 j = *(volatile unsigned long *)kaddr; 30 30 *(volatile unsigned long *)kaddr = j; 31 31 32 - ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL); 32 + ptep = pfn_pte(virt_to_pfn((void *)kaddr), PAGE_KERNEL); 33 33 pte = pte_val(ptep); 34 34 // pr_info("dvma_remap: addr %lx -> %lx pte %08lx\n", kaddr, vaddr, pte); 35 35 if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) {
+1 -1
arch/m68k/sun3x/dvma.c
··· 125 125 do { 126 126 pr_debug("mapping %08lx phys to %08lx\n", 127 127 __pa(kaddr), vaddr); 128 - set_pte(pte, pfn_pte(virt_to_pfn(kaddr), 128 + set_pte(pte, pfn_pte(virt_to_pfn((void *)kaddr), 129 129 PAGE_KERNEL)); 130 130 pte++; 131 131 kaddr += PAGE_SIZE;