Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

powerpc32: refactor x_mapped_by_bats() and x_mapped_by_tlbcam() together

x_mapped_by_bats() and x_mapped_by_tlbcam() serve the same kind of
purpose, and are never defined at the same time.
So rename them x_block_mapped() and define them in the relevant
places.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>

Authored by Christophe Leroy and committed by Scott Wood.
3084cdb7 be00ed72

+22 -42
+4 -2
arch/powerpc/mm/fsl_booke_mmu.c
··· 72 72 return tlbcam_addrs[idx].limit - tlbcam_addrs[idx].start + 1; 73 73 } 74 74 75 + #ifdef CONFIG_FSL_BOOKE 75 76 /* 76 77 * Return PA for this VA if it is mapped by a CAM, or 0 77 78 */ 78 - phys_addr_t v_mapped_by_tlbcam(unsigned long va) 79 + phys_addr_t v_block_mapped(unsigned long va) 79 80 { 80 81 int b; 81 82 for (b = 0; b < tlbcam_index; ++b) ··· 88 87 /* 89 88 * Return VA for a given PA or 0 if not mapped 90 89 */ 91 - unsigned long p_mapped_by_tlbcam(phys_addr_t pa) 90 + unsigned long p_block_mapped(phys_addr_t pa) 92 91 { 93 92 int b; 94 93 for (b = 0; b < tlbcam_index; ++b) ··· 98 97 return tlbcam_addrs[b].start+(pa-tlbcam_addrs[b].phys); 99 98 return 0; 100 99 } 100 + #endif 101 101 102 102 /* 103 103 * Set up a variable-size TLB entry (tlbcam). The parameters are not checked;
+10
arch/powerpc/mm/mmu_decl.h
··· 159 159 u32 MAS7; 160 160 }; 161 161 #endif 162 + 163 + #if defined(CONFIG_6xx) || defined(CONFIG_FSL_BOOKE) 164 + /* 6xx have BATS */ 165 + /* FSL_BOOKE have TLBCAM */ 166 + phys_addr_t v_block_mapped(unsigned long va); 167 + unsigned long p_block_mapped(phys_addr_t pa); 168 + #else 169 + static inline phys_addr_t v_block_mapped(unsigned long va) { return 0; } 170 + static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; } 171 + #endif
+6 -38
arch/powerpc/mm/pgtable_32.c
··· 41 41 unsigned long ioremap_bot; 42 42 EXPORT_SYMBOL(ioremap_bot); /* aka VMALLOC_END */ 43 43 44 - #ifdef CONFIG_6xx 45 - #define HAVE_BATS 1 46 - #endif 47 - 48 - #if defined(CONFIG_FSL_BOOKE) 49 - #define HAVE_TLBCAM 1 50 - #endif 51 - 52 44 extern char etext[], _stext[]; 53 - 54 - #ifdef HAVE_BATS 55 - extern phys_addr_t v_mapped_by_bats(unsigned long va); 56 - extern unsigned long p_mapped_by_bats(phys_addr_t pa); 57 - #else /* !HAVE_BATS */ 58 - #define v_mapped_by_bats(x) (0UL) 59 - #define p_mapped_by_bats(x) (0UL) 60 - #endif /* HAVE_BATS */ 61 - 62 - #ifdef HAVE_TLBCAM 63 - extern phys_addr_t v_mapped_by_tlbcam(unsigned long va); 64 - extern unsigned long p_mapped_by_tlbcam(phys_addr_t pa); 65 - #else /* !HAVE_TLBCAM */ 66 - #define v_mapped_by_tlbcam(x) (0UL) 67 - #define p_mapped_by_tlbcam(x) (0UL) 68 - #endif /* HAVE_TLBCAM */ 69 45 70 46 #define PGDIR_ORDER (32 + PGD_T_LOG2 - PGDIR_SHIFT) 71 47 ··· 204 228 205 229 /* 206 230 * Is it already mapped? Perhaps overlapped by a previous 207 - * BAT mapping. If the whole area is mapped then we're done, 208 - * otherwise remap it since we want to keep the virt addrs for 209 - * each request contiguous. 210 - * 211 - * We make the assumption here that if the bottom and top 212 - * of the range we want are mapped then it's mapped to the 213 - * same virt address (and this is contiguous). 214 - * -- Cort 231 + * mapping. 215 232 */ 216 - if ((v = p_mapped_by_bats(p)) /*&& p_mapped_by_bats(p+size-1)*/ ) 217 - goto out; 218 - 219 - if ((v = p_mapped_by_tlbcam(p))) 233 + v = p_block_mapped(p); 234 + if (v) 220 235 goto out; 221 236 222 237 if (slab_is_available()) { ··· 245 278 * If mapped by BATs then there is nothing to do. 246 279 * Calling vfree() generates a benign warning. 
247 280 */ 248 - if (v_mapped_by_bats((unsigned long)addr)) return; 281 + if (v_block_mapped((unsigned long)addr)) 282 + return; 249 283 250 284 if (addr > high_memory && (unsigned long) addr < ioremap_bot) 251 285 vunmap((void *) (PAGE_MASK & (unsigned long)addr)); ··· 371 403 BUG_ON(PageHighMem(page)); 372 404 address = (unsigned long)page_address(page); 373 405 374 - if (v_mapped_by_bats(address) || v_mapped_by_tlbcam(address)) 406 + if (v_block_mapped(address)) 375 407 return 0; 376 408 if (!get_pteptr(&init_mm, address, &kpte, &kpmd)) 377 409 return -EINVAL;
+2 -2
arch/powerpc/mm/ppc_mmu_32.c
··· 49 49 /* 50 50 * Return PA for this VA if it is mapped by a BAT, or 0 51 51 */ 52 - phys_addr_t v_mapped_by_bats(unsigned long va) 52 + phys_addr_t v_block_mapped(unsigned long va) 53 53 { 54 54 int b; 55 55 for (b = 0; b < 4; ++b) ··· 61 61 /* 62 62 * Return VA for a given PA or 0 if not mapped 63 63 */ 64 - unsigned long p_mapped_by_bats(phys_addr_t pa) 64 + unsigned long p_block_mapped(phys_addr_t pa) 65 65 { 66 66 int b; 67 67 for (b = 0; b < 4; ++b)