Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: add size argument to __cpuc_flush_dcache_page

... and rename the function since it no longer operates on just
pages.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>

+137 -116
+5 -5
arch/arm/include/asm/cacheflush.h
@@ struct cpu_cache_fns @@
 
 	void (*coherent_kern_range)(unsigned long, unsigned long);
 	void (*coherent_user_range)(unsigned long, unsigned long);
-	void (*flush_kern_dcache_page)(void *);
+	void (*flush_kern_dcache_area)(void *, size_t);
 
 	void (*dma_inv_range)(const void *, const void *);
 	void (*dma_clean_range)(const void *, const void *);
@@ MULTI_CACHE #defines @@
 #define __cpuc_flush_user_range		cpu_cache.flush_user_range
 #define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
 #define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
-#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page
+#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area
 
 /*
  * These are private to the dma-mapping API. Do not use directly.
@@ single-cache __glue #defines and externs @@
 #define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
 #define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
 #define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
-#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)
+#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)
 
 extern void __cpuc_flush_kern_all(void);
 extern void __cpuc_flush_user_all(void);
 extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
 extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
 extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
-extern void __cpuc_flush_dcache_page(void *);
+extern void __cpuc_flush_dcache_area(void *, size_t);
 
 /*
  * These are private to the dma-mapping API. Do not use directly.
@@ __flush_dcache_page-style helper @@
 {
 	/* highmem pages are always flushed upon kunmap already */
 	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 
 #define flush_dcache_mmap_lock(mapping) \
+6 -5
arch/arm/mm/cache-fa.S
@@ fa_flush_kern_dcache_area @@
 	mov	pc, lr
 
 /*
- *	flush_kern_dcache_page(kaddr)
+ *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
- *	- kaddr   - kernel address (guaranteed to be page aligned)
+ *	- addr	- kernel address
+ *	- size	- size of region
 */
-ENTRY(fa_flush_kern_dcache_page)
-	add	r1, r0, #PAGE_SZ
+ENTRY(fa_flush_kern_dcache_area)
+	add	r1, r0, r1
 1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 	add	r0, r0, #CACHE_DLINESIZE
 	cmp	r0, r1
@@ fa_cache_fns table @@
 	.long	fa_flush_user_cache_range
 	.long	fa_coherent_kern_range
 	.long	fa_coherent_user_range
-	.long	fa_flush_kern_dcache_page
+	.long	fa_flush_kern_dcache_area
 	.long	fa_dma_inv_range
 	.long	fa_dma_clean_range
 	.long	fa_dma_flush_range
+5 -4
arch/arm/mm/cache-v3.S
··· 72 72 mov pc, lr 73 73 74 74 /* 75 - * flush_kern_dcache_page(void *page) 75 + * flush_kern_dcache_area(void *page, size_t size) 76 76 * 77 77 * Ensure no D cache aliasing occurs, either with itself or 78 78 * the I cache 79 79 * 80 - * - addr - page aligned address 80 + * - addr - kernel address 81 + * - size - region size 81 82 */ 82 - ENTRY(v3_flush_kern_dcache_page) 83 + ENTRY(v3_flush_kern_dcache_area) 83 84 /* FALLTHROUGH */ 84 85 85 86 /* ··· 130 129 .long v3_flush_user_cache_range 131 130 .long v3_coherent_kern_range 132 131 .long v3_coherent_user_range 133 - .long v3_flush_kern_dcache_page 132 + .long v3_flush_kern_dcache_area 134 133 .long v3_dma_inv_range 135 134 .long v3_dma_clean_range 136 135 .long v3_dma_flush_range
+5 -4
arch/arm/mm/cache-v4.S
··· 82 82 mov pc, lr 83 83 84 84 /* 85 - * flush_kern_dcache_page(void *page) 85 + * flush_kern_dcache_area(void *addr, size_t size) 86 86 * 87 87 * Ensure no D cache aliasing occurs, either with itself or 88 88 * the I cache 89 89 * 90 - * - addr - page aligned address 90 + * - addr - kernel address 91 + * - size - region size 91 92 */ 92 - ENTRY(v4_flush_kern_dcache_page) 93 + ENTRY(v4_flush_kern_dcache_area) 93 94 /* FALLTHROUGH */ 94 95 95 96 /* ··· 142 141 .long v4_flush_user_cache_range 143 142 .long v4_coherent_kern_range 144 143 .long v4_coherent_user_range 145 - .long v4_flush_kern_dcache_page 144 + .long v4_flush_kern_dcache_area 146 145 .long v4_dma_inv_range 147 146 .long v4_dma_clean_range 148 147 .long v4_dma_flush_range
+6 -5
arch/arm/mm/cache-v4wb.S
··· 114 114 mov pc, lr 115 115 116 116 /* 117 - * flush_kern_dcache_page(void *page) 117 + * flush_kern_dcache_area(void *addr, size_t size) 118 118 * 119 119 * Ensure no D cache aliasing occurs, either with itself or 120 120 * the I cache 121 121 * 122 - * - addr - page aligned address 122 + * - addr - kernel address 123 + * - size - region size 123 124 */ 124 - ENTRY(v4wb_flush_kern_dcache_page) 125 - add r1, r0, #PAGE_SZ 125 + ENTRY(v4wb_flush_kern_dcache_area) 126 + add r1, r0, r1 126 127 /* fall through */ 127 128 128 129 /* ··· 225 224 .long v4wb_flush_user_cache_range 226 225 .long v4wb_coherent_kern_range 227 226 .long v4wb_coherent_user_range 228 - .long v4wb_flush_kern_dcache_page 227 + .long v4wb_flush_kern_dcache_area 229 228 .long v4wb_dma_inv_range 230 229 .long v4wb_dma_clean_range 231 230 .long v4wb_dma_flush_range
+6 -5
arch/arm/mm/cache-v4wt.S
··· 117 117 mov pc, lr 118 118 119 119 /* 120 - * flush_kern_dcache_page(void *page) 120 + * flush_kern_dcache_area(void *addr, size_t size) 121 121 * 122 122 * Ensure no D cache aliasing occurs, either with itself or 123 123 * the I cache 124 124 * 125 - * - addr - page aligned address 125 + * - addr - kernel address 126 + * - size - region size 126 127 */ 127 - ENTRY(v4wt_flush_kern_dcache_page) 128 + ENTRY(v4wt_flush_kern_dcache_area) 128 129 mov r2, #0 129 130 mcr p15, 0, r2, c7, c5, 0 @ invalidate I cache 130 - add r1, r0, #PAGE_SZ 131 + add r1, r0, r1 131 132 /* fallthrough */ 132 133 133 134 /* ··· 181 180 .long v4wt_flush_user_cache_range 182 181 .long v4wt_coherent_kern_range 183 182 .long v4wt_coherent_user_range 184 - .long v4wt_flush_kern_dcache_page 183 + .long v4wt_flush_kern_dcache_area 185 184 .long v4wt_dma_inv_range 186 185 .long v4wt_dma_clean_range 187 186 .long v4wt_dma_flush_range
+6 -5
arch/arm/mm/cache-v6.S
··· 159 159 ENDPROC(v6_coherent_kern_range) 160 160 161 161 /* 162 - * v6_flush_kern_dcache_page(kaddr) 162 + * v6_flush_kern_dcache_area(void *addr, size_t size) 163 163 * 164 164 * Ensure that the data held in the page kaddr is written back 165 165 * to the page in question. 166 166 * 167 - * - kaddr - kernel address (guaranteed to be page aligned) 167 + * - addr - kernel address 168 + * - size - region size 168 169 */ 169 - ENTRY(v6_flush_kern_dcache_page) 170 - add r1, r0, #PAGE_SZ 170 + ENTRY(v6_flush_kern_dcache_area) 171 + add r1, r0, r1 171 172 1: 172 173 #ifdef HARVARD_CACHE 173 174 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line ··· 272 271 .long v6_flush_user_cache_range 273 272 .long v6_coherent_kern_range 274 273 .long v6_coherent_user_range 275 - .long v6_flush_kern_dcache_page 274 + .long v6_flush_kern_dcache_area 276 275 .long v6_dma_inv_range 277 276 .long v6_dma_clean_range 278 277 .long v6_dma_flush_range
+7 -6
arch/arm/mm/cache-v7.S
··· 186 186 ENDPROC(v7_coherent_user_range) 187 187 188 188 /* 189 - * v7_flush_kern_dcache_page(kaddr) 189 + * v7_flush_kern_dcache_area(void *addr, size_t size) 190 190 * 191 191 * Ensure that the data held in the page kaddr is written back 192 192 * to the page in question. 193 193 * 194 - * - kaddr - kernel address (guaranteed to be page aligned) 194 + * - addr - kernel address 195 + * - size - region size 195 196 */ 196 - ENTRY(v7_flush_kern_dcache_page) 197 + ENTRY(v7_flush_kern_dcache_area) 197 198 dcache_line_size r2, r3 198 - add r1, r0, #PAGE_SZ 199 + add r1, r0, r1 199 200 1: 200 201 mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line 201 202 add r0, r0, r2 ··· 204 203 blo 1b 205 204 dsb 206 205 mov pc, lr 207 - ENDPROC(v7_flush_kern_dcache_page) 206 + ENDPROC(v7_flush_kern_dcache_area) 208 207 209 208 /* 210 209 * v7_dma_inv_range(start,end) ··· 280 279 .long v7_flush_user_cache_range 281 280 .long v7_coherent_kern_range 282 281 .long v7_coherent_user_range 283 - .long v7_flush_kern_dcache_page 282 + .long v7_flush_kern_dcache_area 284 283 .long v7_dma_inv_range 285 284 .long v7_dma_clean_range 286 285 .long v7_dma_flush_range
+2 -2
arch/arm/mm/flush.c
@@ __flush_dcache_page @@
 	 */
 	if (addr)
 #endif
-		__cpuc_flush_dcache_page(addr);
+		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
@@ flush_anon-style tail @@
 	 * in this mapping of the page. FIXME: this is overkill
 	 * since we actually ask for a write-back and invalidate.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
+1 -1
arch/arm/mm/highmem.c
@@ kunmap_atomic @@
 	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
 	if (kvaddr >= (void *)FIXADDR_START) {
-		__cpuc_flush_dcache_page((void *)vaddr);
+		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
 		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
+1 -1
arch/arm/mm/nommu.c
@@ flush_dcache_page (nommu) @@
 
 void flush_dcache_page(struct page *page)
 {
-	__cpuc_flush_dcache_page(page_address(page));
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+6 -5
arch/arm/mm/proc-arm1020.S
··· 231 231 mov pc, lr 232 232 233 233 /* 234 - * flush_kern_dcache_page(void *page) 234 + * flush_kern_dcache_area(void *addr, size_t size) 235 235 * 236 236 * Ensure no D cache aliasing occurs, either with itself or 237 237 * the I cache 238 238 * 239 - * - page - page aligned address 239 + * - addr - kernel address 240 + * - size - region size 240 241 */ 241 - ENTRY(arm1020_flush_kern_dcache_page) 242 + ENTRY(arm1020_flush_kern_dcache_area) 242 243 mov ip, #0 243 244 #ifndef CONFIG_CPU_DCACHE_DISABLE 244 - add r1, r0, #PAGE_SZ 245 + add r1, r0, r1 245 246 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 246 247 mcr p15, 0, ip, c7, c10, 4 @ drain WB 247 248 add r0, r0, #CACHE_DLINESIZE ··· 336 335 .long arm1020_flush_user_cache_range 337 336 .long arm1020_coherent_kern_range 338 337 .long arm1020_coherent_user_range 339 - .long arm1020_flush_kern_dcache_page 338 + .long arm1020_flush_kern_dcache_area 340 339 .long arm1020_dma_inv_range 341 340 .long arm1020_dma_clean_range 342 341 .long arm1020_dma_flush_range
+6 -5
arch/arm/mm/proc-arm1020e.S
··· 225 225 mov pc, lr 226 226 227 227 /* 228 - * flush_kern_dcache_page(void *page) 228 + * flush_kern_dcache_area(void *addr, size_t size) 229 229 * 230 230 * Ensure no D cache aliasing occurs, either with itself or 231 231 * the I cache 232 232 * 233 - * - page - page aligned address 233 + * - addr - kernel address 234 + * - size - region size 234 235 */ 235 - ENTRY(arm1020e_flush_kern_dcache_page) 236 + ENTRY(arm1020e_flush_kern_dcache_area) 236 237 mov ip, #0 237 238 #ifndef CONFIG_CPU_DCACHE_DISABLE 238 - add r1, r0, #PAGE_SZ 239 + add r1, r0, r1 239 240 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 240 241 add r0, r0, #CACHE_DLINESIZE 241 242 cmp r0, r1 ··· 322 321 .long arm1020e_flush_user_cache_range 323 322 .long arm1020e_coherent_kern_range 324 323 .long arm1020e_coherent_user_range 325 - .long arm1020e_flush_kern_dcache_page 324 + .long arm1020e_flush_kern_dcache_area 326 325 .long arm1020e_dma_inv_range 327 326 .long arm1020e_dma_clean_range 328 327 .long arm1020e_dma_flush_range
+6 -5
arch/arm/mm/proc-arm1022.S
··· 214 214 mov pc, lr 215 215 216 216 /* 217 - * flush_kern_dcache_page(void *page) 217 + * flush_kern_dcache_area(void *addr, size_t size) 218 218 * 219 219 * Ensure no D cache aliasing occurs, either with itself or 220 220 * the I cache 221 221 * 222 - * - page - page aligned address 222 + * - addr - kernel address 223 + * - size - region size 223 224 */ 224 - ENTRY(arm1022_flush_kern_dcache_page) 225 + ENTRY(arm1022_flush_kern_dcache_area) 225 226 mov ip, #0 226 227 #ifndef CONFIG_CPU_DCACHE_DISABLE 227 - add r1, r0, #PAGE_SZ 228 + add r1, r0, r1 228 229 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 229 230 add r0, r0, #CACHE_DLINESIZE 230 231 cmp r0, r1 ··· 311 310 .long arm1022_flush_user_cache_range 312 311 .long arm1022_coherent_kern_range 313 312 .long arm1022_coherent_user_range 314 - .long arm1022_flush_kern_dcache_page 313 + .long arm1022_flush_kern_dcache_area 315 314 .long arm1022_dma_inv_range 316 315 .long arm1022_dma_clean_range 317 316 .long arm1022_dma_flush_range
+6 -5
arch/arm/mm/proc-arm1026.S
··· 208 208 mov pc, lr 209 209 210 210 /* 211 - * flush_kern_dcache_page(void *page) 211 + * flush_kern_dcache_area(void *addr, size_t size) 212 212 * 213 213 * Ensure no D cache aliasing occurs, either with itself or 214 214 * the I cache 215 215 * 216 - * - page - page aligned address 216 + * - addr - kernel address 217 + * - size - region size 217 218 */ 218 - ENTRY(arm1026_flush_kern_dcache_page) 219 + ENTRY(arm1026_flush_kern_dcache_area) 219 220 mov ip, #0 220 221 #ifndef CONFIG_CPU_DCACHE_DISABLE 221 - add r1, r0, #PAGE_SZ 222 + add r1, r0, r1 222 223 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 223 224 add r0, r0, #CACHE_DLINESIZE 224 225 cmp r0, r1 ··· 305 304 .long arm1026_flush_user_cache_range 306 305 .long arm1026_coherent_kern_range 307 306 .long arm1026_coherent_user_range 308 - .long arm1026_flush_kern_dcache_page 307 + .long arm1026_flush_kern_dcache_area 309 308 .long arm1026_dma_inv_range 310 309 .long arm1026_dma_clean_range 311 310 .long arm1026_dma_flush_range
+6 -5
arch/arm/mm/proc-arm920.S
··· 207 207 mov pc, lr 208 208 209 209 /* 210 - * flush_kern_dcache_page(void *page) 210 + * flush_kern_dcache_area(void *addr, size_t size) 211 211 * 212 212 * Ensure no D cache aliasing occurs, either with itself or 213 213 * the I cache 214 214 * 215 - * - addr - page aligned address 215 + * - addr - kernel address 216 + * - size - region size 216 217 */ 217 - ENTRY(arm920_flush_kern_dcache_page) 218 - add r1, r0, #PAGE_SZ 218 + ENTRY(arm920_flush_kern_dcache_area) 219 + add r1, r0, r1 219 220 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 220 221 add r0, r0, #CACHE_DLINESIZE 221 222 cmp r0, r1 ··· 294 293 .long arm920_flush_user_cache_range 295 294 .long arm920_coherent_kern_range 296 295 .long arm920_coherent_user_range 297 - .long arm920_flush_kern_dcache_page 296 + .long arm920_flush_kern_dcache_area 298 297 .long arm920_dma_inv_range 299 298 .long arm920_dma_clean_range 300 299 .long arm920_dma_flush_range
+6 -5
arch/arm/mm/proc-arm922.S
··· 209 209 mov pc, lr 210 210 211 211 /* 212 - * flush_kern_dcache_page(void *page) 212 + * flush_kern_dcache_area(void *addr, size_t size) 213 213 * 214 214 * Ensure no D cache aliasing occurs, either with itself or 215 215 * the I cache 216 216 * 217 - * - addr - page aligned address 217 + * - addr - kernel address 218 + * - size - region size 218 219 */ 219 - ENTRY(arm922_flush_kern_dcache_page) 220 - add r1, r0, #PAGE_SZ 220 + ENTRY(arm922_flush_kern_dcache_area) 221 + add r1, r0, r1 221 222 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 222 223 add r0, r0, #CACHE_DLINESIZE 223 224 cmp r0, r1 ··· 296 295 .long arm922_flush_user_cache_range 297 296 .long arm922_coherent_kern_range 298 297 .long arm922_coherent_user_range 299 - .long arm922_flush_kern_dcache_page 298 + .long arm922_flush_kern_dcache_area 300 299 .long arm922_dma_inv_range 301 300 .long arm922_dma_clean_range 302 301 .long arm922_dma_flush_range
+6 -5
arch/arm/mm/proc-arm925.S
··· 251 251 mov pc, lr 252 252 253 253 /* 254 - * flush_kern_dcache_page(void *page) 254 + * flush_kern_dcache_area(void *addr, size_t size) 255 255 * 256 256 * Ensure no D cache aliasing occurs, either with itself or 257 257 * the I cache 258 258 * 259 - * - addr - page aligned address 259 + * - addr - kernel address 260 + * - size - region size 260 261 */ 261 - ENTRY(arm925_flush_kern_dcache_page) 262 - add r1, r0, #PAGE_SZ 262 + ENTRY(arm925_flush_kern_dcache_area) 263 + add r1, r0, r1 263 264 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 264 265 add r0, r0, #CACHE_DLINESIZE 265 266 cmp r0, r1 ··· 347 346 .long arm925_flush_user_cache_range 348 347 .long arm925_coherent_kern_range 349 348 .long arm925_coherent_user_range 350 - .long arm925_flush_kern_dcache_page 349 + .long arm925_flush_kern_dcache_area 351 350 .long arm925_dma_inv_range 352 351 .long arm925_dma_clean_range 353 352 .long arm925_dma_flush_range
+6 -5
arch/arm/mm/proc-arm926.S
··· 214 214 mov pc, lr 215 215 216 216 /* 217 - * flush_kern_dcache_page(void *page) 217 + * flush_kern_dcache_area(void *addr, size_t size) 218 218 * 219 219 * Ensure no D cache aliasing occurs, either with itself or 220 220 * the I cache 221 221 * 222 - * - addr - page aligned address 222 + * - addr - kernel address 223 + * - size - region size 223 224 */ 224 - ENTRY(arm926_flush_kern_dcache_page) 225 - add r1, r0, #PAGE_SZ 225 + ENTRY(arm926_flush_kern_dcache_area) 226 + add r1, r0, r1 226 227 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 227 228 add r0, r0, #CACHE_DLINESIZE 228 229 cmp r0, r1 ··· 310 309 .long arm926_flush_user_cache_range 311 310 .long arm926_coherent_kern_range 312 311 .long arm926_coherent_user_range 313 - .long arm926_flush_kern_dcache_page 312 + .long arm926_flush_kern_dcache_area 314 313 .long arm926_dma_inv_range 315 314 .long arm926_dma_clean_range 316 315 .long arm926_dma_flush_range
+5 -4
arch/arm/mm/proc-arm940.S
··· 141 141 /* FALLTHROUGH */ 142 142 143 143 /* 144 - * flush_kern_dcache_page(void *page) 144 + * flush_kern_dcache_area(void *addr, size_t size) 145 145 * 146 146 * Ensure no D cache aliasing occurs, either with itself or 147 147 * the I cache 148 148 * 149 - * - addr - page aligned address 149 + * - addr - kernel address 150 + * - size - region size 150 151 */ 151 - ENTRY(arm940_flush_kern_dcache_page) 152 + ENTRY(arm940_flush_kern_dcache_area) 152 153 mov ip, #0 153 154 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments 154 155 1: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries ··· 239 238 .long arm940_flush_user_cache_range 240 239 .long arm940_coherent_kern_range 241 240 .long arm940_coherent_user_range 242 - .long arm940_flush_kern_dcache_page 241 + .long arm940_flush_kern_dcache_area 243 242 .long arm940_dma_inv_range 244 243 .long arm940_dma_clean_range 245 244 .long arm940_dma_flush_range
+6 -5
arch/arm/mm/proc-arm946.S
··· 183 183 mov pc, lr 184 184 185 185 /* 186 - * flush_kern_dcache_page(void *page) 186 + * flush_kern_dcache_area(void *addr, size_t size) 187 187 * 188 188 * Ensure no D cache aliasing occurs, either with itself or 189 189 * the I cache 190 190 * 191 - * - addr - page aligned address 191 + * - addr - kernel address 192 + * - size - region size 192 193 * (same as arm926) 193 194 */ 194 - ENTRY(arm946_flush_kern_dcache_page) 195 - add r1, r0, #PAGE_SZ 195 + ENTRY(arm946_flush_kern_dcache_area) 196 + add r1, r0, r1 196 197 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 197 198 add r0, r0, #CACHE_DLINESIZE 198 199 cmp r0, r1 ··· 281 280 .long arm946_flush_user_cache_range 282 281 .long arm946_coherent_kern_range 283 282 .long arm946_coherent_user_range 284 - .long arm946_flush_kern_dcache_page 283 + .long arm946_flush_kern_dcache_area 285 284 .long arm946_dma_inv_range 286 285 .long arm946_dma_clean_range 287 286 .long arm946_dma_flush_range
+8 -7
arch/arm/mm/proc-feroceon.S
··· 226 226 mov pc, lr 227 227 228 228 /* 229 - * flush_kern_dcache_page(void *page) 229 + * flush_kern_dcache_area(void *addr, size_t size) 230 230 * 231 231 * Ensure no D cache aliasing occurs, either with itself or 232 232 * the I cache 233 233 * 234 - * - addr - page aligned address 234 + * - addr - kernel address 235 + * - size - region size 235 236 */ 236 237 .align 5 237 - ENTRY(feroceon_flush_kern_dcache_page) 238 - add r1, r0, #PAGE_SZ 238 + ENTRY(feroceon_flush_kern_dcache_area) 239 + add r1, r0, r1 239 240 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 240 241 add r0, r0, #CACHE_DLINESIZE 241 242 cmp r0, r1 ··· 247 246 mov pc, lr 248 247 249 248 .align 5 250 - ENTRY(feroceon_range_flush_kern_dcache_page) 249 + ENTRY(feroceon_range_flush_kern_dcache_area) 251 250 mrs r2, cpsr 252 251 add r1, r0, #PAGE_SZ - CACHE_DLINESIZE @ top addr is inclusive 253 252 orr r3, r2, #PSR_I_BIT ··· 373 372 .long feroceon_flush_user_cache_range 374 373 .long feroceon_coherent_kern_range 375 374 .long feroceon_coherent_user_range 376 - .long feroceon_flush_kern_dcache_page 375 + .long feroceon_flush_kern_dcache_area 377 376 .long feroceon_dma_inv_range 378 377 .long feroceon_dma_clean_range 379 378 .long feroceon_dma_flush_range ··· 384 383 .long feroceon_flush_user_cache_range 385 384 .long feroceon_coherent_kern_range 386 385 .long feroceon_coherent_user_range 387 - .long feroceon_range_flush_kern_dcache_page 386 + .long feroceon_range_flush_kern_dcache_area 388 387 .long feroceon_range_dma_inv_range 389 388 .long feroceon_range_dma_clean_range 390 389 .long feroceon_range_dma_flush_range
+6 -5
arch/arm/mm/proc-mohawk.S
··· 186 186 mov pc, lr 187 187 188 188 /* 189 - * flush_kern_dcache_page(void *page) 189 + * flush_kern_dcache_area(void *addr, size_t size) 190 190 * 191 191 * Ensure no D cache aliasing occurs, either with itself or 192 192 * the I cache 193 193 * 194 - * - addr - page aligned address 194 + * - addr - kernel address 195 + * - size - region size 195 196 */ 196 - ENTRY(mohawk_flush_kern_dcache_page) 197 - add r1, r0, #PAGE_SZ 197 + ENTRY(mohawk_flush_kern_dcache_area) 198 + add r1, r0, r1 198 199 1: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry 199 200 add r0, r0, #CACHE_DLINESIZE 200 201 cmp r0, r1 ··· 274 273 .long mohawk_flush_user_cache_range 275 274 .long mohawk_coherent_kern_range 276 275 .long mohawk_coherent_user_range 277 - .long mohawk_flush_kern_dcache_page 276 + .long mohawk_flush_kern_dcache_area 278 277 .long mohawk_dma_inv_range 279 278 .long mohawk_dma_clean_range 280 279 .long mohawk_dma_flush_range
+1 -1
arch/arm/mm/proc-syms.c
@@ cache API exports @@
 EXPORT_SYMBOL(__cpuc_flush_user_all);
 EXPORT_SYMBOL(__cpuc_flush_user_range);
 EXPORT_SYMBOL(__cpuc_coherent_kern_range);
-EXPORT_SYMBOL(__cpuc_flush_dcache_page);
+EXPORT_SYMBOL(__cpuc_flush_dcache_area);
 #else
 EXPORT_SYMBOL(cpu_cache);
 #endif
+6 -5
arch/arm/mm/proc-xsc3.S
··· 226 226 mov pc, lr 227 227 228 228 /* 229 - * flush_kern_dcache_page(void *page) 229 + * flush_kern_dcache_area(void *addr, size_t size) 230 230 * 231 231 * Ensure no D cache aliasing occurs, either with itself or 232 232 * the I cache. 233 233 * 234 - * - addr - page aligned address 234 + * - addr - kernel address 235 + * - size - region size 235 236 */ 236 - ENTRY(xsc3_flush_kern_dcache_page) 237 - add r1, r0, #PAGE_SZ 237 + ENTRY(xsc3_flush_kern_dcache_area) 238 + add r1, r0, r1 238 239 1: mcr p15, 0, r0, c7, c14, 1 @ clean/invalidate L1 D line 239 240 add r0, r0, #CACHELINESIZE 240 241 cmp r0, r1 ··· 310 309 .long xsc3_flush_user_cache_range 311 310 .long xsc3_coherent_kern_range 312 311 .long xsc3_coherent_user_range 313 - .long xsc3_flush_kern_dcache_page 312 + .long xsc3_flush_kern_dcache_area 314 313 .long xsc3_dma_inv_range 315 314 .long xsc3_dma_clean_range 316 315 .long xsc3_dma_flush_range
+7 -6
arch/arm/mm/proc-xscale.S
··· 284 284 mov pc, lr 285 285 286 286 /* 287 - * flush_kern_dcache_page(void *page) 287 + * flush_kern_dcache_area(void *addr, size_t size) 288 288 * 289 289 * Ensure no D cache aliasing occurs, either with itself or 290 290 * the I cache 291 291 * 292 - * - addr - page aligned address 292 + * - addr - kernel address 293 + * - size - region size 293 294 */ 294 - ENTRY(xscale_flush_kern_dcache_page) 295 - add r1, r0, #PAGE_SZ 295 + ENTRY(xscale_flush_kern_dcache_area) 296 + add r1, r0, r1 296 297 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 297 298 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry 298 299 add r0, r0, #CACHELINESIZE ··· 369 368 .long xscale_flush_user_cache_range 370 369 .long xscale_coherent_kern_range 371 370 .long xscale_coherent_user_range 372 - .long xscale_flush_kern_dcache_page 371 + .long xscale_flush_kern_dcache_area 373 372 .long xscale_dma_inv_range 374 373 .long xscale_dma_clean_range 375 374 .long xscale_dma_flush_range ··· 393 392 .long xscale_flush_user_cache_range 394 393 .long xscale_coherent_kern_range 395 394 .long xscale_coherent_user_range 396 - .long xscale_flush_kern_dcache_page 395 + .long xscale_flush_kern_dcache_area 397 396 .long xscale_dma_flush_range 398 397 .long xscale_dma_clean_range 399 398 .long xscale_dma_flush_range