Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Convert __inval_cache_range() to area-based

__inval_cache_range() is already the odd one out among our data cache
maintenance routines as the only remaining range-based one; as we're
going to want an invalidation routine to call from C code for the pmem
API, let's tweak the prototype and name to bring it in line with the
clean operations, and to make its relationship with __dma_inv_area()
neatly mirror that of __clean_dcache_area_poc() and __dma_clean_area().
The loop clearing the early page tables gets mildly massaged in the
process for the sake of consistency.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

Authored by Robin Murphy, committed by Catalin Marinas
commit d46befef (parent 09c2a7dc)

+24 -18
+1
arch/arm64/include/asm/cacheflush.h
··· 67 67 */ 68 68 extern void flush_icache_range(unsigned long start, unsigned long end); 69 69 extern void __flush_dcache_area(void *addr, size_t len); 70 + extern void __inval_dcache_area(void *addr, size_t len); 70 71 extern void __clean_dcache_area_poc(void *addr, size_t len); 71 72 extern void __clean_dcache_area_pou(void *addr, size_t len); 72 73 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
+9 -9
arch/arm64/kernel/head.S
··· 143 143 dmb sy // needed before dc ivac with 144 144 // MMU off 145 145 146 - add x1, x0, #0x20 // 4 x 8 bytes 147 - b __inval_cache_range // tail call 146 + mov x1, #0x20 // 4 x 8 bytes 147 + b __inval_dcache_area // tail call 148 148 ENDPROC(preserve_boot_args) 149 149 150 150 /* ··· 221 221 * dirty cache lines being evicted. 222 222 */ 223 223 adrp x0, idmap_pg_dir 224 - adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE 225 - bl __inval_cache_range 224 + ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) 225 + bl __inval_dcache_area 226 226 227 227 /* 228 228 * Clear the idmap and swapper page tables. 229 229 */ 230 230 adrp x0, idmap_pg_dir 231 - adrp x6, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE 231 + ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) 232 232 1: stp xzr, xzr, [x0], #16 233 233 stp xzr, xzr, [x0], #16 234 234 stp xzr, xzr, [x0], #16 235 235 stp xzr, xzr, [x0], #16 236 - cmp x0, x6 237 - b.lo 1b 236 + subs x1, x1, #64 237 + b.ne 1b 238 238 239 239 mov x7, SWAPPER_MM_MMUFLAGS 240 240 ··· 307 307 * tables again to remove any speculatively loaded cache lines. 308 308 */ 309 309 adrp x0, idmap_pg_dir 310 - adrp x1, swapper_pg_dir + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE 310 + ldr x1, =(IDMAP_DIR_SIZE + SWAPPER_DIR_SIZE + RESERVED_TTBR0_SIZE) 311 311 dmb sy 312 - bl __inval_cache_range 312 + bl __inval_dcache_area 313 313 314 314 ret x28 315 315 ENDPROC(__create_page_tables)
+14 -9
arch/arm64/mm/cache.S
··· 109 109 ENDPROC(__clean_dcache_area_pou) 110 110 111 111 /* 112 + * __inval_dcache_area(kaddr, size) 113 + * 114 + * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) 115 + * are invalidated. Any partial lines at the ends of the interval are 116 + * also cleaned to PoC to prevent data loss. 117 + * 118 + * - kaddr - kernel address 119 + * - size - size in question 120 + */ 121 + ENTRY(__inval_dcache_area) 122 + /* FALLTHROUGH */ 123 + 124 + /* 112 125 * __dma_inv_area(start, size) 113 126 * - start - virtual start address of region 114 127 * - size - size in question 115 128 */ 116 129 __dma_inv_area: 117 130 add x1, x1, x0 118 - /* FALLTHROUGH */ 119 - 120 - /* 121 - * __inval_cache_range(start, end) 122 - * - start - start address of region 123 - * - end - end address of region 124 - */ 125 - ENTRY(__inval_cache_range) 126 131 dcache_line_size x2, x3 127 132 sub x3, x2, #1 128 133 tst x1, x3 // end cache line aligned? ··· 145 140 b.lo 2b 146 141 dsb sy 147 142 ret 148 - ENDPIPROC(__inval_cache_range) 143 + ENDPIPROC(__inval_dcache_area) 149 144 ENDPROC(__dma_inv_area) 150 145 151 146 /*