Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Rename arm64-internal cache maintenance functions

Although naming across the codebase isn't that consistent, it
tends to follow certain patterns. Moreover, the term "flush"
isn't defined in the Arm Architecture reference manual, and might
be interpreted to mean clean, invalidate, or both for a cache.

Rename arm64-internal functions to make the naming internally
consistent, as well as making it consistent with the Arm ARM, by
specifying whether it applies to the instruction, data, or both
caches, and whether the operation is a clean, invalidate, or both.
Also specify which point the operation applies to, i.e., to the
point of unification (PoU), coherency (PoC), or persistence
(PoP).

This commit applies the following sed transformation to all files
under arch/arm64:

"s/\b__flush_cache_range\b/caches_clean_inval_pou_macro/g;"\
"s/\b__flush_icache_range\b/caches_clean_inval_pou/g;"\
"s/\binvalidate_icache_range\b/icache_inval_pou/g;"\
"s/\b__flush_dcache_area\b/dcache_clean_inval_poc/g;"\
"s/\b__inval_dcache_area\b/dcache_inval_poc/g;"\
"s/__clean_dcache_area_poc\b/dcache_clean_poc/g;"\
"s/\b__clean_dcache_area_pop\b/dcache_clean_pop/g;"\
"s/\b__clean_dcache_area_pou\b/dcache_clean_pou/g;"\
"s/\b__flush_cache_user_range\b/caches_clean_inval_user_pou/g;"\
"s/\b__flush_icache_all\b/icache_inval_all_pou/g;"

Note that __clean_dcache_area_poc is deliberately missing a word
boundary check at the beginning in order to match the efistub
symbols in image-vars.h.

Also note that, despite its name, __flush_icache_range operates
on both instruction and data caches. The name change here
reflects that.

No functional change intended.

Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Fuad Tabba <tabba@google.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20210524083001.2586635-19-tabba@google.com
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Fuad Tabba and committed by Will Deacon.
fade9c2c 393239be

+98 -98
+1 -1
arch/arm64/include/asm/arch_gicv3.h
··· 125 125 #define gic_write_lpir(v, c) writeq_relaxed(v, c) 126 126 127 127 #define gic_flush_dcache_to_poc(a,l) \ 128 - __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l)) 128 + dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l)) 129 129 130 130 #define gits_read_baser(c) readq_relaxed(c) 131 131 #define gits_write_baser(v, c) writeq_relaxed(v, c)
+18 -18
arch/arm64/include/asm/cacheflush.h
··· 34 34 * - start - virtual start address (inclusive) 35 35 * - end - virtual end address (exclusive) 36 36 * 37 - * __flush_icache_range(start, end) 37 + * caches_clean_inval_pou(start, end) 38 38 * 39 39 * Ensure coherency between the I-cache and the D-cache region to 40 40 * the Point of Unification. 41 41 * 42 - * __flush_cache_user_range(start, end) 42 + * caches_clean_inval_user_pou(start, end) 43 43 * 44 44 * Ensure coherency between the I-cache and the D-cache region to 45 45 * the Point of Unification. 46 46 * Use only if the region might access user memory. 47 47 * 48 - * invalidate_icache_range(start, end) 48 + * icache_inval_pou(start, end) 49 49 * 50 50 * Invalidate I-cache region to the Point of Unification. 51 51 * 52 - * __flush_dcache_area(start, end) 52 + * dcache_clean_inval_poc(start, end) 53 53 * 54 54 * Clean and invalidate D-cache region to the Point of Coherency. 55 55 * 56 - * __inval_dcache_area(start, end) 56 + * dcache_inval_poc(start, end) 57 57 * 58 58 * Invalidate D-cache region to the Point of Coherency. 59 59 * 60 - * __clean_dcache_area_poc(start, end) 60 + * dcache_clean_poc(start, end) 61 61 * 62 62 * Clean D-cache region to the Point of Coherency. 63 63 * 64 - * __clean_dcache_area_pop(start, end) 64 + * dcache_clean_pop(start, end) 65 65 * 66 66 * Clean D-cache region to the Point of Persistence. 67 67 * 68 - * __clean_dcache_area_pou(start, end) 68 + * dcache_clean_pou(start, end) 69 69 * 70 70 * Clean D-cache region to the Point of Unification. 
71 71 */ 72 - extern void __flush_icache_range(unsigned long start, unsigned long end); 73 - extern void invalidate_icache_range(unsigned long start, unsigned long end); 74 - extern void __flush_dcache_area(unsigned long start, unsigned long end); 75 - extern void __inval_dcache_area(unsigned long start, unsigned long end); 76 - extern void __clean_dcache_area_poc(unsigned long start, unsigned long end); 77 - extern void __clean_dcache_area_pop(unsigned long start, unsigned long end); 78 - extern void __clean_dcache_area_pou(unsigned long start, unsigned long end); 79 - extern long __flush_cache_user_range(unsigned long start, unsigned long end); 72 + extern void caches_clean_inval_pou(unsigned long start, unsigned long end); 73 + extern void icache_inval_pou(unsigned long start, unsigned long end); 74 + extern void dcache_clean_inval_poc(unsigned long start, unsigned long end); 75 + extern void dcache_inval_poc(unsigned long start, unsigned long end); 76 + extern void dcache_clean_poc(unsigned long start, unsigned long end); 77 + extern void dcache_clean_pop(unsigned long start, unsigned long end); 78 + extern void dcache_clean_pou(unsigned long start, unsigned long end); 79 + extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end); 80 80 extern void sync_icache_aliases(unsigned long start, unsigned long end); 81 81 82 82 static inline void flush_icache_range(unsigned long start, unsigned long end) 83 83 { 84 - __flush_icache_range(start, end); 84 + caches_clean_inval_pou(start, end); 85 85 86 86 /* 87 87 * IPI all online CPUs so that they undergo a context synchronization ··· 135 135 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 136 136 extern void flush_dcache_page(struct page *); 137 137 138 - static __always_inline void __flush_icache_all(void) 138 + static __always_inline void icache_inval_all_pou(void) 139 139 { 140 140 if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC)) 141 141 return;
+1 -1
arch/arm64/include/asm/efi.h
··· 137 137 138 138 static inline void efi_capsule_flush_cache_range(void *addr, int size) 139 139 { 140 - __flush_dcache_area((unsigned long)addr, (unsigned long)addr + size); 140 + dcache_clean_inval_poc((unsigned long)addr, (unsigned long)addr + size); 141 141 } 142 142 143 143 #endif /* _ASM_EFI_H */
+3 -3
arch/arm64/include/asm/kvm_mmu.h
··· 181 181 struct kvm; 182 182 183 183 #define kvm_flush_dcache_to_poc(a,l) \ 184 - __flush_dcache_area((unsigned long)(a), (unsigned long)(a)+(l)) 184 + dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l)) 185 185 186 186 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) 187 187 { ··· 209 209 { 210 210 if (icache_is_aliasing()) { 211 211 /* any kind of VIPT cache */ 212 - __flush_icache_all(); 212 + icache_inval_all_pou(); 213 213 } else if (is_kernel_in_hyp_mode() || !icache_is_vpipt()) { 214 214 /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */ 215 215 void *va = page_address(pfn_to_page(pfn)); 216 216 217 - invalidate_icache_range((unsigned long)va, 217 + icache_inval_pou((unsigned long)va, 218 218 (unsigned long)va + size); 219 219 } 220 220 }
+1 -1
arch/arm64/kernel/alternative.c
··· 181 181 */ 182 182 if (!is_module) { 183 183 dsb(ish); 184 - __flush_icache_all(); 184 + icache_inval_all_pou(); 185 185 isb(); 186 186 187 187 /* Ignore ARM64_CB bit from feature mask */
+2 -2
arch/arm64/kernel/efi-entry.S
··· 29 29 */ 30 30 ldr w1, =kernel_size 31 31 add x1, x0, x1 32 - bl __clean_dcache_area_poc 32 + bl dcache_clean_poc 33 33 ic ialluis 34 34 35 35 /* ··· 38 38 */ 39 39 adr x0, 0f 40 40 adr x1, 3f 41 - bl __clean_dcache_area_poc 41 + bl dcache_clean_poc 42 42 0: 43 43 /* Turn off Dcache and MMU */ 44 44 mrs x0, CurrentEL
+4 -4
arch/arm64/kernel/head.S
··· 118 118 // MMU off 119 119 120 120 add x1, x0, #0x20 // 4 x 8 bytes 121 - b __inval_dcache_area // tail call 121 + b dcache_inval_poc // tail call 122 122 SYM_CODE_END(preserve_boot_args) 123 123 124 124 /* ··· 268 268 */ 269 269 adrp x0, init_pg_dir 270 270 adrp x1, init_pg_end 271 - bl __inval_dcache_area 271 + bl dcache_inval_poc 272 272 273 273 /* 274 274 * Clear the init page tables. ··· 381 381 382 382 adrp x0, idmap_pg_dir 383 383 adrp x1, idmap_pg_end 384 - bl __inval_dcache_area 384 + bl dcache_inval_poc 385 385 386 386 adrp x0, init_pg_dir 387 387 adrp x1, init_pg_end 388 - bl __inval_dcache_area 388 + bl dcache_inval_poc 389 389 390 390 ret x28 391 391 SYM_FUNC_END(__create_page_tables)
+2 -2
arch/arm64/kernel/hibernate-asm.S
··· 45 45 * Because this code has to be copied to a 'safe' page, it can't call out to 46 46 * other functions by PC-relative address. Also remember that it may be 47 47 * mid-way through over-writing other functions. For this reason it contains 48 - * code from __flush_icache_range() and uses the copy_page() macro. 48 + * code from caches_clean_inval_pou() and uses the copy_page() macro. 49 49 * 50 50 * This 'safe' page is mapped via ttbr0, and executed from there. This function 51 51 * switches to a copy of the linear map in ttbr1, performs the restore, then ··· 87 87 copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9 88 88 89 89 add x1, x10, #PAGE_SIZE 90 - /* Clean the copied page to PoU - based on __flush_icache_range() */ 90 + /* Clean the copied page to PoU - based on caches_clean_inval_pou() */ 91 91 raw_dcache_line_size x2, x3 92 92 sub x3, x2, #1 93 93 bic x4, x10, x3
+6 -6
arch/arm64/kernel/hibernate.c
··· 210 210 return -ENOMEM; 211 211 212 212 memcpy(page, src_start, length); 213 - __flush_icache_range((unsigned long)page, (unsigned long)page + length); 213 + caches_clean_inval_pou((unsigned long)page, (unsigned long)page + length); 214 214 rc = trans_pgd_idmap_page(&trans_info, &trans_ttbr0, &t0sz, page); 215 215 if (rc) 216 216 return rc; ··· 381 381 ret = swsusp_save(); 382 382 } else { 383 383 /* Clean kernel core startup/idle code to PoC*/ 384 - __flush_dcache_area((unsigned long)__mmuoff_data_start, 384 + dcache_clean_inval_poc((unsigned long)__mmuoff_data_start, 385 385 (unsigned long)__mmuoff_data_end); 386 - __flush_dcache_area((unsigned long)__idmap_text_start, 386 + dcache_clean_inval_poc((unsigned long)__idmap_text_start, 387 387 (unsigned long)__idmap_text_end); 388 388 389 389 /* Clean kvm setup code to PoC? */ 390 390 if (el2_reset_needed()) { 391 - __flush_dcache_area( 391 + dcache_clean_inval_poc( 392 392 (unsigned long)__hyp_idmap_text_start, 393 393 (unsigned long)__hyp_idmap_text_end); 394 - __flush_dcache_area((unsigned long)__hyp_text_start, 394 + dcache_clean_inval_poc((unsigned long)__hyp_text_start, 395 395 (unsigned long)__hyp_text_end); 396 396 } 397 397 ··· 477 477 * The hibernate exit text contains a set of el2 vectors, that will 478 478 * be executed at el2 with the mmu off in order to reload hyp-stub. 479 479 */ 480 - __flush_dcache_area((unsigned long)hibernate_exit, 480 + dcache_clean_inval_poc((unsigned long)hibernate_exit, 481 481 (unsigned long)hibernate_exit + exit_size); 482 482 483 483 /*
+1 -1
arch/arm64/kernel/idreg-override.c
··· 237 237 238 238 for (i = 0; i < ARRAY_SIZE(regs); i++) { 239 239 if (regs[i]->override) 240 - __flush_dcache_area((unsigned long)regs[i]->override, 240 + dcache_clean_inval_poc((unsigned long)regs[i]->override, 241 241 (unsigned long)regs[i]->override + 242 242 sizeof(*regs[i]->override)); 243 243 }
+1 -1
arch/arm64/kernel/image-vars.h
··· 35 35 __efistub_strcmp = __pi_strcmp; 36 36 __efistub_strncmp = __pi_strncmp; 37 37 __efistub_strrchr = __pi_strrchr; 38 - __efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc; 38 + __efistub_dcache_clean_poc = __pi_dcache_clean_poc; 39 39 40 40 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS) 41 41 __efistub___memcpy = __pi_memcpy;
+1 -1
arch/arm64/kernel/insn.c
··· 198 198 199 199 ret = aarch64_insn_write(tp, insn); 200 200 if (ret == 0) 201 - __flush_icache_range((uintptr_t)tp, 201 + caches_clean_inval_pou((uintptr_t)tp, 202 202 (uintptr_t)tp + AARCH64_INSN_SIZE); 203 203 204 204 return ret;
+3 -3
arch/arm64/kernel/kaslr.c
··· 72 72 * we end up running with module randomization disabled. 73 73 */ 74 74 module_alloc_base = (u64)_etext - MODULES_VSIZE; 75 - __flush_dcache_area((unsigned long)&module_alloc_base, 75 + dcache_clean_inval_poc((unsigned long)&module_alloc_base, 76 76 (unsigned long)&module_alloc_base + 77 77 sizeof(module_alloc_base)); 78 78 ··· 172 172 module_alloc_base += (module_range * (seed & ((1 << 21) - 1))) >> 21; 173 173 module_alloc_base &= PAGE_MASK; 174 174 175 - __flush_dcache_area((unsigned long)&module_alloc_base, 175 + dcache_clean_inval_poc((unsigned long)&module_alloc_base, 176 176 (unsigned long)&module_alloc_base + 177 177 sizeof(module_alloc_base)); 178 - __flush_dcache_area((unsigned long)&memstart_offset_seed, 178 + dcache_clean_inval_poc((unsigned long)&memstart_offset_seed, 179 179 (unsigned long)&memstart_offset_seed + 180 180 sizeof(memstart_offset_seed)); 181 181
+5 -5
arch/arm64/kernel/machine_kexec.c
··· 72 72 * For execution with the MMU off, reloc_code needs to be cleaned to the 73 73 * PoC and invalidated from the I-cache. 74 74 */ 75 - __flush_dcache_area((unsigned long)reloc_code, 75 + dcache_clean_inval_poc((unsigned long)reloc_code, 76 76 (unsigned long)reloc_code + 77 77 arm64_relocate_new_kernel_size); 78 - invalidate_icache_range((uintptr_t)reloc_code, 78 + icache_inval_pou((uintptr_t)reloc_code, 79 79 (uintptr_t)reloc_code + 80 80 arm64_relocate_new_kernel_size); 81 81 ··· 111 111 unsigned long addr; 112 112 113 113 /* flush the list entries. */ 114 - __flush_dcache_area((unsigned long)entry, 114 + dcache_clean_inval_poc((unsigned long)entry, 115 115 (unsigned long)entry + 116 116 sizeof(kimage_entry_t)); 117 117 ··· 128 128 break; 129 129 case IND_SOURCE: 130 130 /* flush the source pages. */ 131 - __flush_dcache_area(addr, addr + PAGE_SIZE); 131 + dcache_clean_inval_poc(addr, addr + PAGE_SIZE); 132 132 break; 133 133 case IND_DESTINATION: 134 134 break; ··· 155 155 kimage->segment[i].memsz, 156 156 kimage->segment[i].memsz / PAGE_SIZE); 157 157 158 - __flush_dcache_area( 158 + dcache_clean_inval_poc( 159 159 (unsigned long)phys_to_virt(kimage->segment[i].mem), 160 160 (unsigned long)phys_to_virt(kimage->segment[i].mem) + 161 161 kimage->segment[i].memsz);
+2 -2
arch/arm64/kernel/smp.c
··· 122 122 secondary_data.task = idle; 123 123 secondary_data.stack = task_stack_page(idle) + THREAD_SIZE; 124 124 update_cpu_boot_status(CPU_MMU_OFF); 125 - __flush_dcache_area((unsigned long)&secondary_data, 125 + dcache_clean_inval_poc((unsigned long)&secondary_data, 126 126 (unsigned long)&secondary_data + 127 127 sizeof(secondary_data)); 128 128 ··· 145 145 pr_crit("CPU%u: failed to come online\n", cpu); 146 146 secondary_data.task = NULL; 147 147 secondary_data.stack = NULL; 148 - __flush_dcache_area((unsigned long)&secondary_data, 148 + dcache_clean_inval_poc((unsigned long)&secondary_data, 149 149 (unsigned long)&secondary_data + 150 150 sizeof(secondary_data)); 151 151 status = READ_ONCE(secondary_data.status);
+2 -2
arch/arm64/kernel/smp_spin_table.c
··· 36 36 unsigned long size = sizeof(secondary_holding_pen_release); 37 37 38 38 secondary_holding_pen_release = val; 39 - __flush_dcache_area((unsigned long)start, (unsigned long)start + size); 39 + dcache_clean_inval_poc((unsigned long)start, (unsigned long)start + size); 40 40 } 41 41 42 42 ··· 90 90 * the boot protocol. 91 91 */ 92 92 writeq_relaxed(pa_holding_pen, release_addr); 93 - __flush_dcache_area((__force unsigned long)release_addr, 93 + dcache_clean_inval_poc((__force unsigned long)release_addr, 94 94 (__force unsigned long)release_addr + 95 95 sizeof(*release_addr)); 96 96
+1 -1
arch/arm64/kernel/sys_compat.c
··· 41 41 dsb(ish); 42 42 } 43 43 44 - ret = __flush_cache_user_range(start, start + chunk); 44 + ret = caches_clean_inval_user_pou(start, start + chunk); 45 45 if (ret) 46 46 return ret; 47 47
+1 -1
arch/arm64/kvm/arm.c
··· 1064 1064 if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) 1065 1065 stage2_unmap_vm(vcpu->kvm); 1066 1066 else 1067 - __flush_icache_all(); 1067 + icache_inval_all_pou(); 1068 1068 } 1069 1069 1070 1070 vcpu_reset_hcr(vcpu);
+2 -2
arch/arm64/kvm/hyp/nvhe/cache.S
··· 7 7 #include <asm/assembler.h> 8 8 #include <asm/alternative.h> 9 9 10 - SYM_FUNC_START_PI(__flush_dcache_area) 10 + SYM_FUNC_START_PI(dcache_clean_inval_poc) 11 11 dcache_by_line_op civac, sy, x0, x1, x2, x3 12 12 ret 13 - SYM_FUNC_END_PI(__flush_dcache_area) 13 + SYM_FUNC_END_PI(dcache_clean_inval_poc)
+1 -1
arch/arm64/kvm/hyp/nvhe/setup.c
··· 134 134 for (i = 0; i < hyp_nr_cpus; i++) { 135 135 params = per_cpu_ptr(&kvm_init_params, i); 136 136 params->pgd_pa = __hyp_pa(pkvm_pgtable.pgd); 137 - __flush_dcache_area((unsigned long)params, 137 + dcache_clean_inval_poc((unsigned long)params, 138 138 (unsigned long)params + sizeof(*params)); 139 139 } 140 140 }
+1 -1
arch/arm64/kvm/hyp/nvhe/tlb.c
··· 104 104 * you should be running with VHE enabled. 105 105 */ 106 106 if (icache_is_vpipt()) 107 - __flush_icache_all(); 107 + icache_inval_all_pou(); 108 108 109 109 __tlb_switch_to_host(&cxt); 110 110 }
+2 -2
arch/arm64/kvm/hyp/pgtable.c
··· 841 841 if (need_flush) { 842 842 kvm_pte_t *pte_follow = kvm_pte_follow(pte, mm_ops); 843 843 844 - __flush_dcache_area((unsigned long)pte_follow, 844 + dcache_clean_inval_poc((unsigned long)pte_follow, 845 845 (unsigned long)pte_follow + 846 846 kvm_granule_size(level)); 847 847 } ··· 997 997 return 0; 998 998 999 999 pte_follow = kvm_pte_follow(pte, mm_ops); 1000 - __flush_dcache_area((unsigned long)pte_follow, 1000 + dcache_clean_inval_poc((unsigned long)pte_follow, 1001 1001 (unsigned long)pte_follow + 1002 1002 kvm_granule_size(level)); 1003 1003 return 0;
+2 -2
arch/arm64/lib/uaccess_flushcache.c
··· 15 15 * barrier to order the cache maintenance against the memcpy. 16 16 */ 17 17 memcpy(dst, src, cnt); 18 - __clean_dcache_area_pop((unsigned long)dst, (unsigned long)dst + cnt); 18 + dcache_clean_pop((unsigned long)dst, (unsigned long)dst + cnt); 19 19 } 20 20 EXPORT_SYMBOL_GPL(memcpy_flushcache); 21 21 ··· 33 33 rc = raw_copy_from_user(to, from, n); 34 34 35 35 /* See above */ 36 - __clean_dcache_area_pop((unsigned long)to, (unsigned long)to + n - rc); 36 + dcache_clean_pop((unsigned long)to, (unsigned long)to + n - rc); 37 37 return rc; 38 38 }
+29 -29
arch/arm64/mm/cache.S
··· 15 15 #include <asm/asm-uaccess.h> 16 16 17 17 /* 18 - * __flush_cache_range(start,end) [fixup] 18 + * caches_clean_inval_pou_macro(start,end) [fixup] 19 19 * 20 20 * Ensure that the I and D caches are coherent within specified region. 21 21 * This is typically used when code has been written to a memory region, ··· 25 25 * - end - virtual end address of region 26 26 * - fixup - optional label to branch to on user fault 27 27 */ 28 - .macro __flush_cache_range, fixup 28 + .macro caches_clean_inval_pou_macro, fixup 29 29 alternative_if ARM64_HAS_CACHE_IDC 30 30 dsb ishst 31 31 b .Ldc_skip_\@ ··· 43 43 .endm 44 44 45 45 /* 46 - * __flush_icache_range(start,end) 46 + * caches_clean_inval_pou(start,end) 47 47 * 48 48 * Ensure that the I and D caches are coherent within specified region. 49 49 * This is typically used when code has been written to a memory region, ··· 52 52 * - start - virtual start address of region 53 53 * - end - virtual end address of region 54 54 */ 55 - SYM_FUNC_START(__flush_icache_range) 56 - __flush_cache_range 55 + SYM_FUNC_START(caches_clean_inval_pou) 56 + caches_clean_inval_pou_macro 57 57 ret 58 - SYM_FUNC_END(__flush_icache_range) 58 + SYM_FUNC_END(caches_clean_inval_pou) 59 59 60 60 /* 61 - * __flush_cache_user_range(start,end) 61 + * caches_clean_inval_user_pou(start,end) 62 62 * 63 63 * Ensure that the I and D caches are coherent within specified region. 
64 64 * This is typically used when code has been written to a memory region, ··· 67 67 * - start - virtual start address of region 68 68 * - end - virtual end address of region 69 69 */ 70 - SYM_FUNC_START(__flush_cache_user_range) 70 + SYM_FUNC_START(caches_clean_inval_user_pou) 71 71 uaccess_ttbr0_enable x2, x3, x4 72 72 73 - __flush_cache_range 2f 73 + caches_clean_inval_pou_macro 2f 74 74 mov x0, xzr 75 75 1: 76 76 uaccess_ttbr0_disable x1, x2 ··· 78 78 2: 79 79 mov x0, #-EFAULT 80 80 b 1b 81 - SYM_FUNC_END(__flush_cache_user_range) 81 + SYM_FUNC_END(caches_clean_inval_user_pou) 82 82 83 83 /* 84 - * invalidate_icache_range(start,end) 84 + * icache_inval_pou(start,end) 85 85 * 86 86 * Ensure that the I cache is invalid within specified region. 87 87 * 88 88 * - start - virtual start address of region 89 89 * - end - virtual end address of region 90 90 */ 91 - SYM_FUNC_START(invalidate_icache_range) 91 + SYM_FUNC_START(icache_inval_pou) 92 92 alternative_if ARM64_HAS_CACHE_DIC 93 93 isb 94 94 ret ··· 96 96 97 97 invalidate_icache_by_line x0, x1, x2, x3 98 98 ret 99 - SYM_FUNC_END(invalidate_icache_range) 99 + SYM_FUNC_END(icache_inval_pou) 100 100 101 101 /* 102 - * __flush_dcache_area(start, end) 102 + * dcache_clean_inval_poc(start, end) 103 103 * 104 104 * Ensure that any D-cache lines for the interval [start, end) 105 105 * are cleaned and invalidated to the PoC. ··· 107 107 * - start - virtual start address of region 108 108 * - end - virtual end address of region 109 109 */ 110 - SYM_FUNC_START_PI(__flush_dcache_area) 110 + SYM_FUNC_START_PI(dcache_clean_inval_poc) 111 111 dcache_by_line_op civac, sy, x0, x1, x2, x3 112 112 ret 113 - SYM_FUNC_END_PI(__flush_dcache_area) 113 + SYM_FUNC_END_PI(dcache_clean_inval_poc) 114 114 115 115 /* 116 - * __clean_dcache_area_pou(start, end) 116 + * dcache_clean_pou(start, end) 117 117 * 118 118 * Ensure that any D-cache lines for the interval [start, end) 119 119 * are cleaned to the PoU. 
··· 121 121 * - start - virtual start address of region 122 122 * - end - virtual end address of region 123 123 */ 124 - SYM_FUNC_START(__clean_dcache_area_pou) 124 + SYM_FUNC_START(dcache_clean_pou) 125 125 alternative_if ARM64_HAS_CACHE_IDC 126 126 dsb ishst 127 127 ret 128 128 alternative_else_nop_endif 129 129 dcache_by_line_op cvau, ish, x0, x1, x2, x3 130 130 ret 131 - SYM_FUNC_END(__clean_dcache_area_pou) 131 + SYM_FUNC_END(dcache_clean_pou) 132 132 133 133 /* 134 - * __inval_dcache_area(start, end) 134 + * dcache_inval_poc(start, end) 135 135 * 136 136 * Ensure that any D-cache lines for the interval [start, end) 137 137 * are invalidated. Any partial lines at the ends of the interval are ··· 141 141 * - end - kernel end address of region 142 142 */ 143 143 SYM_FUNC_START_LOCAL(__dma_inv_area) 144 - SYM_FUNC_START_PI(__inval_dcache_area) 144 + SYM_FUNC_START_PI(dcache_inval_poc) 145 145 /* FALLTHROUGH */ 146 146 147 147 /* ··· 166 166 b.lo 2b 167 167 dsb sy 168 168 ret 169 - SYM_FUNC_END_PI(__inval_dcache_area) 169 + SYM_FUNC_END_PI(dcache_inval_poc) 170 170 SYM_FUNC_END(__dma_inv_area) 171 171 172 172 /* 173 - * __clean_dcache_area_poc(start, end) 173 + * dcache_clean_poc(start, end) 174 174 * 175 175 * Ensure that any D-cache lines for the interval [start, end) 176 176 * are cleaned to the PoC. ··· 179 179 * - end - virtual end address of region 180 180 */ 181 181 SYM_FUNC_START_LOCAL(__dma_clean_area) 182 - SYM_FUNC_START_PI(__clean_dcache_area_poc) 182 + SYM_FUNC_START_PI(dcache_clean_poc) 183 183 /* FALLTHROUGH */ 184 184 185 185 /* ··· 189 189 */ 190 190 dcache_by_line_op cvac, sy, x0, x1, x2, x3 191 191 ret 192 - SYM_FUNC_END_PI(__clean_dcache_area_poc) 192 + SYM_FUNC_END_PI(dcache_clean_poc) 193 193 SYM_FUNC_END(__dma_clean_area) 194 194 195 195 /* 196 - * __clean_dcache_area_pop(start, end) 196 + * dcache_clean_pop(start, end) 197 197 * 198 198 * Ensure that any D-cache lines for the interval [start, end) 199 199 * are cleaned to the PoP. 
··· 201 201 * - start - virtual start address of region 202 202 * - end - virtual end address of region 203 203 */ 204 - SYM_FUNC_START_PI(__clean_dcache_area_pop) 204 + SYM_FUNC_START_PI(dcache_clean_pop) 205 205 alternative_if_not ARM64_HAS_DCPOP 206 - b __clean_dcache_area_poc 206 + b dcache_clean_poc 207 207 alternative_else_nop_endif 208 208 dcache_by_line_op cvap, sy, x0, x1, x2, x3 209 209 ret 210 - SYM_FUNC_END_PI(__clean_dcache_area_pop) 210 + SYM_FUNC_END_PI(dcache_clean_pop) 211 211 212 212 /* 213 213 * __dma_flush_area(start, size)
+6 -6
arch/arm64/mm/flush.c
··· 17 17 void sync_icache_aliases(unsigned long start, unsigned long end) 18 18 { 19 19 if (icache_is_aliasing()) { 20 - __clean_dcache_area_pou(start, end); 21 - __flush_icache_all(); 20 + dcache_clean_pou(start, end); 21 + icache_inval_all_pou(); 22 22 } else { 23 23 /* 24 24 * Don't issue kick_all_cpus_sync() after I-cache invalidation 25 25 * for user mappings. 26 26 */ 27 - __flush_icache_range(start, end); 27 + caches_clean_inval_pou(start, end); 28 28 } 29 29 } 30 30 ··· 76 76 /* 77 77 * Additional functions defined in assembly. 78 78 */ 79 - EXPORT_SYMBOL(__flush_icache_range); 79 + EXPORT_SYMBOL(caches_clean_inval_pou); 80 80 81 81 #ifdef CONFIG_ARCH_HAS_PMEM_API 82 82 void arch_wb_cache_pmem(void *addr, size_t size) 83 83 { 84 84 /* Ensure order against any prior non-cacheable writes */ 85 85 dmb(osh); 86 - __clean_dcache_area_pop((unsigned long)addr, (unsigned long)addr + size); 86 + dcache_clean_pop((unsigned long)addr, (unsigned long)addr + size); 87 87 } 88 88 EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); 89 89 90 90 void arch_invalidate_pmem(void *addr, size_t size) 91 91 { 92 - __inval_dcache_area((unsigned long)addr, (unsigned long)addr + size); 92 + dcache_inval_poc((unsigned long)addr, (unsigned long)addr + size); 93 93 } 94 94 EXPORT_SYMBOL_GPL(arch_invalidate_pmem); 95 95 #endif