Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

csky/ftrace: Fixup ftrace_modify_code deadlock without CPU_HAS_ICACHE_INS

If ICACHE_INS is not supported, we use an IPI to sync the icache on
each core. But ftrace_modify_code is called by stop_machine from the
default implementation of arch_ftrace_update_code, and the stop_machine
callback runs with interrupts disabled. Sending an IPI while interrupts
are disabled will deadlock.

We can't use icache_flush with interrupts disabled, but the startup
make_nop is a special case that doesn't need to IPI the other cores.

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>

Guo Ren dd7c983e 89a3927a

+70 -6
+31
arch/csky/kernel/ftrace.c
··· 3 3 4 4 #include <linux/ftrace.h> 5 5 #include <linux/uaccess.h> 6 + #include <linux/stop_machine.h> 6 7 #include <asm/cacheflush.h> 7 8 8 9 #ifdef CONFIG_DYNAMIC_FTRACE ··· 201 200 } 202 201 #endif /* CONFIG_DYNAMIC_FTRACE */ 203 202 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 203 + 204 + #ifndef CONFIG_CPU_HAS_ICACHE_INS 205 + struct ftrace_modify_param { 206 + int command; 207 + atomic_t cpu_count; 208 + }; 209 + 210 + static int __ftrace_modify_code(void *data) 211 + { 212 + struct ftrace_modify_param *param = data; 213 + 214 + if (atomic_inc_return(&param->cpu_count) == 1) { 215 + ftrace_modify_all_code(param->command); 216 + atomic_inc(&param->cpu_count); 217 + } else { 218 + while (atomic_read(&param->cpu_count) <= num_online_cpus()) 219 + cpu_relax(); 220 + local_icache_inv_all(NULL); 221 + } 222 + 223 + return 0; 224 + } 225 + 226 + void arch_ftrace_update_code(int command) 227 + { 228 + struct ftrace_modify_param param = { command, ATOMIC_INIT(0) }; 229 + 230 + stop_machine(__ftrace_modify_code, &param, cpu_online_mask); 231 + } 232 + #endif 204 233 205 234 /* _mcount is defined in abi's mcount.S */ 206 235 EXPORT_SYMBOL(_mcount);
+39 -6
arch/csky/mm/cachev2.c
··· 7 7 #include <asm/cache.h> 8 8 #include <asm/barrier.h> 9 9 10 + /* for L1-cache */ 10 11 #define INS_CACHE (1 << 0) 12 + #define DATA_CACHE (1 << 1) 11 13 #define CACHE_INV (1 << 4) 14 + #define CACHE_CLR (1 << 5) 15 + #define CACHE_OMS (1 << 6) 12 16 13 17 void local_icache_inv_all(void *priv) 14 18 { 15 19 mtcr("cr17", INS_CACHE|CACHE_INV); 16 20 sync_is(); 17 - } 18 - 19 - void icache_inv_all(void) 20 - { 21 - on_each_cpu(local_icache_inv_all, NULL, 1); 22 21 } 23 22 24 23 #ifdef CONFIG_CPU_HAS_ICACHE_INS ··· 30 31 sync_is(); 31 32 } 32 33 #else 34 + struct cache_range { 35 + unsigned long start; 36 + unsigned long end; 37 + }; 38 + 39 + static DEFINE_SPINLOCK(cache_lock); 40 + 41 + static inline void cache_op_line(unsigned long i, unsigned int val) 42 + { 43 + mtcr("cr22", i); 44 + mtcr("cr17", val); 45 + } 46 + 47 + void local_icache_inv_range(void *priv) 48 + { 49 + struct cache_range *param = priv; 50 + unsigned long i = param->start & ~(L1_CACHE_BYTES - 1); 51 + unsigned long flags; 52 + 53 + spin_lock_irqsave(&cache_lock, flags); 54 + 55 + for (; i < param->end; i += L1_CACHE_BYTES) 56 + cache_op_line(i, INS_CACHE | CACHE_INV | CACHE_OMS); 57 + 58 + spin_unlock_irqrestore(&cache_lock, flags); 59 + 60 + sync_is(); 61 + } 62 + 33 63 void icache_inv_range(unsigned long start, unsigned long end) 34 64 { 35 - icache_inv_all(); 65 + struct cache_range param = { start, end }; 66 + 67 + if (irqs_disabled()) 68 + local_icache_inv_range(&param); 69 + else 70 + on_each_cpu(local_icache_inv_range, &param, 1); 36 71 } 37 72 #endif 38 73