Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

blackfin: smp: adapt to generic smp helpers

Replace the blackfin IPI message queue with the generic SMP helper functions.

Signed-off-by: Steven Miao <realmz6@gmail.com>
Signed-off-by: Bob Liu <lliubbo@gmail.com>

Authored by Steven Miao and committed by Bob Liu.
50888469 0d7614f0

+66 -154
+1
arch/blackfin/Kconfig
···
38  38      select GENERIC_ATOMIC64
39  39      select GENERIC_IRQ_PROBE
40  40      select IRQ_PER_CPU if SMP
    41  +   select USE_GENERIC_SMP_HELPERS if SMP
41  42      select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
42  43      select GENERIC_SMP_IDLE_THREAD
43  44      select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
+2
arch/blackfin/include/asm/smp.h
···
18  18      #define raw_smp_processor_id()  blackfin_core_id()
19  19
20  20      extern void bfin_relocate_coreb_l1_mem(void);
    21  +   extern void arch_send_call_function_single_ipi(int cpu);
    22  +   extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
21  23
22  24      #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
23  25      asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
+63 -154
arch/blackfin/mach-common/smp.c
··· 48 48 49 49 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; 50 50 51 - #define BFIN_IPI_TIMER 0 52 - #define BFIN_IPI_RESCHEDULE 1 53 - #define BFIN_IPI_CALL_FUNC 2 54 - #define BFIN_IPI_CPU_STOP 3 51 + enum ipi_message_type { 52 + BFIN_IPI_TIMER, 53 + BFIN_IPI_RESCHEDULE, 54 + BFIN_IPI_CALL_FUNC, 55 + BFIN_IPI_CALL_FUNC_SINGLE, 56 + BFIN_IPI_CPU_STOP, 57 + }; 55 58 56 59 struct blackfin_flush_data { 57 60 unsigned long start; ··· 63 60 64 61 void *secondary_stack; 65 62 66 - 67 - struct smp_call_struct { 68 - void (*func)(void *info); 69 - void *info; 70 - int wait; 71 - cpumask_t *waitmask; 72 - }; 73 - 74 63 static struct blackfin_flush_data smp_flush_data; 75 64 76 65 static DEFINE_SPINLOCK(stop_lock); 77 - 78 - struct ipi_message { 79 - unsigned long type; 80 - struct smp_call_struct call_struct; 81 - }; 82 66 83 67 /* A magic number - stress test shows this is safe for common cases */ 84 68 #define BFIN_IPI_MSGQ_LEN 5 85 69 86 70 /* Simple FIFO buffer, overflow leads to panic */ 87 - struct ipi_message_queue { 88 - spinlock_t lock; 71 + struct ipi_data { 89 72 unsigned long count; 90 - unsigned long head; /* head of the queue */ 91 - struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN]; 73 + unsigned long bits; 92 74 }; 93 75 94 - static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue); 76 + static DEFINE_PER_CPU(struct ipi_data, bfin_ipi); 95 77 96 78 static void ipi_cpu_stop(unsigned int cpu) 97 79 { ··· 117 129 blackfin_icache_flush_range(fdata->start, fdata->end); 118 130 } 119 131 120 - static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) 121 - { 122 - int wait; 123 - void (*func)(void *info); 124 - void *info; 125 - func = msg->call_struct.func; 126 - info = msg->call_struct.info; 127 - wait = msg->call_struct.wait; 128 - func(info); 129 - if (wait) { 130 - #ifdef __ARCH_SYNC_CORE_DCACHE 131 - /* 132 - * 'wait' usually means synchronization between CPUs. 
133 - * Invalidate D cache in case shared data was changed 134 - * by func() to ensure cache coherence. 135 - */ 136 - resync_core_dcache(); 137 - #endif 138 - cpumask_clear_cpu(cpu, msg->call_struct.waitmask); 139 - } 140 - } 141 - 142 132 /* Use IRQ_SUPPLE_0 to request reschedule. 143 133 * When returning from interrupt to user space, 144 134 * there is chance to reschedule */ ··· 138 172 139 173 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) 140 174 { 141 - struct ipi_message *msg; 142 - struct ipi_message_queue *msg_queue; 175 + struct ipi_data *bfin_ipi_data; 143 176 unsigned int cpu = smp_processor_id(); 144 - unsigned long flags; 177 + unsigned long pending; 178 + unsigned long msg; 145 179 146 180 platform_clear_ipi(cpu, IRQ_SUPPLE_1); 147 181 148 - msg_queue = &__get_cpu_var(ipi_msg_queue); 182 + bfin_ipi_data = &__get_cpu_var(bfin_ipi); 149 183 150 - spin_lock_irqsave(&msg_queue->lock, flags); 184 + while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) { 185 + msg = 0; 186 + do { 187 + msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1); 188 + switch (msg) { 189 + case BFIN_IPI_TIMER: 190 + ipi_timer(); 191 + break; 192 + case BFIN_IPI_RESCHEDULE: 193 + scheduler_ipi(); 194 + break; 195 + case BFIN_IPI_CALL_FUNC: 196 + generic_smp_call_function_interrupt(); 197 + break; 151 198 152 - while (msg_queue->count) { 153 - msg = &msg_queue->ipi_message[msg_queue->head]; 154 - switch (msg->type) { 155 - case BFIN_IPI_TIMER: 156 - ipi_timer(); 157 - break; 158 - case BFIN_IPI_RESCHEDULE: 159 - scheduler_ipi(); 160 - break; 161 - case BFIN_IPI_CALL_FUNC: 162 - ipi_call_function(cpu, msg); 163 - break; 164 - case BFIN_IPI_CPU_STOP: 165 - ipi_cpu_stop(cpu); 166 - break; 167 - default: 168 - printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n", 169 - cpu, msg->type); 170 - break; 171 - } 172 - msg_queue->head++; 173 - msg_queue->head %= BFIN_IPI_MSGQ_LEN; 174 - msg_queue->count--; 199 + case BFIN_IPI_CALL_FUNC_SINGLE: 200 + 
generic_smp_call_function_single_interrupt(); 201 + break; 202 + 203 + case BFIN_IPI_CPU_STOP: 204 + ipi_cpu_stop(cpu); 205 + break; 206 + } 207 + } while (msg < BITS_PER_LONG); 208 + 209 + smp_mb(); 175 210 } 176 - spin_unlock_irqrestore(&msg_queue->lock, flags); 177 211 return IRQ_HANDLED; 178 212 } 179 213 180 - static void ipi_queue_init(void) 214 + static void bfin_ipi_init(void) 181 215 { 182 216 unsigned int cpu; 183 - struct ipi_message_queue *msg_queue; 217 + struct ipi_data *bfin_ipi_data; 184 218 for_each_possible_cpu(cpu) { 185 - msg_queue = &per_cpu(ipi_msg_queue, cpu); 186 - spin_lock_init(&msg_queue->lock); 187 - msg_queue->count = 0; 188 - msg_queue->head = 0; 219 + bfin_ipi_data = &per_cpu(bfin_ipi, cpu); 220 + bfin_ipi_data->bits = 0; 221 + bfin_ipi_data->count = 0; 189 222 } 190 223 } 191 224 192 - static inline void smp_send_message(cpumask_t callmap, unsigned long type, 193 - void (*func) (void *info), void *info, int wait) 225 + void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg) 194 226 { 195 227 unsigned int cpu; 196 - struct ipi_message_queue *msg_queue; 197 - struct ipi_message *msg; 198 - unsigned long flags, next_msg; 199 - cpumask_t waitmask; /* waitmask is shared by all cpus */ 228 + struct ipi_data *bfin_ipi_data; 229 + unsigned long flags; 200 230 201 - cpumask_copy(&waitmask, &callmap); 202 - for_each_cpu(cpu, &callmap) { 203 - msg_queue = &per_cpu(ipi_msg_queue, cpu); 204 - spin_lock_irqsave(&msg_queue->lock, flags); 205 - if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { 206 - next_msg = (msg_queue->head + msg_queue->count) 207 - % BFIN_IPI_MSGQ_LEN; 208 - msg = &msg_queue->ipi_message[next_msg]; 209 - msg->type = type; 210 - if (type == BFIN_IPI_CALL_FUNC) { 211 - msg->call_struct.func = func; 212 - msg->call_struct.info = info; 213 - msg->call_struct.wait = wait; 214 - msg->call_struct.waitmask = &waitmask; 215 - } 216 - msg_queue->count++; 217 - } else 218 - panic("IPI message queue overflow\n"); 219 - 
spin_unlock_irqrestore(&msg_queue->lock, flags); 231 + local_irq_save(flags); 232 + 233 + for_each_cpu(cpu, cpumask) { 234 + bfin_ipi_data = &per_cpu(bfin_ipi, cpu); 235 + smp_mb(); 236 + set_bit(msg, &bfin_ipi_data->bits); 237 + bfin_ipi_data->count++; 220 238 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1); 221 239 } 222 240 223 - if (wait) { 224 - while (!cpumask_empty(&waitmask)) 225 - blackfin_dcache_invalidate_range( 226 - (unsigned long)(&waitmask), 227 - (unsigned long)(&waitmask)); 228 - #ifdef __ARCH_SYNC_CORE_DCACHE 229 - /* 230 - * Invalidate D cache in case shared data was changed by 231 - * other processors to ensure cache coherence. 232 - */ 233 - resync_core_dcache(); 234 - #endif 235 - } 241 + local_irq_restore(flags); 236 242 } 237 243 238 - int smp_call_function(void (*func)(void *info), void *info, int wait) 244 + void arch_send_call_function_single_ipi(int cpu) 239 245 { 240 - cpumask_t callmap; 241 - 242 - preempt_disable(); 243 - cpumask_copy(&callmap, cpu_online_mask); 244 - cpumask_clear_cpu(smp_processor_id(), &callmap); 245 - if (!cpumask_empty(&callmap)) 246 - smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); 247 - 248 - preempt_enable(); 249 - 250 - return 0; 246 + send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE); 251 247 } 252 - EXPORT_SYMBOL_GPL(smp_call_function); 253 248 254 - int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, 255 - int wait) 249 + void arch_send_call_function_ipi_mask(const struct cpumask *mask) 256 250 { 257 - unsigned int cpu = cpuid; 258 - cpumask_t callmap; 259 - 260 - if (cpu_is_offline(cpu)) 261 - return 0; 262 - cpumask_clear(&callmap); 263 - cpumask_set_cpu(cpu, &callmap); 264 - 265 - smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); 266 - 267 - return 0; 251 + send_ipi(mask, BFIN_IPI_CALL_FUNC); 268 252 } 269 - EXPORT_SYMBOL_GPL(smp_call_function_single); 270 253 271 254 void smp_send_reschedule(int cpu) 272 255 { 273 - cpumask_t callmap; 274 - /* 
simply trigger an ipi */ 275 - 276 - cpumask_clear(&callmap); 277 - cpumask_set_cpu(cpu, &callmap); 278 - 279 - smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0); 256 + send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE); 280 257 281 258 return; 282 259 } 283 260 284 261 void smp_send_msg(const struct cpumask *mask, unsigned long type) 285 262 { 286 - smp_send_message(*mask, type, NULL, NULL, 0); 263 + send_ipi(mask, type); 287 264 } 288 265 289 266 void smp_timer_broadcast(const struct cpumask *mask) ··· 242 333 cpumask_copy(&callmap, cpu_online_mask); 243 334 cpumask_clear_cpu(smp_processor_id(), &callmap); 244 335 if (!cpumask_empty(&callmap)) 245 - smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); 336 + send_ipi(&callmap, BFIN_IPI_CPU_STOP); 246 337 247 338 preempt_enable(); 248 339 ··· 345 436 void __init smp_prepare_cpus(unsigned int max_cpus) 346 437 { 347 438 platform_prepare_cpus(max_cpus); 348 - ipi_queue_init(); 439 + bfin_ipi_init(); 349 440 platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0); 350 441 platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1); 351 442 }