Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
generic-ipi: use per cpu data for single cpu ipi calls
cpumask: convert lib/smp_processor_id to new cpumask ops
signals, debug: fix BUG: using smp_processor_id() in preemptible code in print_fatal_signal()

+36 -4
+2
kernel/signal.c
@@ -909,7 +909,9 @@
 	}
 #endif
 	printk("\n");
+	preempt_disable();
 	show_regs(regs);
+	preempt_enable();
 }
 
 static int __init setup_print_fatal_signals(char *str)
+33 -3
kernel/smp.c
@@ -18,6 +18,7 @@
 enum {
 	CSD_FLAG_WAIT		= 0x01,
 	CSD_FLAG_ALLOC		= 0x02,
+	CSD_FLAG_LOCK		= 0x04,
 };
 
 struct call_function_data {
@@ -186,7 +187,10 @@
 		if (data_flags & CSD_FLAG_WAIT) {
 			smp_wmb();
 			data->flags &= ~CSD_FLAG_WAIT;
+		} else if (data_flags & CSD_FLAG_LOCK) {
+			smp_wmb();
+			data->flags &= ~CSD_FLAG_LOCK;
 		} else if (data_flags & CSD_FLAG_ALLOC)
 			kfree(data);
 	}
@@ -195,6 +199,8 @@
 		smp_read_barrier_depends();
 	}
 }
+
+static DEFINE_PER_CPU(struct call_single_data, csd_data);
 
 /*
  * smp_call_function_single - Run a function on a specific CPU
@@ -224,14 +230,40 @@
 		func(info);
 		local_irq_restore(flags);
 	} else if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-		struct call_single_data *data = NULL;
+		struct call_single_data *data;
 
 		if (!wait) {
+			/*
+			 * We are calling a function on a single CPU
+			 * and we are not going to wait for it to finish.
+			 * We first try to allocate the data, but if we
+			 * fail, we fall back to use a per cpu data to pass
+			 * the information to that CPU. Since all callers
+			 * of this code will use the same data, we must
+			 * synchronize the callers to prevent a new caller
+			 * from corrupting the data before the callee
+			 * can access it.
+			 *
+			 * The CSD_FLAG_LOCK is used to let us know when
+			 * the IPI handler is done with the data.
+			 * The first caller will set it, and the callee
+			 * will clear it. The next caller must wait for
+			 * it to clear before we set it again. This
+			 * will make sure the callee is done with the
+			 * data before a new caller will use it.
+			 */
 			data = kmalloc(sizeof(*data), GFP_ATOMIC);
 			if (data)
 				data->flags = CSD_FLAG_ALLOC;
-		}
-		if (!data) {
+			else {
+				data = &per_cpu(csd_data, me);
+				while (data->flags & CSD_FLAG_LOCK)
+					cpu_relax();
+				data->flags = CSD_FLAG_LOCK;
+			}
+		} else {
 			data = &d;
 			data->flags = CSD_FLAG_WAIT;
 		}
+1 -1
lib/smp_processor_id.c
@@ -22,7 +22,7 @@
 	 * Kernel threads bound to a single CPU can safely use
 	 * smp_processor_id():
 	 */
-	if (cpus_equal(current->cpus_allowed, cpumask_of_cpu(this_cpu)))
+	if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
 		goto out;
 
 	/*