Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

genirq: Allow forcing cpu affinity of interrupts

The current implementation of irq_set_affinity() refuses rightfully to
route an interrupt to an offline cpu.

But there is a special case, where this is actually desired. Some of
the ARM SoCs have per cpu timers which require setting the affinity
during cpu startup where the cpu is not yet in the online mask.

If we can't do that, then the local timer interrupt for the CPU that is
about to come online is routed to some random online cpu.

The developers of the affected machines tried to work around that
issue, but that results in a massive mess in that timer code.

We have a yet unused argument in the set_affinity callbacks of the irq
chips, which I added back then for a similar reason. It was never
required, so it went unused. But I'm happy that I never removed it.

That allows us to implement a sane handling of the above scenario. So
the affected SoC drivers can add the required force handling to their
interrupt chip, switch the timer code to irq_force_affinity() and
things just work.

This does not affect any existing user of irq_set_affinity().

Tagged for stable to allow a simple fix of the affected SoC clock
event drivers.

Reported-and-tested-by: Krzysztof Kozlowski <k.kozlowski@samsung.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Kukjin Kim <kgene.kim@samsung.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/20140416143315.717251504@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

+43 -14
+1 -1
arch/mips/cavium-octeon/octeon-irq.c
··· 635 635 cpumask_clear(&new_affinity); 636 636 cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity); 637 637 } 638 - __irq_set_affinity_locked(data, &new_affinity); 638 + irq_set_affinity_locked(data, &new_affinity, false); 639 639 } 640 640 641 641 static int octeon_irq_ciu_set_affinity(struct irq_data *data,
+34 -1
include/linux/interrupt.h
··· 203 203 204 204 extern cpumask_var_t irq_default_affinity; 205 205 206 - extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask); 206 + /* Internal implementation. Use the helpers below */ 207 + extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask, 208 + bool force); 209 + 210 + /** 211 + * irq_set_affinity - Set the irq affinity of a given irq 212 + * @irq: Interrupt to set affinity 213 + * @mask: cpumask 214 + * 215 + * Fails if cpumask does not contain an online CPU 216 + */ 217 + static inline int 218 + irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) 219 + { 220 + return __irq_set_affinity(irq, cpumask, false); 221 + } 222 + 223 + /** 224 + * irq_force_affinity - Force the irq affinity of a given irq 225 + * @irq: Interrupt to set affinity 226 + * @mask: cpumask 227 + * 228 + * Same as irq_set_affinity, but without checking the mask against 229 + * online cpus. 230 + * 231 + * Solely for low level cpu hotplug code, where we need to make per 232 + * cpu interrupts affine before the cpu becomes online. 233 + */ 234 + static inline int 235 + irq_force_affinity(unsigned int irq, const struct cpumask *cpumask) 236 + { 237 + return __irq_set_affinity(irq, cpumask, true); 238 + } 239 + 207 240 extern int irq_can_set_affinity(unsigned int irq); 208 241 extern int irq_select_affinity(unsigned int irq); 209 242
+2 -1
include/linux/irq.h
··· 394 394 395 395 extern void irq_cpu_online(void); 396 396 extern void irq_cpu_offline(void); 397 - extern int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *cpumask); 397 + extern int irq_set_affinity_locked(struct irq_data *data, 398 + const struct cpumask *cpumask, bool force); 398 399 399 400 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) 400 401 void irq_move_irq(struct irq_data *data);
+6 -11
kernel/irq/manage.c
··· 180 180 struct irq_chip *chip = irq_data_get_irq_chip(data); 181 181 int ret; 182 182 183 - ret = chip->irq_set_affinity(data, mask, false); 183 + ret = chip->irq_set_affinity(data, mask, force); 184 184 switch (ret) { 185 185 case IRQ_SET_MASK_OK: 186 186 cpumask_copy(data->affinity, mask); ··· 192 192 return ret; 193 193 } 194 194 195 - int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) 195 + int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, 196 + bool force) 196 197 { 197 198 struct irq_chip *chip = irq_data_get_irq_chip(data); 198 199 struct irq_desc *desc = irq_data_to_desc(data); ··· 203 202 return -EINVAL; 204 203 205 204 if (irq_can_move_pcntxt(data)) { 206 - ret = irq_do_set_affinity(data, mask, false); 205 + ret = irq_do_set_affinity(data, mask, force); 207 206 } else { 208 207 irqd_set_move_pending(data); 209 208 irq_copy_pending(desc, mask); ··· 218 217 return ret; 219 218 } 220 219 221 - /** 222 - * irq_set_affinity - Set the irq affinity of a given irq 223 - * @irq: Interrupt to set affinity 224 - * @mask: cpumask 225 - * 226 - */ 227 - int irq_set_affinity(unsigned int irq, const struct cpumask *mask) 220 + int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) 228 221 { 229 222 struct irq_desc *desc = irq_to_desc(irq); 230 223 unsigned long flags; ··· 228 233 return -EINVAL; 229 234 230 235 raw_spin_lock_irqsave(&desc->lock, flags); 231 - ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask); 236 + ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); 232 237 raw_spin_unlock_irqrestore(&desc->lock, flags); 233 238 return ret; 234 239 }