Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
"A few ARM fixes:

- Dietmar Eggemann noticed an issue with IRQ migration during CPU
hotplug stress testing.

- Mathieu Desnoyers noticed that a previous fix broke optimised
kprobes.

- Robin Murphy noticed a case where we were not clearing the dma_ops"

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm:
ARM: 8835/1: dma-mapping: Clear DMA ops on teardown
ARM: 8834/1: Fix: kprobes: optimized kprobes illegal instruction
ARM: 8824/1: fix a migrating irq bug when hotplug cpu

+5 -65
+1
arch/arm/Kconfig
···
1400 1400   config HOTPLUG_CPU
1401 1401       bool "Support for hot-pluggable CPUs"
1402 1402       depends on SMP
     1403 +     select GENERIC_IRQ_MIGRATION
1403 1404       help
1404 1405         Say Y here to experiment with turning CPUs off and on. CPUs
1405 1406         can be controlled through /sys/devices/system/cpu.
-1
arch/arm/include/asm/irq.h
···
25 25   #ifndef __ASSEMBLY__
26 26   struct irqaction;
27 27   struct pt_regs;
28    - extern void migrate_irqs(void);
29 28
30 29   extern void asm_do_IRQ(unsigned int, struct pt_regs *);
31 30   void handle_IRQ(unsigned int, struct pt_regs *);
-62
arch/arm/kernel/irq.c
···
31 31   #include <linux/smp.h>
32 32   #include <linux/init.h>
33 33   #include <linux/seq_file.h>
34    - #include <linux/ratelimit.h>
35 34   #include <linux/errno.h>
36 35   #include <linux/list.h>
37 36   #include <linux/kallsyms.h>
···
108 109     return nr_irqs;
109 110 }
110 111 #endif
111    -
112    - #ifdef CONFIG_HOTPLUG_CPU
113    - static bool migrate_one_irq(struct irq_desc *desc)
114    - {
115    -     struct irq_data *d = irq_desc_get_irq_data(desc);
116    -     const struct cpumask *affinity = irq_data_get_affinity_mask(d);
117    -     struct irq_chip *c;
118    -     bool ret = false;
119    -
120    -     /*
121    -      * If this is a per-CPU interrupt, or the affinity does not
122    -      * include this CPU, then we have nothing to do.
123    -      */
124    -     if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
125    -         return false;
126    -
127    -     if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
128    -         affinity = cpu_online_mask;
129    -         ret = true;
130    -     }
131    -
132    -     c = irq_data_get_irq_chip(d);
133    -     if (!c->irq_set_affinity)
134    -         pr_debug("IRQ%u: unable to set affinity\n", d->irq);
135    -     else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
136    -         cpumask_copy(irq_data_get_affinity_mask(d), affinity);
137    -
138    -     return ret;
139    - }
140    -
141    - /*
142    -  * The current CPU has been marked offline. Migrate IRQs off this CPU.
143    -  * If the affinity settings do not allow other CPUs, force them onto any
144    -  * available CPU.
145    -  *
146    -  * Note: we must iterate over all IRQs, whether they have an attached
147    -  * action structure or not, as we need to get chained interrupts too.
148    -  */
149    - void migrate_irqs(void)
150    - {
151    -     unsigned int i;
152    -     struct irq_desc *desc;
153    -     unsigned long flags;
154    -
155    -     local_irq_save(flags);
156    -
157    -     for_each_irq_desc(i, desc) {
158    -         bool affinity_broken;
159    -
160    -         raw_spin_lock(&desc->lock);
161    -         affinity_broken = migrate_one_irq(desc);
162    -         raw_spin_unlock(&desc->lock);
163    -
164    -         if (affinity_broken)
165    -             pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
166    -                 i, smp_processor_id());
167    -     }
168    -
169    -     local_irq_restore(flags);
170    - }
171    - #endif /* CONFIG_HOTPLUG_CPU */
+1 -1
arch/arm/kernel/smp.c
···
254 254     /*
255 255      * OK - migrate IRQs away from this CPU
256 256      */
257     -   migrate_irqs();
    257 +   irq_migrate_all_off_this_cpu();
258 258
259 259     /*
260 260      * Flush user cache and TLB mappings, and then remove this CPU
+2
arch/arm/mm/dma-mapping.c
···
2390 2390       return;
2391 2391
2392 2392   arm_teardown_iommu_dma_ops(dev);
     2393 + /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
     2394 + set_dma_ops(dev, NULL);
2393 2395 }
+1 -1
arch/arm/probes/kprobes/opt-arm.c
···
247 247   }
248 248
249 249   /* Copy arch-dep-instance from template. */
250     -  memcpy(code, (unsigned char *)optprobe_template_entry,
     250 + memcpy(code, (unsigned long *)&optprobe_template_entry,
251 251       TMPL_END_IDX * sizeof(kprobe_opcode_t));
252 252
253 253   /* Adjust buffer according to instruction. */