Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
"This udpate delivers:

- A fix for dynamic interrupt allocation on x86, which is required to
exclude the GSI interrupts from the dynamically allocatable range.

This was detected with the newfangled tablet SoCs which have GPIOs
and therefore allocate a range of interrupts. The MSI allocations
already excluded the GSI range, so we never noticed before.

- The last missing set_irq_affinity() repair, which was delayed due
to testing issues

- A few bug fixes for the Armada SoC interrupt controller

- A memory allocation fix for the TI crossbar interrupt controller

- A trivial kernel-doc warning fix"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
irqchip: irq-crossbar: Not allocating enough memory
irqchip: armanda: Sanitize set_irq_affinity()
genirq: x86: Ensure that dynamic irq allocation does not conflict
linux/interrupt.h: fix new kernel-doc warnings
irqchip: armada-370-xp: Fix releasing of MSIs
irqchip: armada-370-xp: implement the ->check_device() msi_chip operation
irqchip: armada-370-xp: fix invalid cast of signed value into unsigned variable

Changed files (+42 -37)
arch/x86/kernel/apic/io_apic.c (+5)

···
 	return nr_irqs_gsi;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from)
+{
+	return from < nr_irqs_gsi ? nr_irqs_gsi : from;
+}
+
 int __init arch_probe_nr_irqs(void)
 {
 	int nr;
drivers/irqchip/irq-armada-370-xp.c (+20 -34)

···
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS   (0x30)
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)   (0x100 + irq*4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK   0xF
 
 #define ARMADA_370_XP_CPU_INTACK_OFFS       (0x44)
 #define ARMADA_375_PPI_CAUSE                (0x10)
···
 					struct msi_desc *desc)
 {
 	struct msi_msg msg;
-	irq_hw_number_t hwirq;
-	int virq;
+	int virq, hwirq;
 
 	hwirq = armada_370_xp_alloc_msi();
 	if (hwirq < 0)
···
 					    unsigned int irq)
 {
 	struct irq_data *d = irq_get_irq_data(irq);
+	unsigned long hwirq = d->hwirq;
+
 	irq_dispose_mapping(irq);
-	armada_370_xp_free_msi(d->hwirq);
+	armada_370_xp_free_msi(hwirq);
+}
+
+static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
+					   int nvec, int type)
+{
+	/* We support MSI, but not MSI-X */
+	if (type == PCI_CAP_ID_MSI)
+		return 0;
+	return -EINVAL;
 }
 
 static struct irq_chip armada_370_xp_msi_irq_chip = {
···
 
 	msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
 	msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+	msi_chip->check_device = armada_370_xp_check_msi_device;
 	msi_chip->of_node = node;
 
 	armada_370_xp_msi_domain =
···
 static int armada_xp_set_affinity(struct irq_data *d,
 				  const struct cpumask *mask_val, bool force)
 {
-	unsigned long reg;
-	unsigned long new_mask = 0;
-	unsigned long online_mask = 0;
-	unsigned long count = 0;
 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	unsigned long reg, mask;
 	int cpu;
 
-	for_each_cpu(cpu, mask_val) {
-		new_mask |= 1 << cpu_logical_map(cpu);
-		count++;
-	}
-
-	/*
-	 * Forbid mutlicore interrupt affinity
-	 * This is required since the MPIC HW doesn't limit
-	 * several CPUs from acknowledging the same interrupt.
-	 */
-	if (count > 1)
-		return -EINVAL;
-
-	for_each_cpu(cpu, cpu_online_mask)
-		online_mask |= 1 << cpu_logical_map(cpu);
+	/* Select a single core from the affinity mask which is online */
+	cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	mask = 1UL << cpu_logical_map(cpu);
 
 	raw_spin_lock(&irq_controller_lock);
-
 	reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-	reg = (reg & (~online_mask)) | new_mask;
+	reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
 	writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-
 	raw_spin_unlock(&irq_controller_lock);
 
 	return 0;
···
 
 #ifdef CONFIG_SMP
 	armada_xp_mpic_smp_cpu_init();
-
-	/*
-	 * Set the default affinity from all CPUs to the boot cpu.
-	 * This is required since the MPIC doesn't limit several CPUs
-	 * from acknowledging the same interrupt.
-	 */
-	cpumask_clear(irq_default_affinity);
-	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-
 #endif
 
 	armada_370_xp_msi_init(node, main_int_res.start);
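A note on the set_irq_affinity() rework above: instead of building an online-CPU mask and rejecting multi-CPU requests, the handler now picks one online CPU from the requested mask (cpumask_any_and()) and does a read-modify-write of the per-interrupt source control register, clearing the fixed 4-bit CPU field and setting a single bit. Below is a rough user-space model of that register update; the register value and CPU number are made up, and the plain bit shift stands in for the kernel's cpu_logical_map().

#include <stdio.h>
#include <stdint.h>

#define INT_SOURCE_CPU_MASK 0xF	/* low 4 bits: which CPU(s) receive the interrupt */

/* Model of the rework: route the interrupt to exactly one CPU. */
static uint32_t route_to_single_cpu(uint32_t reg, unsigned int cpu)
{
	uint32_t bit = 1u << cpu;	/* kernel: 1UL << cpu_logical_map(cpu) */

	/* Clear whatever CPUs were selected before, then select one. */
	return (reg & ~INT_SOURCE_CPU_MASK) | bit;
}

int main(void)
{
	uint32_t reg = 0x0000000f;	/* made-up value: currently routed to all four CPUs */

	reg = route_to_single_cpu(reg, 2);
	printf("source ctl = 0x%08x\n", (unsigned int)reg);	/* 0x00000004 */
	return 0;
}

Masking with the fixed ARMADA_370_XP_INT_SOURCE_CPU_MASK field also removes the old dependency on the set of currently online CPUs.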
drivers/irqchip/irq-crossbar.c (+1 -1)

···
 	int i, size, max, reserved = 0, entry;
 	const __be32 *irqsr;
 
-	cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL);
+	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
 
 	if (!cb)
 		return -ENOMEM;
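The crossbar fix corrects a classic allocation-size bug: sizeof(struct cb_device *) is the size of a pointer, not of the structure, so kzalloc() returned a buffer far smaller than struct cb_device. A standalone illustration follows; the struct members are stand-ins, not the driver's real layout.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the driver's struct cb_device; the real one is larger. */
struct cb_device {
	void *base;
	int max;
	int *irq_map;
};

int main(void)
{
	struct cb_device *cb;

	printf("sizeof(struct cb_device *) = %zu\n", sizeof(struct cb_device *)); /* 8 on 64-bit */
	printf("sizeof(struct cb_device)   = %zu\n", sizeof(struct cb_device));   /* 24 here */

	/* The fixed idiom: size the allocation from the pointee, not the pointer type. */
	cb = calloc(1, sizeof(*cb));
	if (!cb)
		return 1;
	free(cb);
	return 0;
}

sizeof(*cb) also stays correct if the pointer's type ever changes, which is why it is the preferred kernel style.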
include/linux/interrupt.h (+2 -2)

···
 /**
  * irq_set_affinity - Set the irq affinity of a given irq
  * @irq:	Interrupt to set affinity
- * @mask:	cpumask
+ * @cpumask:	cpumask
  *
  * Fails if cpumask does not contain an online CPU
  */
···
 /**
  * irq_force_affinity - Force the irq affinity of a given irq
  * @irq:	Interrupt to set affinity
- * @mask:	cpumask
+ * @cpumask:	cpumask
  *
  * Same as irq_set_affinity, but without checking the mask against
  * online cpus.
include/linux/irq.h (+2)

···
 	return d ? irqd_get_trigger_type(d) : 0;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from);
+
 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 		      struct module *owner);
kernel/irq/irqdesc.c (+7)

···
 		if (from > irq)
 			return -EINVAL;
 		from = irq;
+	} else {
+		/*
+		 * For interrupts which are freely allocated the
+		 * architecture can force a lower bound to the @from
+		 * argument. x86 uses this to exclude the GSI space.
+		 */
+		from = arch_dynirq_lower_bound(from);
 	}
 
 	mutex_lock(&sparse_irq_lock);
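To see where the new hook sits in the allocation path: __irq_alloc_descs() pins the search window when a specific irq number is requested, and otherwise lets the architecture raise the starting point. A compact user-space model of that branch is below, with a made-up GSI count standing in for x86's nr_irqs_gsi.

#include <stdio.h>

static unsigned int gsi_top = 24;	/* hypothetical GSI count (x86: nr_irqs_gsi) */

/* The x86 override from io_apic.c, modelled here in user space. */
static unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	return from < gsi_top ? gsi_top : from;
}

/*
 * Sketch of the start-of-search decision in __irq_alloc_descs(): a fixed
 * request (irq >= 0) forces the window to that irq, a dynamic request
 * (irq < 0) may be pushed above the GSI range by the architecture.
 */
static int pick_search_start(int irq, unsigned int from)
{
	if (irq >= 0) {
		if (from > (unsigned int)irq)
			return -1;	/* -EINVAL in the kernel */
		return irq;
	}
	return arch_dynirq_lower_bound(from);
}

int main(void)
{
	printf("fixed irq 10      -> start %d\n", pick_search_start(10, 0));   /* 10  */
	printf("dynamic, from 0   -> start %d\n", pick_search_start(-1, 0));   /* 24  */
	printf("dynamic, from 100 -> start %d\n", pick_search_start(-1, 100)); /* 100 */
	return 0;
}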
kernel/softirq.c (+5)

···
 {
 	return 0;
 }
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+	return from;
+}
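The softirq.c addition is the generic fallback; it is marked __weak so that an architecture's strong definition (the x86 one in io_apic.c above) replaces it at link time, while everyone else keeps the no-op behaviour. A minimal sketch of that weak/strong pattern with GCC or Clang follows; the file names, the GSI value, and the build commands are illustrative only, not the kernel build.

/* fallback.c -- generic default, dropped if a strong definition is linked in */
unsigned int __attribute__((weak)) arch_dynirq_lower_bound(unsigned int from)
{
	return from;	/* no restriction by default */
}

/* x86_override.c -- a strong definition wins over the weak one at link time */
unsigned int arch_dynirq_lower_bound(unsigned int from)
{
	return from < 24 ? 24 : from;	/* 24: made-up GSI count */
}

/* main.c -- build as: cc main.c fallback.c                  (prints 0)
 *           or:       cc main.c fallback.c x86_override.c   (prints 24) */
#include <stdio.h>

unsigned int arch_dynirq_lower_bound(unsigned int from);

int main(void)
{
	printf("%u\n", arch_dynirq_lower_bound(0));
	return 0;
}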