Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
"This update delivers:

- A fix for dynamic interrupt allocation on x86 which is required to
exclude the GSI interrupts from the dynamically allocatable range.

This was detected with the newfangled tablet SoCs which have GPIOs
and therefore allocate a range of interrupts. The MSI allocations
already excluded the GSI range, so we never noticed before.

- The last missing set_irq_affinity() repair, which was delayed due
to testing issues

- A few bug fixes for the armada SoC interrupt controller

- A memory allocation fix for the TI crossbar interrupt controller

- A trivial kernel-doc warning fix"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
irqchip: irq-crossbar: Not allocating enough memory
irqchip: armanda: Sanitize set_irq_affinity()
genirq: x86: Ensure that dynamic irq allocation does not conflict
linux/interrupt.h: fix new kernel-doc warnings
irqchip: armada-370-xp: Fix releasing of MSIs
irqchip: armada-370-xp: implement the ->check_device() msi_chip operation
irqchip: armada-370-xp: fix invalid cast of signed value into unsigned variable

+42 -37
+5
arch/x86/kernel/apic/io_apic.c
··· 3425 return nr_irqs_gsi; 3426 } 3427 3428 int __init arch_probe_nr_irqs(void) 3429 { 3430 int nr;
··· 3425 return nr_irqs_gsi; 3426 } 3427 3428 + unsigned int arch_dynirq_lower_bound(unsigned int from) 3429 + { 3430 + return from < nr_irqs_gsi ? nr_irqs_gsi : from; 3431 + } 3432 + 3433 int __init arch_probe_nr_irqs(void) 3434 { 3435 int nr;
+20 -34
drivers/irqchip/irq-armada-370-xp.c
··· 41 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) 42 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) 43 #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) 44 45 #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) 46 #define ARMADA_375_PPI_CAUSE (0x10) ··· 133 struct msi_desc *desc) 134 { 135 struct msi_msg msg; 136 - irq_hw_number_t hwirq; 137 - int virq; 138 139 hwirq = armada_370_xp_alloc_msi(); 140 if (hwirq < 0) ··· 159 unsigned int irq) 160 { 161 struct irq_data *d = irq_get_irq_data(irq); 162 irq_dispose_mapping(irq); 163 - armada_370_xp_free_msi(d->hwirq); 164 } 165 166 static struct irq_chip armada_370_xp_msi_irq_chip = { ··· 212 213 msi_chip->setup_irq = armada_370_xp_setup_msi_irq; 214 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; 215 msi_chip->of_node = node; 216 217 armada_370_xp_msi_domain = ··· 256 static int armada_xp_set_affinity(struct irq_data *d, 257 const struct cpumask *mask_val, bool force) 258 { 259 - unsigned long reg; 260 - unsigned long new_mask = 0; 261 - unsigned long online_mask = 0; 262 - unsigned long count = 0; 263 irq_hw_number_t hwirq = irqd_to_hwirq(d); 264 int cpu; 265 266 - for_each_cpu(cpu, mask_val) { 267 - new_mask |= 1 << cpu_logical_map(cpu); 268 - count++; 269 - } 270 - 271 - /* 272 - * Forbid mutlicore interrupt affinity 273 - * This is required since the MPIC HW doesn't limit 274 - * several CPUs from acknowledging the same interrupt. 
275 - */ 276 - if (count > 1) 277 - return -EINVAL; 278 - 279 - for_each_cpu(cpu, cpu_online_mask) 280 - online_mask |= 1 << cpu_logical_map(cpu); 281 282 raw_spin_lock(&irq_controller_lock); 283 - 284 reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 285 - reg = (reg & (~online_mask)) | new_mask; 286 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 287 - 288 raw_spin_unlock(&irq_controller_lock); 289 290 return 0; ··· 489 490 #ifdef CONFIG_SMP 491 armada_xp_mpic_smp_cpu_init(); 492 - 493 - /* 494 - * Set the default affinity from all CPUs to the boot cpu. 495 - * This is required since the MPIC doesn't limit several CPUs 496 - * from acknowledging the same interrupt. 497 - */ 498 - cpumask_clear(irq_default_affinity); 499 - cpumask_set_cpu(smp_processor_id(), irq_default_affinity); 500 - 501 #endif 502 503 armada_370_xp_msi_init(node, main_int_res.start);
··· 41 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS (0x30) 42 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) 43 #define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) 44 + #define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF 45 46 #define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) 47 #define ARMADA_375_PPI_CAUSE (0x10) ··· 132 struct msi_desc *desc) 133 { 134 struct msi_msg msg; 135 + int virq, hwirq; 136 137 hwirq = armada_370_xp_alloc_msi(); 138 if (hwirq < 0) ··· 159 unsigned int irq) 160 { 161 struct irq_data *d = irq_get_irq_data(irq); 162 + unsigned long hwirq = d->hwirq; 163 + 164 irq_dispose_mapping(irq); 165 + armada_370_xp_free_msi(hwirq); 166 + } 167 + 168 + static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev, 169 + int nvec, int type) 170 + { 171 + /* We support MSI, but not MSI-X */ 172 + if (type == PCI_CAP_ID_MSI) 173 + return 0; 174 + return -EINVAL; 175 } 176 177 static struct irq_chip armada_370_xp_msi_irq_chip = { ··· 201 202 msi_chip->setup_irq = armada_370_xp_setup_msi_irq; 203 msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq; 204 + msi_chip->check_device = armada_370_xp_check_msi_device; 205 msi_chip->of_node = node; 206 207 armada_370_xp_msi_domain = ··· 244 static int armada_xp_set_affinity(struct irq_data *d, 245 const struct cpumask *mask_val, bool force) 246 { 247 irq_hw_number_t hwirq = irqd_to_hwirq(d); 248 + unsigned long reg, mask; 249 int cpu; 250 251 + /* Select a single core from the affinity mask which is online */ 252 + cpu = cpumask_any_and(mask_val, cpu_online_mask); 253 + mask = 1UL << cpu_logical_map(cpu); 254 255 raw_spin_lock(&irq_controller_lock); 256 reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 257 + reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask; 258 writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq)); 259 raw_spin_unlock(&irq_controller_lock); 260 261 return 0; ··· 494 495 #ifdef CONFIG_SMP 496 armada_xp_mpic_smp_cpu_init(); 497 #endif 498 499 
armada_370_xp_msi_init(node, main_int_res.start);
+1 -1
drivers/irqchip/irq-crossbar.c
··· 107 int i, size, max, reserved = 0, entry; 108 const __be32 *irqsr; 109 110 - cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL); 111 112 if (!cb) 113 return -ENOMEM;
··· 107 int i, size, max, reserved = 0, entry; 108 const __be32 *irqsr; 109 110 + cb = kzalloc(sizeof(*cb), GFP_KERNEL); 111 112 if (!cb) 113 return -ENOMEM;
+2 -2
include/linux/interrupt.h
··· 210 /** 211 * irq_set_affinity - Set the irq affinity of a given irq 212 * @irq: Interrupt to set affinity 213 - * @mask: cpumask 214 * 215 * Fails if cpumask does not contain an online CPU 216 */ ··· 223 /** 224 * irq_force_affinity - Force the irq affinity of a given irq 225 * @irq: Interrupt to set affinity 226 - * @mask: cpumask 227 * 228 * Same as irq_set_affinity, but without checking the mask against 229 * online cpus.
··· 210 /** 211 * irq_set_affinity - Set the irq affinity of a given irq 212 * @irq: Interrupt to set affinity 213 + * @cpumask: cpumask 214 * 215 * Fails if cpumask does not contain an online CPU 216 */ ··· 223 /** 224 * irq_force_affinity - Force the irq affinity of a given irq 225 * @irq: Interrupt to set affinity 226 + * @cpumask: cpumask 227 * 228 * Same as irq_set_affinity, but without checking the mask against 229 * online cpus.
+2
include/linux/irq.h
··· 603 return d ? irqd_get_trigger_type(d) : 0; 604 } 605 606 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 607 struct module *owner); 608
··· 603 return d ? irqd_get_trigger_type(d) : 0; 604 } 605 606 + unsigned int arch_dynirq_lower_bound(unsigned int from); 607 + 608 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, 609 struct module *owner); 610
+7
kernel/irq/irqdesc.c
··· 363 if (from > irq) 364 return -EINVAL; 365 from = irq; 366 } 367 368 mutex_lock(&sparse_irq_lock);
··· 363 if (from > irq) 364 return -EINVAL; 365 from = irq; 366 + } else { 367 + /* 368 + * For interrupts which are freely allocated the 369 + * architecture can force a lower bound to the @from 370 + * argument. x86 uses this to exclude the GSI space. 371 + */ 372 + from = arch_dynirq_lower_bound(from); 373 } 374 375 mutex_lock(&sparse_irq_lock);
+5
kernel/softirq.c
··· 779 { 780 return 0; 781 }
··· 779 { 780 return 0; 781 } 782 + 783 + unsigned int __weak arch_dynirq_lower_bound(unsigned int from) 784 + { 785 + return from; 786 + }