Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

irqdomain: Avoid activating interrupts more than once

Since commit f3b0946d629c ("genirq/msi: Make sure PCI MSIs are
activated early"), we can end up activating a PCI/MSI twice (once
at allocation time, and once at startup time).

This is normally of no consequence, except that there is some
HW out there that may misbehave if activate is used more than once
(the GICv3 ITS, for example, uses the activate callback
to issue the MAPVI command, and the architecture spec says that
"If there is an existing mapping for the EventID-DeviceID
combination, behavior is UNPREDICTABLE").

While this could be worked around in each individual driver, it may
make more sense to tackle the issue at the core level. In order to
avoid getting in that situation, let's have a per-interrupt flag
to remember if we have already activated that interrupt or not.

Fixes: f3b0946d629c ("genirq/msi: Make sure PCI MSIs are activated early")
Reported-and-tested-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Cc: stable@vger.kernel.org
Link: http://lkml.kernel.org/r/1484668848-24361-1-git-send-email-marc.zyngier@arm.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

authored by

Marc Zyngier and committed by
Thomas Gleixner
08d85f3e 566cf877

+47 -14
+17
include/linux/irq.h
··· 184 * 185 * IRQD_TRIGGER_MASK - Mask for the trigger type bits 186 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending 187 * IRQD_NO_BALANCING - Balancing disabled for this IRQ 188 * IRQD_PER_CPU - Interrupt is per cpu 189 * IRQD_AFFINITY_SET - Interrupt affinity was set ··· 203 enum { 204 IRQD_TRIGGER_MASK = 0xf, 205 IRQD_SETAFFINITY_PENDING = (1 << 8), 206 IRQD_NO_BALANCING = (1 << 10), 207 IRQD_PER_CPU = (1 << 11), 208 IRQD_AFFINITY_SET = (1 << 12), ··· 312 static inline bool irqd_affinity_is_managed(struct irq_data *d) 313 { 314 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; 315 } 316 317 #undef __irqd_to_state
··· 184 * 185 * IRQD_TRIGGER_MASK - Mask for the trigger type bits 186 * IRQD_SETAFFINITY_PENDING - Affinity setting is pending 187 + * IRQD_ACTIVATED - Interrupt has already been activated 188 * IRQD_NO_BALANCING - Balancing disabled for this IRQ 189 * IRQD_PER_CPU - Interrupt is per cpu 190 * IRQD_AFFINITY_SET - Interrupt affinity was set ··· 202 enum { 203 IRQD_TRIGGER_MASK = 0xf, 204 IRQD_SETAFFINITY_PENDING = (1 << 8), 205 + IRQD_ACTIVATED = (1 << 9), 206 IRQD_NO_BALANCING = (1 << 10), 207 IRQD_PER_CPU = (1 << 11), 208 IRQD_AFFINITY_SET = (1 << 12), ··· 310 static inline bool irqd_affinity_is_managed(struct irq_data *d) 311 { 312 return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; 313 + } 314 + 315 + static inline bool irqd_is_activated(struct irq_data *d) 316 + { 317 + return __irqd_to_state(d) & IRQD_ACTIVATED; 318 + } 319 + 320 + static inline void irqd_set_activated(struct irq_data *d) 321 + { 322 + __irqd_to_state(d) |= IRQD_ACTIVATED; 323 + } 324 + 325 + static inline void irqd_clr_activated(struct irq_data *d) 326 + { 327 + __irqd_to_state(d) &= ~IRQD_ACTIVATED; 328 } 329 330 #undef __irqd_to_state
+30 -14
kernel/irq/irqdomain.c
··· 1346 } 1347 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); 1348 1349 /** 1350 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate 1351 * interrupt ··· 1380 */ 1381 void irq_domain_activate_irq(struct irq_data *irq_data) 1382 { 1383 - if (irq_data && irq_data->domain) { 1384 - struct irq_domain *domain = irq_data->domain; 1385 - 1386 - if (irq_data->parent_data) 1387 - irq_domain_activate_irq(irq_data->parent_data); 1388 - if (domain->ops->activate) 1389 - domain->ops->activate(domain, irq_data); 1390 } 1391 } 1392 ··· 1396 */ 1397 void irq_domain_deactivate_irq(struct irq_data *irq_data) 1398 { 1399 - if (irq_data && irq_data->domain) { 1400 - struct irq_domain *domain = irq_data->domain; 1401 - 1402 - if (domain->ops->deactivate) 1403 - domain->ops->deactivate(domain, irq_data); 1404 - if (irq_data->parent_data) 1405 - irq_domain_deactivate_irq(irq_data->parent_data); 1406 } 1407 } 1408
··· 1346 } 1347 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent); 1348 1349 + static void __irq_domain_activate_irq(struct irq_data *irq_data) 1350 + { 1351 + if (irq_data && irq_data->domain) { 1352 + struct irq_domain *domain = irq_data->domain; 1353 + 1354 + if (irq_data->parent_data) 1355 + __irq_domain_activate_irq(irq_data->parent_data); 1356 + if (domain->ops->activate) 1357 + domain->ops->activate(domain, irq_data); 1358 + } 1359 + } 1360 + 1361 + static void __irq_domain_deactivate_irq(struct irq_data *irq_data) 1362 + { 1363 + if (irq_data && irq_data->domain) { 1364 + struct irq_domain *domain = irq_data->domain; 1365 + 1366 + if (domain->ops->deactivate) 1367 + domain->ops->deactivate(domain, irq_data); 1368 + if (irq_data->parent_data) 1369 + __irq_domain_deactivate_irq(irq_data->parent_data); 1370 + } 1371 + } 1372 + 1373 /** 1374 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate 1375 * interrupt ··· 1356 */ 1357 void irq_domain_activate_irq(struct irq_data *irq_data) 1358 { 1359 + if (!irqd_is_activated(irq_data)) { 1360 + __irq_domain_activate_irq(irq_data); 1361 + irqd_set_activated(irq_data); 1362 } 1363 } 1364 ··· 1376 */ 1377 void irq_domain_deactivate_irq(struct irq_data *irq_data) 1378 { 1379 + if (irqd_is_activated(irq_data)) { 1380 + __irq_domain_deactivate_irq(irq_data); 1381 + irqd_clr_activated(irq_data); 1382 } 1383 } 1384