arch/x86/kernel/apic/io_apic.c (+2)

--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2117,6 +2117,7 @@
                         if (idx != -1 && irq_trigger(idx))
                                 unmask_ioapic_irq(irq_get_chip_data(0));
                 }
+                irq_domain_deactivate_irq(irq_data);
                 irq_domain_activate_irq(irq_data);
                 if (timer_irq_works()) {
                         if (disable_timer_pin_1 > 0)
@@ -2138,6 +2139,7 @@
                  * legacy devices should be connected to IO APIC #0
                  */
                 replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+                irq_domain_deactivate_irq(irq_data);
                 irq_domain_activate_irq(irq_data);
                 legacy_pic->unmask(0);
                 if (timer_irq_works()) {
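Both hunks above add an irq_domain_deactivate_irq() call directly in front of an existing irq_domain_activate_irq(). The reason is in the kernel/irq/irqdomain.c hunks below: activation becomes a one-shot operation guarded by the new IRQD_ACTIVATED flag, so the timer probing code in check_timer(), which deliberately re-activates the same interrupt on its retry paths, must drop the activated state first or the second activate would be silently skipped. Given a Linux IRQ number irq, the calling pattern looks like this (a sketch, not part of the patch; irq_get_irq_data() is the stock helper for looking up the irq_data of an IRQ number):

        struct irq_data *irq_data = irq_get_irq_data(irq);

        /* force re-programming even if the interrupt was already activated */
        irq_domain_deactivate_irq(irq_data);
        irq_domain_activate_irq(irq_data);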
arch/x86/kernel/hpet.c (+1)
···
include/linux/irq.h (+17)

--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -184,6 +184,7 @@
  *
  * IRQD_TRIGGER_MASK           - Mask for the trigger type bits
  * IRQD_SETAFFINITY_PENDING    - Affinity setting is pending
+ * IRQD_ACTIVATED              - Interrupt has already been activated
  * IRQD_NO_BALANCING           - Balancing disabled for this IRQ
  * IRQD_PER_CPU                - Interrupt is per cpu
  * IRQD_AFFINITY_SET           - Interrupt affinity was set
@@ -202,6 +203,7 @@
 enum {
         IRQD_TRIGGER_MASK               = 0xf,
         IRQD_SETAFFINITY_PENDING        = (1 <<  8),
+        IRQD_ACTIVATED                  = (1 <<  9),
         IRQD_NO_BALANCING               = (1 << 10),
         IRQD_PER_CPU                    = (1 << 11),
         IRQD_AFFINITY_SET               = (1 << 12),
@@ -308,6 +310,21 @@
 static inline bool irqd_affinity_is_managed(struct irq_data *d)
 {
         return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED;
+}
+
+static inline bool irqd_is_activated(struct irq_data *d)
+{
+        return __irqd_to_state(d) & IRQD_ACTIVATED;
+}
+
+static inline void irqd_set_activated(struct irq_data *d)
+{
+        __irqd_to_state(d) |= IRQD_ACTIVATED;
+}
+
+static inline void irqd_clr_activated(struct irq_data *d)
+{
+        __irqd_to_state(d) &= ~IRQD_ACTIVATED;
 }
 
 #undef __irqd_to_state
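Taken together, the three new helpers are a plain test/set/clear of one bit in the per-interrupt state word. A minimal standalone model (userspace C; struct irq_data_model and its state field are hypothetical stand-ins for the real struct irq_data state reached through the __irqd_to_state() macro) behaves identically:

        #include <assert.h>
        #include <stdbool.h>

        /* Stand-in for struct irq_data: in the kernel the state word is
         * reached via __irqd_to_state(); here it is a plain field. */
        struct irq_data_model {
                unsigned int state;
        };

        #define IRQD_ACTIVATED (1 << 9)

        static inline bool irqd_is_activated(struct irq_data_model *d)
        {
                return d->state & IRQD_ACTIVATED;
        }

        static inline void irqd_set_activated(struct irq_data_model *d)
        {
                d->state |= IRQD_ACTIVATED;
        }

        static inline void irqd_clr_activated(struct irq_data_model *d)
        {
                d->state &= ~IRQD_ACTIVATED;
        }

        int main(void)
        {
                struct irq_data_model d = { 0 };

                assert(!irqd_is_activated(&d));
                irqd_set_activated(&d);         /* on activation */
                assert(irqd_is_activated(&d));
                irqd_clr_activated(&d);         /* on deactivation */
                assert(!irqd_is_activated(&d));
                return 0;
        }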
kernel/irq/irqdomain.c (+30 -14)

--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1346,6 +1346,30 @@
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
+static void __irq_domain_activate_irq(struct irq_data *irq_data)
+{
+        if (irq_data && irq_data->domain) {
+                struct irq_domain *domain = irq_data->domain;
+
+                if (irq_data->parent_data)
+                        __irq_domain_activate_irq(irq_data->parent_data);
+                if (domain->ops->activate)
+                        domain->ops->activate(domain, irq_data);
+        }
+}
+
+static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
+{
+        if (irq_data && irq_data->domain) {
+                struct irq_domain *domain = irq_data->domain;
+
+                if (domain->ops->deactivate)
+                        domain->ops->deactivate(domain, irq_data);
+                if (irq_data->parent_data)
+                        __irq_domain_deactivate_irq(irq_data->parent_data);
+        }
+}
+
 /**
  * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
  *                           interrupt
@@ -1356,13 +1380,9 @@
  */
 void irq_domain_activate_irq(struct irq_data *irq_data)
 {
-        if (irq_data && irq_data->domain) {
-                struct irq_domain *domain = irq_data->domain;
-
-                if (irq_data->parent_data)
-                        irq_domain_activate_irq(irq_data->parent_data);
-                if (domain->ops->activate)
-                        domain->ops->activate(domain, irq_data);
+        if (!irqd_is_activated(irq_data)) {
+                __irq_domain_activate_irq(irq_data);
+                irqd_set_activated(irq_data);
         }
 }
 
@@ -1376,13 +1396,9 @@
  */
 void irq_domain_deactivate_irq(struct irq_data *irq_data)
 {
-        if (irq_data && irq_data->domain) {
-                struct irq_domain *domain = irq_data->domain;
-
-                if (domain->ops->deactivate)
-                        domain->ops->deactivate(domain, irq_data);
-                if (irq_data->parent_data)
-                        irq_domain_deactivate_irq(irq_data->parent_data);
+        if (irqd_is_activated(irq_data)) {
+                __irq_domain_deactivate_irq(irq_data);
+                irqd_clr_activated(irq_data);
         }
 }
 
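This change moves the recursive domain walk into the unguarded __irq_domain_activate_irq()/__irq_domain_deactivate_irq() helpers and gates the public entry points on the new IRQD_ACTIVATED flag, making activation one-shot. A minimal userspace model of that control flow (hw_activate()/hw_deactivate() are hypothetical stand-ins for the domain->ops callback walk) shows both effects: a repeated activate is skipped, and an explicit deactivate re-arms it, which is exactly what the io_apic.c hunks above rely on:

        #include <stdbool.h>
        #include <stdio.h>

        static bool activated;
        static int activate_calls;

        /* Stand-ins for the recursive domain->ops->activate/deactivate walk. */
        static void hw_activate(void)   { activate_calls++; }
        static void hw_deactivate(void) { }

        /* Models irq_domain_activate_irq() after this patch. */
        static void activate_model(void)
        {
                if (!activated) {         /* the IRQD_ACTIVATED guard */
                        hw_activate();    /* __irq_domain_activate_irq() */
                        activated = true; /* irqd_set_activated() */
                }
        }

        /* Models irq_domain_deactivate_irq() after this patch. */
        static void deactivate_model(void)
        {
                if (activated) {
                        hw_deactivate();   /* __irq_domain_deactivate_irq() */
                        activated = false; /* irqd_clr_activated() */
                }
        }

        int main(void)
        {
                activate_model();
                activate_model();  /* second call is skipped */
                printf("after double activate: %d call(s)\n", activate_calls); /* 1 */

                deactivate_model(); /* what the io_apic.c/hpet.c hunks add */
                activate_model();   /* genuinely re-programs: runs again */
                printf("after re-activate:     %d call(s)\n", activate_calls); /* 2 */
                return 0;
        }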