config GENERIC_PENDING_IRQ
	bool

# Support for generic irq migrating off cpu before the cpu is offline.
config GENERIC_IRQ_MIGRATION
	bool

# Alpha specific irq affinity mechanism
config AUTO_IRQ_AFFINITY
	bool
···11+/*22+ * Generic cpu hotunplug interrupt migration code copied from the33+ * arch/arm implementation44+ *55+ * Copyright (C) Russell King66+ *77+ * This program is free software; you can redistribute it and/or modify88+ * it under the terms of the GNU General Public License version 2 as99+ * published by the Free Software Foundation.1010+ */1111+#include <linux/interrupt.h>1212+#include <linux/ratelimit.h>1313+#include <linux/irq.h>1414+1515+#include "internals.h"1616+1717+static bool migrate_one_irq(struct irq_desc *desc)1818+{1919+ struct irq_data *d = irq_desc_get_irq_data(desc);2020+ const struct cpumask *affinity = d->common->affinity;2121+ struct irq_chip *c;2222+ bool ret = false;2323+2424+ /*2525+ * If this is a per-CPU interrupt, or the affinity does not2626+ * include this CPU, then we have nothing to do.2727+ */2828+ if (irqd_is_per_cpu(d) ||2929+ !cpumask_test_cpu(smp_processor_id(), affinity))3030+ return false;3131+3232+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {3333+ affinity = cpu_online_mask;3434+ ret = true;3535+ }3636+3737+ c = irq_data_get_irq_chip(d);3838+ if (!c->irq_set_affinity) {3939+ pr_warn_ratelimited("IRQ%u: unable to set affinity\n", d->irq);4040+ } else {4141+ int r = irq_do_set_affinity(d, affinity, false);4242+ if (r)4343+ pr_warn_ratelimited("IRQ%u: set affinity failed(%d).\n",4444+ d->irq, r);4545+ }4646+4747+ return ret;4848+}4949+5050+/**5151+ * irq_migrate_all_off_this_cpu - Migrate irqs away from offline cpu5252+ *5353+ * The current CPU has been marked offline. 
Migrate IRQs off this CPU.5454+ * If the affinity settings do not allow other CPUs, force them onto any5555+ * available CPU.5656+ *5757+ * Note: we must iterate over all IRQs, whether they have an attached5858+ * action structure or not, as we need to get chained interrupts too.5959+ */6060+void irq_migrate_all_off_this_cpu(void)6161+{6262+ unsigned int irq;6363+ struct irq_desc *desc;6464+ unsigned long flags;6565+6666+ local_irq_save(flags);6767+6868+ for_each_active_irq(irq) {6969+ bool affinity_broken;7070+7171+ desc = irq_to_desc(irq);7272+ raw_spin_lock(&desc->lock);7373+ affinity_broken = migrate_one_irq(desc);7474+ raw_spin_unlock(&desc->lock);7575+7676+ if (affinity_broken)7777+ pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",7878+ irq, smp_processor_id());7979+ }8080+8181+ local_irq_restore(flags);8282+}