from being initiated from tasks that might run on the CPU to
be de-jittered. (It is OK to force this CPU offline and then
bring it back online before you start your application.)
IRQ_POLL_SOFTIRQ: Do all of the following:
1. Force block-device interrupts onto some other CPU.
2. Initiate any block I/O and block-I/O polling on other CPUs.
3. Once your application has started, prevent CPU-hotplug operations
···66#include <linux/module.h>77#include <linux/init.h>88#include <linux/bio.h>99-#include <linux/blkdev.h>109#include <linux/interrupt.h>1110#include <linux/cpu.h>1212-#include <linux/blk-iopoll.h>1111+#include <linux/irq_poll.h>1312#include <linux/delay.h>14131515-#include "blk.h"1616-1717-static unsigned int blk_iopoll_budget __read_mostly = 256;1414+static unsigned int irq_poll_budget __read_mostly = 256;18151916static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);20172118/**2222- * blk_iopoll_sched - Schedule a run of the iopoll handler1919+ * irq_poll_sched - Schedule a run of the iopoll handler2320 * @iop: The parent iopoll structure2421 *2522 * Description:2626- * Add this blk_iopoll structure to the pending poll list and trigger the2323+ * Add this irq_poll structure to the pending poll list and trigger the2724 * raise of the blk iopoll softirq. The driver must already have gotten a2828- * successful return from blk_iopoll_sched_prep() before calling this.2525+ * successful return from irq_poll_sched_prep() before calling this.2926 **/3030-void blk_iopoll_sched(struct blk_iopoll *iop)2727+void irq_poll_sched(struct irq_poll *iop)3128{3229 unsigned long flags;33303431 local_irq_save(flags);3532 list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));3636- __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);3333+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);3734 local_irq_restore(flags);3835}3939-EXPORT_SYMBOL(blk_iopoll_sched);3636+EXPORT_SYMBOL(irq_poll_sched);40374138/**4242- * __blk_iopoll_complete - Mark this @iop as un-polled again3939+ * __irq_poll_complete - Mark this @iop as un-polled again4340 * @iop: The parent iopoll structure4441 *4542 * Description:4646- * See blk_iopoll_complete(). This function must be called with interrupts4343+ * See irq_poll_complete(). 
This function must be called with interrupts4744 * disabled.4845 **/4949-void __blk_iopoll_complete(struct blk_iopoll *iop)4646+void __irq_poll_complete(struct irq_poll *iop)5047{5148 list_del(&iop->list);5249 smp_mb__before_atomic();5353- clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);5050+ clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);5451}5555-EXPORT_SYMBOL(__blk_iopoll_complete);5252+EXPORT_SYMBOL(__irq_poll_complete);56535754/**5858- * blk_iopoll_complete - Mark this @iop as un-polled again5555+ * irq_poll_complete - Mark this @iop as un-polled again5956 * @iop: The parent iopoll structure6057 *6158 * Description:6259 * If a driver consumes less than the assigned budget in its run of the6360 * iopoll handler, it'll end the polled mode by calling this function. The6464- * iopoll handler will not be invoked again before blk_iopoll_sched_prep()6161+ * iopoll handler will not be invoked again before irq_poll_sched_prep()6562 * is called.6663 **/6767-void blk_iopoll_complete(struct blk_iopoll *iop)6464+void irq_poll_complete(struct irq_poll *iop)6865{6966 unsigned long flags;70677168 local_irq_save(flags);7272- __blk_iopoll_complete(iop);6969+ __irq_poll_complete(iop);7370 local_irq_restore(flags);7471}7575-EXPORT_SYMBOL(blk_iopoll_complete);7272+EXPORT_SYMBOL(irq_poll_complete);76737777-static void blk_iopoll_softirq(struct softirq_action *h)7474+static void irq_poll_softirq(struct softirq_action *h)7875{7976 struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);8080- int rearm = 0, budget = blk_iopoll_budget;7777+ int rearm = 0, budget = irq_poll_budget;8178 unsigned long start_time = jiffies;82798380 local_irq_disable();84818582 while (!list_empty(list)) {8686- struct blk_iopoll *iop;8383+ struct irq_poll *iop;8784 int work, weight;88858986 /*···98101 * entries to the tail of this list, and only ->poll()99102 * calls can remove this head entry from the list.100103 */101101- iop = list_entry(list->next, struct blk_iopoll, list);104104+ iop = 
list_entry(list->next, struct irq_poll, list);102105103106 weight = iop->weight;104107 work = 0;105105- if (test_bit(IOPOLL_F_SCHED, &iop->state))108108+ if (test_bit(IRQ_POLL_F_SCHED, &iop->state))106109 work = iop->poll(iop, weight);107110108111 budget -= work;···118121 * move the instance around on the list at-will.119122 */120123 if (work >= weight) {121121- if (blk_iopoll_disable_pending(iop))122122- __blk_iopoll_complete(iop);124124+ if (irq_poll_disable_pending(iop))125125+ __irq_poll_complete(iop);123126 else124127 list_move_tail(&iop->list, list);125128 }126129 }127130128131 if (rearm)129129- __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);132132+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);130133131134 local_irq_enable();132135}133136134137/**135135- * blk_iopoll_disable - Disable iopoll on this @iop138138+ * irq_poll_disable - Disable iopoll on this @iop136139 * @iop: The parent iopoll structure137140 *138141 * Description:139142 * Disable io polling and wait for any pending callbacks to have completed.140143 **/141141-void blk_iopoll_disable(struct blk_iopoll *iop)144144+void irq_poll_disable(struct irq_poll *iop)142145{143143- set_bit(IOPOLL_F_DISABLE, &iop->state);144144- while (test_and_set_bit(IOPOLL_F_SCHED, &iop->state))146146+ set_bit(IRQ_POLL_F_DISABLE, &iop->state);147147+ while (test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state))145148 msleep(1);146146- clear_bit(IOPOLL_F_DISABLE, &iop->state);149149+ clear_bit(IRQ_POLL_F_DISABLE, &iop->state);147150}148148-EXPORT_SYMBOL(blk_iopoll_disable);151151+EXPORT_SYMBOL(irq_poll_disable);149152150153/**151151- * blk_iopoll_enable - Enable iopoll on this @iop154154+ * irq_poll_enable - Enable iopoll on this @iop152155 * @iop: The parent iopoll structure153156 *154157 * Description:155158 * Enable iopoll on this @iop. 
Note that the handler run will not be156159 * scheduled, it will only mark it as active.157160 **/158158-void blk_iopoll_enable(struct blk_iopoll *iop)161161+void irq_poll_enable(struct irq_poll *iop)159162{160160- BUG_ON(!test_bit(IOPOLL_F_SCHED, &iop->state));163163+ BUG_ON(!test_bit(IRQ_POLL_F_SCHED, &iop->state));161164 smp_mb__before_atomic();162162- clear_bit_unlock(IOPOLL_F_SCHED, &iop->state);165165+ clear_bit_unlock(IRQ_POLL_F_SCHED, &iop->state);163166}164164-EXPORT_SYMBOL(blk_iopoll_enable);167167+EXPORT_SYMBOL(irq_poll_enable);165168166169/**167167- * blk_iopoll_init - Initialize this @iop170170+ * irq_poll_init - Initialize this @iop168171 * @iop: The parent iopoll structure169172 * @weight: The default weight (or command completion budget)170173 * @poll_fn: The handler to invoke171174 *172175 * Description:173173- * Initialize this blk_iopoll structure. Before being actively used, the174174- * driver must call blk_iopoll_enable().176176+ * Initialize this irq_poll structure. 
Before being actively used, the177177+ * driver must call irq_poll_enable().175178 **/176176-void blk_iopoll_init(struct blk_iopoll *iop, int weight, blk_iopoll_fn *poll_fn)179179+void irq_poll_init(struct irq_poll *iop, int weight, irq_poll_fn *poll_fn)177180{178181 memset(iop, 0, sizeof(*iop));179182 INIT_LIST_HEAD(&iop->list);180183 iop->weight = weight;181184 iop->poll = poll_fn;182182- set_bit(IOPOLL_F_SCHED, &iop->state);185185+ set_bit(IRQ_POLL_F_SCHED, &iop->state);183186}184184-EXPORT_SYMBOL(blk_iopoll_init);187187+EXPORT_SYMBOL(irq_poll_init);185188186186-static int blk_iopoll_cpu_notify(struct notifier_block *self,189189+static int irq_poll_cpu_notify(struct notifier_block *self,187190 unsigned long action, void *hcpu)188191{189192 /*···196199 local_irq_disable();197200 list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),198201 this_cpu_ptr(&blk_cpu_iopoll));199199- __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);202202+ __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);200203 local_irq_enable();201204 }202205203206 return NOTIFY_OK;204207}205208206206-static struct notifier_block blk_iopoll_cpu_notifier = {207207- .notifier_call = blk_iopoll_cpu_notify,209209+static struct notifier_block irq_poll_cpu_notifier = {210210+ .notifier_call = irq_poll_cpu_notify,208211};209212210210-static __init int blk_iopoll_setup(void)213213+static __init int irq_poll_setup(void)211214{212215 int i;213216214217 for_each_possible_cpu(i)215218 INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));216219217217- open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);218218- register_hotcpu_notifier(&blk_iopoll_cpu_notifier);220220+ open_softirq(IRQ_POLL_SOFTIRQ, irq_poll_softirq);221221+ register_hotcpu_notifier(&irq_poll_cpu_notifier);219222 return 0;220223}221221-subsys_initcall(blk_iopoll_setup);224224+subsys_initcall(irq_poll_setup);
+1
drivers/scsi/Kconfig
···11021102 tristate "IBM Power Linux RAID adapter support"11031103 depends on PCI && SCSI && ATA11041104 select FW_LOADER11051105+ select IRQ_POLL11051106 ---help---11061107 This driver supports the IBM Power Linux family RAID adapters.11071108 This includes IBM pSeries 5712, 5703, 5709, and 570A, as well
+1
drivers/scsi/be2iscsi/Kconfig
···33 depends on PCI && SCSI && NET44 select SCSI_ISCSI_ATTRS55 select ISCSI_BOOT_SYSFS66+ select IRQ_POLL6778 help89 This driver implements the iSCSI functionality for Emulex
···1292129212931293 for (i = 0; i < phba->num_cpus; i++) {12941294 pbe_eq = &phwi_context->be_eq[i];12951295- blk_iopoll_disable(&pbe_eq->iopoll);12951295+ irq_poll_disable(&pbe_eq->iopoll);12961296 beiscsi_process_cq(pbe_eq);12971297- blk_iopoll_enable(&pbe_eq->iopoll);12971297+ irq_poll_enable(&pbe_eq->iopoll);12981298 }12991299}13001300
+12-12
drivers/scsi/be2iscsi/be_main.c
···910910 num_eq_processed = 0;911911 while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]912912 & EQE_VALID_MASK) {913913- if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))914914- blk_iopoll_sched(&pbe_eq->iopoll);913913+ if (!irq_poll_sched_prep(&pbe_eq->iopoll))914914+ irq_poll_sched(&pbe_eq->iopoll);915915916916 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);917917 queue_tail_inc(eq);···972972 spin_unlock_irqrestore(&phba->isr_lock, flags);973973 num_mcceq_processed++;974974 } else {975975- if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))976976- blk_iopoll_sched(&pbe_eq->iopoll);975975+ if (!irq_poll_sched_prep(&pbe_eq->iopoll))976976+ irq_poll_sched(&pbe_eq->iopoll);977977 num_ioeq_processed++;978978 }979979 AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);···22952295 hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);22962296}2297229722982298-static int be_iopoll(struct blk_iopoll *iop, int budget)22982298+static int be_iopoll(struct irq_poll *iop, int budget)22992299{23002300 unsigned int ret;23012301 struct beiscsi_hba *phba;···23062306 pbe_eq->cq_count += ret;23072307 if (ret < budget) {23082308 phba = pbe_eq->phba;23092309- blk_iopoll_complete(iop);23092309+ irq_poll_complete(iop);23102310 beiscsi_log(phba, KERN_INFO,23112311 BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,23122312 "BM_%d : rearm pbe_eq->q.id =%d\n",···5293529352945294 for (i = 0; i < phba->num_cpus; i++) {52955295 pbe_eq = &phwi_context->be_eq[i];52965296- blk_iopoll_disable(&pbe_eq->iopoll);52965296+ irq_poll_disable(&pbe_eq->iopoll);52975297 }5298529852995299 if (unload_state == BEISCSI_CLEAN_UNLOAD) {···5579557955805580 for (i = 0; i < phba->num_cpus; i++) {55815581 pbe_eq = &phwi_context->be_eq[i];55825582- blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,55825582+ irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,55835583 be_iopoll);55845584- blk_iopoll_enable(&pbe_eq->iopoll);55845584+ irq_poll_enable(&pbe_eq->iopoll);55855585 }5586558655875587 i = (phba->msix_enabled) ? 
i : 0;···5752575257535753 for (i = 0; i < phba->num_cpus; i++) {57545754 pbe_eq = &phwi_context->be_eq[i];57555755- blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,57555755+ irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget,57565756 be_iopoll);57575757- blk_iopoll_enable(&pbe_eq->iopoll);57575757+ irq_poll_enable(&pbe_eq->iopoll);57585758 }5759575957605760 i = (phba->msix_enabled) ? i : 0;···57955795 destroy_workqueue(phba->wq);57965796 for (i = 0; i < phba->num_cpus; i++) {57975797 pbe_eq = &phwi_context->be_eq[i];57985798- blk_iopoll_disable(&pbe_eq->iopoll);57985798+ irq_poll_disable(&pbe_eq->iopoll);57995799 }58005800free_twq:58015801 beiscsi_clean_port(phba);
+14-14
drivers/scsi/ipr.c
···36383638 .store = ipr_store_reset_adapter36393639};3640364036413641-static int ipr_iopoll(struct blk_iopoll *iop, int budget);36413641+static int ipr_iopoll(struct irq_poll *iop, int budget);36423642 /**36433643 * ipr_show_iopoll_weight - Show ipr polling mode36443644 * @dev: class device struct···36813681 int i;3682368236833683 if (!ioa_cfg->sis64) {36843684- dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");36843684+ dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");36853685 return -EINVAL;36863686 }36873687 if (kstrtoul(buf, 10, &user_iopoll_weight))36883688 return -EINVAL;3689368936903690 if (user_iopoll_weight > 256) {36913691- dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");36913691+ dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");36923692 return -EINVAL;36933693 }3694369436953695 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {36963696- dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");36963696+ dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");36973697 return strlen(buf);36983698 }3699369937003700 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {37013701 for (i = 1; i < ioa_cfg->hrrq_num; i++)37023702- blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);37023702+ irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);37033703 }3704370437053705 spin_lock_irqsave(shost->host_lock, lock_flags);37063706 ioa_cfg->iopoll_weight = user_iopoll_weight;37073707 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {37083708 for (i = 1; i < ioa_cfg->hrrq_num; i++) {37093709- blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,37093709+ irq_poll_init(&ioa_cfg->hrrq[i].iopoll,37103710 ioa_cfg->iopoll_weight, ipr_iopoll);37113711- blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);37113711+ irq_poll_enable(&ioa_cfg->hrrq[i].iopoll);37123712 }37133713 }37143714 
spin_unlock_irqrestore(shost->host_lock, lock_flags);···55695569 return num_hrrq;55705570}5571557155725572-static int ipr_iopoll(struct blk_iopoll *iop, int budget)55725572+static int ipr_iopoll(struct irq_poll *iop, int budget)55735573{55745574 struct ipr_ioa_cfg *ioa_cfg;55755575 struct ipr_hrr_queue *hrrq;···55855585 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);5586558655875587 if (completed_ops < budget)55885588- blk_iopoll_complete(iop);55885588+ irq_poll_complete(iop);55895589 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);5590559055915591 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {···56935693 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {56945694 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==56955695 hrrq->toggle_bit) {56965696- if (!blk_iopoll_sched_prep(&hrrq->iopoll))56975697- blk_iopoll_sched(&hrrq->iopoll);56965696+ if (!irq_poll_sched_prep(&hrrq->iopoll))56975697+ irq_poll_sched(&hrrq->iopoll);56985698 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);56995699 return IRQ_HANDLED;57005700 }···10405104051040610406 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {1040710407 for (i = 1; i < ioa_cfg->hrrq_num; i++) {1040810408- blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,1040810408+ irq_poll_init(&ioa_cfg->hrrq[i].iopoll,1040910409 ioa_cfg->iopoll_weight, ipr_iopoll);1041010410- blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);1041010410+ irq_poll_enable(&ioa_cfg->hrrq[i].iopoll);1041110411 }1041210412 }1041310413···1043610436 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {1043710437 ioa_cfg->iopoll_weight = 0;1043810438 for (i = 1; i < ioa_cfg->hrrq_num; i++)1043910439- blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);1043910439+ irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);1044010440 }10441104411044210442 while (ioa_cfg->in_reset_reload) {
···11-#ifndef BLK_IOPOLL_H22-#define BLK_IOPOLL_H33-44-struct blk_iopoll;55-typedef int (blk_iopoll_fn)(struct blk_iopoll *, int);66-77-struct blk_iopoll {88- struct list_head list;99- unsigned long state;1010- unsigned long data;1111- int weight;1212- int max;1313- blk_iopoll_fn *poll;1414-};1515-1616-enum {1717- IOPOLL_F_SCHED = 0,1818- IOPOLL_F_DISABLE = 1,1919-};2020-2121-/*2222- * Returns 0 if we successfully set the IOPOLL_F_SCHED bit, indicating2323- * that we were the first to acquire this iop for scheduling. If this iop2424- * is currently disabled, return "failure".2525- */2626-static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop)2727-{2828- if (!test_bit(IOPOLL_F_DISABLE, &iop->state))2929- return test_and_set_bit(IOPOLL_F_SCHED, &iop->state);3030-3131- return 1;3232-}3333-3434-static inline int blk_iopoll_disable_pending(struct blk_iopoll *iop)3535-{3636- return test_bit(IOPOLL_F_DISABLE, &iop->state);3737-}3838-3939-extern void blk_iopoll_sched(struct blk_iopoll *);4040-extern void blk_iopoll_init(struct blk_iopoll *, int, blk_iopoll_fn *);4141-extern void blk_iopoll_complete(struct blk_iopoll *);4242-extern void __blk_iopoll_complete(struct blk_iopoll *);4343-extern void blk_iopoll_enable(struct blk_iopoll *);4444-extern void blk_iopoll_disable(struct blk_iopoll *);4545-4646-#endif
+1-1
include/linux/interrupt.h
···412412 NET_TX_SOFTIRQ,413413 NET_RX_SOFTIRQ,414414 BLOCK_SOFTIRQ,415415- BLOCK_IOPOLL_SOFTIRQ,415415+ IRQ_POLL_SOFTIRQ,416416 TASKLET_SOFTIRQ,417417 SCHED_SOFTIRQ,418418 HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
+46
include/linux/irq_poll.h
···11+#ifndef IRQ_POLL_H22+#define IRQ_POLL_H33+44+struct irq_poll;55+typedef int (irq_poll_fn)(struct irq_poll *, int);66+77+struct irq_poll {88+ struct list_head list;99+ unsigned long state;1010+ unsigned long data;1111+ int weight;1212+ int max;1313+ irq_poll_fn *poll;1414+};1515+1616+enum {1717+ IRQ_POLL_F_SCHED = 0,1818+ IRQ_POLL_F_DISABLE = 1,1919+};2020+2121+/*2222+ * Returns 0 if we successfully set the IRQ_POLL_F_SCHED bit, indicating2323+ * that we were the first to acquire this iop for scheduling. If this iop2424+ * is currently disabled, return "failure".2525+ */2626+static inline int irq_poll_sched_prep(struct irq_poll *iop)2727+{2828+ if (!test_bit(IRQ_POLL_F_DISABLE, &iop->state))2929+ return test_and_set_bit(IRQ_POLL_F_SCHED, &iop->state);3030+3131+ return 1;3232+}3333+3434+static inline int irq_poll_disable_pending(struct irq_poll *iop)3535+{3636+ return test_bit(IRQ_POLL_F_DISABLE, &iop->state);3737+}3838+3939+extern void irq_poll_sched(struct irq_poll *);4040+extern void irq_poll_init(struct irq_poll *, int, irq_poll_fn *);4141+extern void irq_poll_complete(struct irq_poll *);4242+extern void __irq_poll_complete(struct irq_poll *);4343+extern void irq_poll_enable(struct irq_poll *);4444+extern void irq_poll_disable(struct irq_poll *);4545+4646+#endif
···475475 information. This data is useful for drivers handling476476 DDR SDRAM controllers.477477478478+config IRQ_POLL479479+ bool "IRQ polling library"480480+ help481481+ Helper library to poll interrupt mitigation using polling.482482+478483config MPILIB479484 tristate480485 select CLZ_TAB