qlge: Fix MSI/legacy single interrupt bug.

The chip can issue spurious interrupts in the single-interrupt modes
(MSI and legacy INTx). We now disable the interrupt on entry to the
handler, which clears the condition and allows processing to continue.
This also removes the legacy-specific code, since the same handling is
now needed for single-irq MSI as well.

Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>

Authored by Ron Mercer and committed by Jeff Garzik (bb0d215c, b891a902)
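
In single-interrupt operation the driver gates the hardware enable on a
per-vector irq_cnt: the disable path increments it, the enable path
decrements it and rearms the hardware only when the count returns to
zero, and the count is precharged to 1 at setup so the first enable
lands. Below is a minimal userspace sketch of that accounting, with a
printed stand-in for the INTR_EN register write; the names (intr_ctx,
enable_completion_intr, disable_completion_intr) are hypothetical, not
the driver's.

	#include <stdatomic.h>
	#include <stdio.h>

	struct intr_ctx {
		atomic_int irq_cnt;	/* 0 == interrupt enabled, > 0 == disabled */
	};

	/* Decrement irq_cnt; only the transition to zero writes the enable. */
	static void enable_completion_intr(struct intr_ctx *ctx)
	{
		if (atomic_fetch_sub(&ctx->irq_cnt, 1) == 1)
			puts("write INTR_EN (enable)");
	}

	/* Write the disable (and read status) only on the 0 -> 1 transition. */
	static void disable_completion_intr(struct intr_ctx *ctx)
	{
		if (atomic_load(&ctx->irq_cnt) == 0)
			puts("write INTR_EN (disable), read STS");
		atomic_fetch_add(&ctx->irq_cnt, 1);
	}

	int main(void)
	{
		struct intr_ctx ctx;

		atomic_init(&ctx.irq_cnt, 1);	/* precharge, as the patch does */
		enable_completion_intr(&ctx);	/* 1 -> 0: enables */
		disable_completion_intr(&ctx);	/* 0 -> 1: disables, reads STS */
		enable_completion_intr(&ctx);	/* 1 -> 0: re-enables */
		return 0;
	}

Note that the driver cannot rely on a lone atomic for the disable path,
since the count check and the register write must happen together; the
patch serializes both paths under hw_lock instead.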

2 files changed, 47 insertions(+), 47 deletions(-)
drivers/net/qlge/qlge.h (+1 -4)
···
 	spinlock_t adapter_lock;
 	spinlock_t hw_lock;
 	spinlock_t stats_lock;
-	spinlock_t legacy_lock;	/* used for maintaining legacy intr sync */
 
 	/* PCI Bus Relative Register Addresses */
 	void __iomem *reg_base;
···
 	u32 intr_count;
 	struct msix_entry *msi_x_entry;
 	struct intr_context intr_context[MAX_RX_RINGS];
-
-	int (*legacy_check) (struct ql_adapter *);
 
 	int tx_ring_count;	/* One per online CPU. */
 	u32 rss_ring_first_cq_id;/* index of first inbound (rss) rx_ring */
···
 void ql_mpi_reset_work(struct work_struct *work);
 int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
 void ql_queue_asic_error(struct ql_adapter *qdev);
-void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
 void ql_set_ethtool_ops(struct net_device *ndev);
 int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
drivers/net/qlge/qlge_main.c (+46 -43)
···
  * incremented everytime we queue a worker and decremented everytime
  * a worker finishes.  Once it hits zero we enable the interrupt.
  */
-void ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
+u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
-	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
+	u32 var = 0;
+	unsigned long hw_flags = 0;
+	struct intr_context *ctx = qdev->intr_context + intr;
+
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
+		/* Always enable if we're MSIX multi interrupts and
+		 * it's not the default (zeroeth) interrupt.
+		 */
 		ql_write32(qdev, INTR_EN,
-			   qdev->intr_context[intr].intr_en_mask);
-	else {
-		if (qdev->legacy_check)
-			spin_lock(&qdev->legacy_lock);
-		if (atomic_dec_and_test(&qdev->intr_context[intr].irq_cnt)) {
-			QPRINTK(qdev, INTR, ERR, "Enabling interrupt %d.\n",
-				intr);
-			ql_write32(qdev, INTR_EN,
-				   qdev->intr_context[intr].intr_en_mask);
-		} else {
-			QPRINTK(qdev, INTR, ERR,
-				"Skip enable, other queue(s) are active.\n");
-		}
-		if (qdev->legacy_check)
-			spin_unlock(&qdev->legacy_lock);
+			   ctx->intr_en_mask);
+		var = ql_read32(qdev, STS);
+		return var;
 	}
+
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (atomic_dec_and_test(&ctx->irq_cnt)) {
+		ql_write32(qdev, INTR_EN,
+			   ctx->intr_en_mask);
+		var = ql_read32(qdev, STS);
+	}
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+	return var;
 }
 
 static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
 {
 	u32 var = 0;
+	unsigned long hw_flags;
+	struct intr_context *ctx;
 
-	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags)))
-		goto exit;
-	else if (!atomic_read(&qdev->intr_context[intr].irq_cnt)) {
+	/* HW disables for us if we're MSIX multi interrupts and
+	 * it's not the default (zeroeth) interrupt.
+	 */
+	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
+		return 0;
+
+	ctx = qdev->intr_context + intr;
+	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+	if (!atomic_read(&ctx->irq_cnt)) {
 		ql_write32(qdev, INTR_EN,
-			   qdev->intr_context[intr].intr_dis_mask);
+			   ctx->intr_dis_mask);
 		var = ql_read32(qdev, STS);
 	}
-	atomic_inc(&qdev->intr_context[intr].irq_cnt);
-exit:
+	atomic_inc(&ctx->irq_cnt);
+	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 	return var;
 }
···
 	 * and enables only if the result is zero.
 	 * So we precharge it here.
 	 */
-	atomic_set(&qdev->intr_context[i].irq_cnt, 1);
+	if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
+	    i == 0))
+		atomic_set(&qdev->intr_context[i].irq_cnt, 1);
 	ql_enable_completion_interrupt(qdev, i);
 }
···
 	return IRQ_HANDLED;
 }
 
-/* We check here to see if we're already handling a legacy
- * interrupt.  If we are, then it must belong to another
- * chip with which we're sharing the interrupt line.
- */
-int ql_legacy_check(struct ql_adapter *qdev)
-{
-	int err;
-	spin_lock(&qdev->legacy_lock);
-	err = atomic_read(&qdev->intr_context[0].irq_cnt);
-	spin_unlock(&qdev->legacy_lock);
-	return err;
-}
-
 /* This handles a fatal error, MPI activity, and the default
  * rx_ring in an MSI-X multiple vector environment.
  * In MSI/Legacy environment it also process the rest of
···
 	int i;
 	int work_done = 0;
 
-	if (qdev->legacy_check && qdev->legacy_check(qdev)) {
-		QPRINTK(qdev, INTR, INFO, "Already busy, not our interrupt.\n");
-		return IRQ_NONE;	/* Not our interrupt */
+	spin_lock(&qdev->hw_lock);
+	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
+		QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
+		spin_unlock(&qdev->hw_lock);
+		return IRQ_NONE;
 	}
+	spin_unlock(&qdev->hw_lock);
 
-	var = ql_read32(qdev, STS);
+	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
 
 	/*
 	 * Check for fatal error.
···
 			}
 		}
 	}
+	ql_enable_completion_interrupt(qdev, intr_context->intr);
 	return work_done ? IRQ_HANDLED : IRQ_NONE;
 }
···
 		}
 	}
 	irq_type = LEG_IRQ;
-	spin_lock_init(&qdev->legacy_lock);
-	qdev->legacy_check = ql_legacy_check;
 	QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
 }
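
Taken together, the reworked qlge_isr() path for MSI/legacy
single-interrupt mode is: return IRQ_NONE if irq_cnt shows this device
is already mid-service (the assertion then belongs to another device
sharing the line), disable the interrupt and read status on entry so a
spurious assertion is cleared, service the rings, and re-enable on
exit. A compact userspace sketch of that bracket, with hypothetical
stand-ins (isr, poll_rings, the printed register writes) rather than
the driver's code:

	#include <stdatomic.h>
	#include <stdio.h>

	enum irqreturn { IRQ_NONE, IRQ_HANDLED };

	static atomic_int irq_cnt;	/* 0 == interrupt enabled */

	/* Stand-in for servicing the completion rings; reports work done. */
	static int poll_rings(void)
	{
		return 1;
	}

	static enum irqreturn isr(void)
	{
		int work_done;

		/* A non-zero count means we are already mid-service, so
		 * this assertion belongs to another device on the line. */
		if (atomic_load(&irq_cnt) != 0)
			return IRQ_NONE;

		/* Disable on entry; the status read clears a spurious
		 * assertion. */
		atomic_fetch_add(&irq_cnt, 1);
		puts("write INTR_EN (disable), read STS");

		work_done = poll_rings();

		/* Re-enable only when the count drops back to zero. */
		if (atomic_fetch_sub(&irq_cnt, 1) == 1)
			puts("write INTR_EN (enable)");

		return work_done ? IRQ_HANDLED : IRQ_NONE;
	}

	int main(void)
	{
		atomic_init(&irq_cnt, 0);	/* enabled state */
		printf("isr() -> %s\n",
		       isr() == IRQ_HANDLED ? "IRQ_HANDLED" : "IRQ_NONE");
		return 0;
	}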