Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: ll_temac: Prepare indirect register access for multicast support

With .ndo_set_rx_mode/temac_set_multicast_list() being called in atomic
context (holding addr_list_lock), and temac_set_multicast_list() needing
to access temac indirect registers, the mutex used to synchronize indirect
register access is a no-no.

Replace it with a spinlock, and avoid sleeping in
temac_indirect_busywait().

To avoid excessive holding of the lock, which is now a spinlock, the
temac_device_reset() function is changed to only hold the lock for short
periods. With timeouts, it could be holding the spinlock for more than
2 seconds.

Signed-off-by: Esben Haabendal <esben@geanix.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Esben Haabendal and committed by
David S. Miller
1bd33bf0 ddc0bf34

+181 -91
+3 -2
drivers/net/ethernet/xilinx/ll_temac.h
··· 361 361 /* For synchronization of indirect register access. Must be 362 362 * shared mutex between interfaces in same TEMAC block. 363 363 */ 364 - struct mutex *indirect_mutex; 364 + spinlock_t *indirect_lock; 365 365 u32 options; /* Current options word */ 366 366 int last_link; 367 367 unsigned int temac_features; ··· 388 388 /* xilinx_temac.c */ 389 389 int temac_indirect_busywait(struct temac_local *lp); 390 390 u32 temac_indirect_in32(struct temac_local *lp, int reg); 391 + u32 temac_indirect_in32_locked(struct temac_local *lp, int reg); 391 392 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value); 392 - 393 + void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value); 393 394 394 395 /* xilinx_temac_mdio.c */ 395 396 int temac_mdio_setup(struct temac_local *lp, struct platform_device *pdev);
+167 -77
drivers/net/ethernet/xilinx/ll_temac_main.c
··· 53 53 #include <linux/slab.h> 54 54 #include <linux/interrupt.h> 55 55 #include <linux/dma-mapping.h> 56 + #include <linux/processor.h> 56 57 #include <linux/platform_data/xilinx-ll-temac.h> 57 58 58 59 #include "ll_temac.h" ··· 85 84 return iowrite32(value, lp->regs + offset); 86 85 } 87 86 87 + static bool hard_acs_rdy(struct temac_local *lp) 88 + { 89 + return temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK; 90 + } 91 + 92 + static bool hard_acs_rdy_or_timeout(struct temac_local *lp, ktime_t timeout) 93 + { 94 + ktime_t cur = ktime_get(); 95 + 96 + return hard_acs_rdy(lp) || ktime_after(cur, timeout); 97 + } 98 + 99 + /* Poll for maximum 20 ms. This is similar to the 2 jiffies @ 100 Hz 100 + * that was used before, and should cover MDIO bus speed down to 3200 101 + * Hz. 102 + */ 103 + #define HARD_ACS_RDY_POLL_NS (20 * NSEC_PER_MSEC) 104 + 105 + /** 106 + * temac_indirect_busywait - Wait for current indirect register access 107 + * to complete. 108 + */ 88 109 int temac_indirect_busywait(struct temac_local *lp) 89 110 { 90 - unsigned long end = jiffies + 2; 111 + ktime_t timeout = ktime_add_ns(ktime_get(), HARD_ACS_RDY_POLL_NS); 91 112 92 - while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) { 93 - if (time_before_eq(end, jiffies)) { 94 - WARN_ON(1); 95 - return -ETIMEDOUT; 96 - } 97 - usleep_range(500, 1000); 98 - } 99 - return 0; 113 + spin_until_cond(hard_acs_rdy_or_timeout(lp, timeout)); 114 + if (WARN_ON(!hard_acs_rdy(lp))) 115 + return -ETIMEDOUT; 116 + else 117 + return 0; 100 118 } 101 119 102 120 /** 103 - * temac_indirect_in32 104 - * 105 - * lp->indirect_mutex must be held when calling this function 121 + * temac_indirect_in32 - Indirect register read access. This function 122 + * must be called without lp->indirect_lock being held. 
106 123 */ 107 124 u32 temac_indirect_in32(struct temac_local *lp, int reg) 108 125 { 109 - u32 val; 126 + unsigned long flags; 127 + int val; 110 128 111 - if (temac_indirect_busywait(lp)) 112 - return -ETIMEDOUT; 113 - temac_iow(lp, XTE_CTL0_OFFSET, reg); 114 - if (temac_indirect_busywait(lp)) 115 - return -ETIMEDOUT; 116 - val = temac_ior(lp, XTE_LSW0_OFFSET); 117 - 129 + spin_lock_irqsave(lp->indirect_lock, flags); 130 + val = temac_indirect_in32_locked(lp, reg); 131 + spin_unlock_irqrestore(lp->indirect_lock, flags); 118 132 return val; 119 133 } 120 134 121 135 /** 122 - * temac_indirect_out32 123 - * 124 - * lp->indirect_mutex must be held when calling this function 136 + * temac_indirect_in32_locked - Indirect register read access. This 137 + * function must be called with lp->indirect_lock being held. Use 138 + * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid 139 + * repeated lock/unlock and to ensure uninterrupted access to indirect 140 + * registers. 141 + */ 142 + u32 temac_indirect_in32_locked(struct temac_local *lp, int reg) 143 + { 144 + /* This initial wait should normally not spin, as we always 145 + * try to wait for indirect access to complete before 146 + * releasing the indirect_lock. 147 + */ 148 + if (WARN_ON(temac_indirect_busywait(lp))) 149 + return -ETIMEDOUT; 150 + /* Initiate read from indirect register */ 151 + temac_iow(lp, XTE_CTL0_OFFSET, reg); 152 + /* Wait for indirect register access to complete. We really 153 + * should not see timeouts, and could even end up causing 154 + * problem for following indirect access, so let's make a bit 155 + * of WARN noise. 156 + */ 157 + if (WARN_ON(temac_indirect_busywait(lp))) 158 + return -ETIMEDOUT; 159 + /* Value is ready now */ 160 + return temac_ior(lp, XTE_LSW0_OFFSET); 161 + } 162 + 163 + /** 164 + * temac_indirect_out32 - Indirect register write access. This function 165 + * must be called without lp->indirect_lock being held. 
125 166 */ 126 167 void temac_indirect_out32(struct temac_local *lp, int reg, u32 value) 127 168 { 128 - if (temac_indirect_busywait(lp)) 169 + unsigned long flags; 170 + 171 + spin_lock_irqsave(lp->indirect_lock, flags); 172 + temac_indirect_out32_locked(lp, reg, value); 173 + spin_unlock_irqrestore(lp->indirect_lock, flags); 174 + } 175 + 176 + /** 177 + * temac_indirect_out32_locked - Indirect register write access. This 178 + * function must be called with lp->indirect_lock being held. Use 179 + * this together with spin_lock_irqsave/spin_lock_irqrestore to avoid 180 + * repeated lock/unlock and to ensure uninterrupted access to indirect 181 + * registers. 182 + */ 183 + void temac_indirect_out32_locked(struct temac_local *lp, int reg, u32 value) 184 + { 185 + /* As in temac_indirect_in32_locked(), we should normally not 186 + * spin here. And if it happens, we actually end up silently 187 + * ignoring the write request. Ouch. 188 + */ 189 + if (WARN_ON(temac_indirect_busywait(lp))) 129 190 return; 191 + /* Initiate write to indirect register */ 130 192 temac_iow(lp, XTE_LSW0_OFFSET, value); 131 193 temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg); 132 - temac_indirect_busywait(lp); 194 + /* As in temac_indirect_in32_locked(), we should not see timeouts 195 + * here. And if it happens, we continue before the write has 196 + * completed. Not good. 
197 + */ 198 + WARN_ON(temac_indirect_busywait(lp)); 133 199 } 134 200 135 201 /** ··· 412 344 static void temac_do_set_mac_address(struct net_device *ndev) 413 345 { 414 346 struct temac_local *lp = netdev_priv(ndev); 347 + unsigned long flags; 415 348 416 349 /* set up unicast MAC address filter set its mac address */ 417 - mutex_lock(lp->indirect_mutex); 418 - temac_indirect_out32(lp, XTE_UAW0_OFFSET, 419 - (ndev->dev_addr[0]) | 420 - (ndev->dev_addr[1] << 8) | 421 - (ndev->dev_addr[2] << 16) | 422 - (ndev->dev_addr[3] << 24)); 350 + spin_lock_irqsave(lp->indirect_lock, flags); 351 + temac_indirect_out32_locked(lp, XTE_UAW0_OFFSET, 352 + (ndev->dev_addr[0]) | 353 + (ndev->dev_addr[1] << 8) | 354 + (ndev->dev_addr[2] << 16) | 355 + (ndev->dev_addr[3] << 24)); 423 356 /* There are reserved bits in EUAW1 424 357 * so don't affect them Set MAC bits [47:32] in EUAW1 */ 425 - temac_indirect_out32(lp, XTE_UAW1_OFFSET, 426 - (ndev->dev_addr[4] & 0x000000ff) | 427 - (ndev->dev_addr[5] << 8)); 428 - mutex_unlock(lp->indirect_mutex); 358 + temac_indirect_out32_locked(lp, XTE_UAW1_OFFSET, 359 + (ndev->dev_addr[4] & 0x000000ff) | 360 + (ndev->dev_addr[5] << 8)); 361 + spin_unlock_irqrestore(lp->indirect_lock, flags); 429 362 } 430 363 431 364 static int temac_init_mac_address(struct net_device *ndev, const void *address) ··· 452 383 static void temac_set_multicast_list(struct net_device *ndev) 453 384 { 454 385 struct temac_local *lp = netdev_priv(ndev); 455 - u32 multi_addr_msw, multi_addr_lsw, val; 386 + u32 multi_addr_msw, multi_addr_lsw; 456 387 int i; 388 + unsigned long flags; 389 + bool promisc_mode_disabled = false; 457 390 458 - mutex_lock(lp->indirect_mutex); 459 - if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) || 460 - netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) { 391 + if (ndev->flags & (IFF_PROMISC | IFF_ALLMULTI) || 392 + (netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM)) { 461 393 temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK); 462 394 
dev_info(&ndev->dev, "Promiscuous mode enabled.\n"); 463 - } else if (!netdev_mc_empty(ndev)) { 395 + return; 396 + } 397 + 398 + spin_lock_irqsave(lp->indirect_lock, flags); 399 + 400 + if (!netdev_mc_empty(ndev)) { 464 401 struct netdev_hw_addr *ha; 465 402 466 403 i = 0; 467 404 netdev_for_each_mc_addr(ha, ndev) { 468 - if (i >= MULTICAST_CAM_TABLE_NUM) 405 + if (WARN_ON(i >= MULTICAST_CAM_TABLE_NUM)) 469 406 break; 470 407 multi_addr_msw = ((ha->addr[3] << 24) | 471 408 (ha->addr[2] << 16) | 472 409 (ha->addr[1] << 8) | 473 410 (ha->addr[0])); 474 - temac_indirect_out32(lp, XTE_MAW0_OFFSET, 475 - multi_addr_msw); 411 + temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 412 + multi_addr_msw); 476 413 multi_addr_lsw = ((ha->addr[5] << 8) | 477 414 (ha->addr[4]) | (i << 16)); 478 - temac_indirect_out32(lp, XTE_MAW1_OFFSET, 479 - multi_addr_lsw); 415 + temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, 416 + multi_addr_lsw); 480 417 i++; 481 418 } 482 419 } else { 483 - val = temac_indirect_in32(lp, XTE_AFM_OFFSET); 484 - temac_indirect_out32(lp, XTE_AFM_OFFSET, 485 - val & ~XTE_AFM_EPPRM_MASK); 486 - temac_indirect_out32(lp, XTE_MAW0_OFFSET, 0); 487 - temac_indirect_out32(lp, XTE_MAW1_OFFSET, 0); 488 - dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); 420 + temac_indirect_out32_locked(lp, XTE_MAW0_OFFSET, 0); 421 + temac_indirect_out32_locked(lp, XTE_MAW1_OFFSET, i << 16); 422 + } 489 423 } 490 - mutex_unlock(lp->indirect_mutex); 424 + 425 + /* Enable address filter block if currently disabled */ 426 + if (temac_indirect_in32_locked(lp, XTE_AFM_OFFSET) 427 + & XTE_AFM_EPPRM_MASK) { 428 + temac_indirect_out32_locked(lp, XTE_AFM_OFFSET, 0); 429 + promisc_mode_disabled = true; 430 + } 431 + 432 + spin_unlock_irqrestore(lp->indirect_lock, flags); 433 + 434 + if (promisc_mode_disabled) 435 + dev_info(&ndev->dev, "Promiscuous mode disabled.\n"); 491 436 } 492 437 493 438 static struct temac_option { ··· 592 509 struct temac_local *lp = netdev_priv(ndev); 593 510 struct 
temac_option *tp = &temac_options[0]; 594 511 int reg; 512 + unsigned long flags; 595 513 596 - mutex_lock(lp->indirect_mutex); 514 + spin_lock_irqsave(lp->indirect_lock, flags); 597 515 while (tp->opt) { 598 - reg = temac_indirect_in32(lp, tp->reg) & ~tp->m_or; 599 - if (options & tp->opt) 516 + reg = temac_indirect_in32_locked(lp, tp->reg) & ~tp->m_or; 517 + if (options & tp->opt) { 600 518 reg |= tp->m_or; 601 - temac_indirect_out32(lp, tp->reg, reg); 519 + temac_indirect_out32_locked(lp, tp->reg, reg); 520 + } 602 521 tp++; 603 522 } 523 + spin_unlock_irqrestore(lp->indirect_lock, flags); 604 524 lp->options |= options; 605 - mutex_unlock(lp->indirect_mutex); 606 525 607 526 return 0; 608 527 } ··· 615 530 struct temac_local *lp = netdev_priv(ndev); 616 531 u32 timeout; 617 532 u32 val; 533 + unsigned long flags; 618 534 619 535 /* Perform a software reset */ 620 536 ··· 624 538 625 539 dev_dbg(&ndev->dev, "%s()\n", __func__); 626 540 627 - mutex_lock(lp->indirect_mutex); 628 541 /* Reset the receiver and wait for it to finish reset */ 629 542 temac_indirect_out32(lp, XTE_RXC1_OFFSET, XTE_RXC1_RXRST_MASK); 630 543 timeout = 1000; ··· 649 564 } 650 565 651 566 /* Disable the receiver */ 652 - val = temac_indirect_in32(lp, XTE_RXC1_OFFSET); 653 - temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK); 567 + spin_lock_irqsave(lp->indirect_lock, flags); 568 + val = temac_indirect_in32_locked(lp, XTE_RXC1_OFFSET); 569 + temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 570 + val & ~XTE_RXC1_RXEN_MASK); 571 + spin_unlock_irqrestore(lp->indirect_lock, flags); 654 572 655 573 /* Reset Local Link (DMA) */ 656 574 lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST); ··· 673 585 "temac_device_reset descriptor allocation failed\n"); 674 586 } 675 587 676 - temac_indirect_out32(lp, XTE_RXC0_OFFSET, 0); 677 - temac_indirect_out32(lp, XTE_RXC1_OFFSET, 0); 678 - temac_indirect_out32(lp, XTE_TXC_OFFSET, 0); 679 - temac_indirect_out32(lp, XTE_FCC_OFFSET, 
XTE_FCC_RXFLO_MASK); 680 - 681 - mutex_unlock(lp->indirect_mutex); 588 + spin_lock_irqsave(lp->indirect_lock, flags); 589 + temac_indirect_out32_locked(lp, XTE_RXC0_OFFSET, 0); 590 + temac_indirect_out32_locked(lp, XTE_RXC1_OFFSET, 0); 591 + temac_indirect_out32_locked(lp, XTE_TXC_OFFSET, 0); 592 + temac_indirect_out32_locked(lp, XTE_FCC_OFFSET, XTE_FCC_RXFLO_MASK); 593 + spin_unlock_irqrestore(lp->indirect_lock, flags); 682 594 683 595 /* Sync default options with HW 684 596 * but leave receiver and transmitter disabled. */ ··· 702 614 struct phy_device *phy = ndev->phydev; 703 615 u32 mii_speed; 704 616 int link_state; 617 + unsigned long flags; 705 618 706 619 /* hash together the state values to decide if something has changed */ 707 620 link_state = phy->speed | (phy->duplex << 1) | phy->link; 708 621 709 - mutex_lock(lp->indirect_mutex); 710 622 if (lp->last_link != link_state) { 711 - mii_speed = temac_indirect_in32(lp, XTE_EMCFG_OFFSET); 623 + spin_lock_irqsave(lp->indirect_lock, flags); 624 + mii_speed = temac_indirect_in32_locked(lp, XTE_EMCFG_OFFSET); 712 625 mii_speed &= ~XTE_EMCFG_LINKSPD_MASK; 713 626 714 627 switch (phy->speed) { ··· 719 630 } 720 631 721 632 /* Write new speed setting out to TEMAC */ 722 - temac_indirect_out32(lp, XTE_EMCFG_OFFSET, mii_speed); 633 + temac_indirect_out32_locked(lp, XTE_EMCFG_OFFSET, mii_speed); 634 + spin_unlock_irqrestore(lp->indirect_lock, flags); 635 + 723 636 lp->last_link = link_state; 724 637 phy_print_status(phy); 725 638 } 726 - mutex_unlock(lp->indirect_mutex); 727 639 } 728 640 729 641 #ifdef CONFIG_64BIT ··· 1186 1096 1187 1097 /* Setup mutex for synchronization of indirect register access */ 1188 1098 if (pdata) { 1189 - if (!pdata->indirect_mutex) { 1099 + if (!pdata->indirect_lock) { 1190 1100 dev_err(&pdev->dev, 1191 - "indirect_mutex missing in platform_data\n"); 1101 + "indirect_lock missing in platform_data\n"); 1192 1102 return -EINVAL; 1193 1103 } 1194 - lp->indirect_mutex = pdata->indirect_mutex; 
1104 + lp->indirect_lock = pdata->indirect_lock; 1195 1105 } else { 1196 - lp->indirect_mutex = devm_kmalloc(&pdev->dev, 1197 - sizeof(*lp->indirect_mutex), 1198 - GFP_KERNEL); 1199 - mutex_init(lp->indirect_mutex); 1106 + lp->indirect_lock = devm_kmalloc(&pdev->dev, 1107 + sizeof(*lp->indirect_lock), 1108 + GFP_KERNEL); 1109 + spin_lock_init(lp->indirect_lock); 1200 1110 } 1201 1111 1202 1112 /* map device registers */
+9 -11
drivers/net/ethernet/xilinx/ll_temac_mdio.c
··· 25 25 { 26 26 struct temac_local *lp = bus->priv; 27 27 u32 rc; 28 + unsigned long flags; 28 29 29 30 /* Write the PHY address to the MIIM Access Initiator register. 30 31 * When the transfer completes, the PHY register value will appear 31 32 * in the LSW0 register */ 32 - mutex_lock(lp->indirect_mutex); 33 + spin_lock_irqsave(lp->indirect_lock, flags); 33 34 temac_iow(lp, XTE_LSW0_OFFSET, (phy_id << 5) | reg); 34 - rc = temac_indirect_in32(lp, XTE_MIIMAI_OFFSET); 35 - mutex_unlock(lp->indirect_mutex); 35 + rc = temac_indirect_in32_locked(lp, XTE_MIIMAI_OFFSET); 36 + spin_unlock_irqrestore(lp->indirect_lock, flags); 36 37 37 38 dev_dbg(lp->dev, "temac_mdio_read(phy_id=%i, reg=%x) == %x\n", 38 39 phy_id, reg, rc); ··· 44 43 static int temac_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) 45 44 { 46 45 struct temac_local *lp = bus->priv; 46 + unsigned long flags; 47 47 48 48 dev_dbg(lp->dev, "temac_mdio_write(phy_id=%i, reg=%x, val=%x)\n", 49 49 phy_id, reg, val); ··· 52 50 /* First write the desired value into the write data register 53 51 * and then write the address into the access initiator register 54 52 */ 55 - mutex_lock(lp->indirect_mutex); 56 - temac_indirect_out32(lp, XTE_MGTDR_OFFSET, val); 57 - temac_indirect_out32(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg); 58 - mutex_unlock(lp->indirect_mutex); 53 + spin_lock_irqsave(lp->indirect_lock, flags); 54 + temac_indirect_out32_locked(lp, XTE_MGTDR_OFFSET, val); 55 + temac_indirect_out32_locked(lp, XTE_MIIMAI_OFFSET, (phy_id << 5) | reg); 56 + spin_unlock_irqrestore(lp->indirect_lock, flags); 59 57 60 58 return 0; 61 59 } ··· 89 87 90 88 /* Enable the MDIO bus by asserting the enable bit and writing 91 89 * in the clock config */ 92 - mutex_lock(lp->indirect_mutex); 93 90 temac_indirect_out32(lp, XTE_MC_OFFSET, 1 << 6 | clk_div); 94 - mutex_unlock(lp->indirect_mutex); 95 91 96 92 bus = devm_mdiobus_alloc(&pdev->dev); 97 93 if (!bus) ··· 116 116 if (rc) 117 117 return rc; 118 118 119 - 
mutex_lock(lp->indirect_mutex); 120 119 dev_dbg(lp->dev, "MDIO bus registered; MC:%x\n", 121 120 temac_indirect_in32(lp, XTE_MC_OFFSET)); 122 - mutex_unlock(lp->indirect_mutex); 123 121 return 0; 124 122 } 125 123
+2 -1
include/linux/platform_data/xilinx-ll-temac.h
··· 4 4 5 5 #include <linux/if_ether.h> 6 6 #include <linux/phy.h> 7 + #include <linux/spinlock.h> 7 8 8 9 struct ll_temac_platform_data { 9 10 bool txcsum; /* Enable/disable TX checksum */ ··· 22 21 * TEMAC IP block, the same mutex should be passed here, as 23 22 * they share the same DCR bus bridge. 24 23 */ 25 - struct mutex *indirect_mutex; 24 + spinlock_t *indirect_lock; 26 25 /* DMA channel control setup */ 27 26 u8 tx_irq_timeout; /* TX Interrupt Delay Time-out */ 28 27 u8 tx_irq_count; /* TX Interrupt Coalescing Threshold Count */