Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: alteon: Convert tasklet API to new bottom half workqueue mechanism

Migrate tasklet APIs to the new bottom half workqueue mechanism. This
patch replaces all occurrences of tasklet usage with the appropriate
workqueue APIs throughout the alteon driver. The conversion moves the
driver onto the current deferred-work design without changing behavior.

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
Link: https://patch.msgid.link/20240730183403.4176544-2-allen.lkml@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Allen Pais and committed by
Jakub Kicinski
20a3bcfe 9bb3ec18

+17 -17
+13 -13
drivers/net/ethernet/alteon/acenic.c
··· 1560 1560 } 1561 1561 1562 1562 1563 - static void ace_tasklet(struct tasklet_struct *t) 1563 + static void ace_bh_work(struct work_struct *work) 1564 1564 { 1565 - struct ace_private *ap = from_tasklet(ap, t, ace_tasklet); 1565 + struct ace_private *ap = from_work(ap, work, ace_bh_work); 1566 1566 struct net_device *dev = ap->ndev; 1567 1567 int cur_size; 1568 1568 ··· 1595 1595 #endif 1596 1596 ace_load_jumbo_rx_ring(dev, RX_JUMBO_SIZE - cur_size); 1597 1597 } 1598 - ap->tasklet_pending = 0; 1598 + ap->bh_work_pending = 0; 1599 1599 } 1600 1600 1601 1601 ··· 1617 1617 * 1618 1618 * Loading rings is safe without holding the spin lock since this is 1619 1619 * done only before the device is enabled, thus no interrupts are 1620 - * generated and by the interrupt handler/tasklet handler. 1620 + * generated and by the interrupt handler/bh handler. 1621 1621 */ 1622 1622 static void ace_load_std_rx_ring(struct net_device *dev, int nr_bufs) 1623 1623 { ··· 2160 2160 */ 2161 2161 if (netif_running(dev)) { 2162 2162 int cur_size; 2163 - int run_tasklet = 0; 2163 + int run_bh_work = 0; 2164 2164 2165 2165 cur_size = atomic_read(&ap->cur_rx_bufs); 2166 2166 if (cur_size < RX_LOW_STD_THRES) { ··· 2172 2172 ace_load_std_rx_ring(dev, 2173 2173 RX_RING_SIZE - cur_size); 2174 2174 } else 2175 - run_tasklet = 1; 2175 + run_bh_work = 1; 2176 2176 } 2177 2177 2178 2178 if (!ACE_IS_TIGON_I(ap)) { ··· 2188 2188 ace_load_mini_rx_ring(dev, 2189 2189 RX_MINI_SIZE - cur_size); 2190 2190 } else 2191 - run_tasklet = 1; 2191 + run_bh_work = 1; 2192 2192 } 2193 2193 } 2194 2194 ··· 2205 2205 ace_load_jumbo_rx_ring(dev, 2206 2206 RX_JUMBO_SIZE - cur_size); 2207 2207 } else 2208 - run_tasklet = 1; 2208 + run_bh_work = 1; 2209 2209 } 2210 2210 } 2211 - if (run_tasklet && !ap->tasklet_pending) { 2212 - ap->tasklet_pending = 1; 2213 - tasklet_schedule(&ap->ace_tasklet); 2211 + if (run_bh_work && !ap->bh_work_pending) { 2212 + ap->bh_work_pending = 1; 2213 + queue_work(system_bh_wq, 
&ap->ace_bh_work); 2214 2214 } 2215 2215 } 2216 2216 ··· 2267 2267 /* 2268 2268 * Setup the bottom half rx ring refill handler 2269 2269 */ 2270 - tasklet_setup(&ap->ace_tasklet, ace_tasklet); 2270 + INIT_WORK(&ap->ace_bh_work, ace_bh_work); 2271 2271 return 0; 2272 2272 } 2273 2273 ··· 2301 2301 cmd.idx = 0; 2302 2302 ace_issue_cmd(regs, &cmd); 2303 2303 2304 - tasklet_kill(&ap->ace_tasklet); 2304 + cancel_work_sync(&ap->ace_bh_work); 2305 2305 2306 2306 /* 2307 2307 * Make sure one CPU is not processing packets while
+4 -4
drivers/net/ethernet/alteon/acenic.h
··· 2 2 #ifndef _ACENIC_H_ 3 3 #define _ACENIC_H_ 4 4 #include <linux/interrupt.h> 5 - 5 + #include <linux/workqueue.h> 6 6 7 7 /* 8 8 * Generate TX index update each time, when TX ring is closed. ··· 667 667 struct rx_desc *rx_mini_ring; 668 668 struct rx_desc *rx_return_ring; 669 669 670 - int tasklet_pending, jumbo; 671 - struct tasklet_struct ace_tasklet; 670 + int bh_work_pending, jumbo; 671 + struct work_struct ace_bh_work; 672 672 673 673 struct event *evt_ring; 674 674 ··· 776 776 static netdev_tx_t ace_start_xmit(struct sk_buff *skb, 777 777 struct net_device *dev); 778 778 static int ace_close(struct net_device *dev); 779 - static void ace_tasklet(struct tasklet_struct *t); 779 + static void ace_bh_work(struct work_struct *work); 780 780 static void ace_dump_trace(struct ace_private *ap); 781 781 static void ace_set_multicast_list(struct net_device *dev); 782 782 static int ace_change_mtu(struct net_device *dev, int new_mtu);