Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ucc_geth: Add support for skb recycling

We can reclaim transmitted skbs for reuse in the receive path — so-called
skb recycling support.

Also reorder the ucc_geth_poll() steps so that the tx ring is cleaned first,
which may reclaim some skbs for the rx path.

Signed-off-by: Anton Vorontsov <avorontsov@ru.mvista.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Anton Vorontsov and committed by
David S. Miller
50f238fd ef0657c4

+30 -12
+28 -12
drivers/net/ucc_geth.c
··· 209 209 { 210 210 struct sk_buff *skb = NULL; 211 211 212 - skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + 213 - UCC_GETH_RX_DATA_BUF_ALIGNMENT); 214 - 212 + skb = __skb_dequeue(&ugeth->rx_recycle); 213 + if (!skb) 214 + skb = dev_alloc_skb(ugeth->ug_info->uf_info.max_rx_buf_length + 215 + UCC_GETH_RX_DATA_BUF_ALIGNMENT); 215 216 if (skb == NULL) 216 217 return NULL; 217 218 ··· 1987 1986 iounmap(ugeth->ug_regs); 1988 1987 ugeth->ug_regs = NULL; 1989 1988 } 1989 + 1990 + skb_queue_purge(&ugeth->rx_recycle); 1990 1991 } 1991 1992 1992 1993 static void ucc_geth_set_multi(struct net_device *dev) ··· 2204 2201 ugeth_err("%s: Failed to ioremap regs.", __func__); 2205 2202 return -ENOMEM; 2206 2203 } 2204 + 2205 + skb_queue_head_init(&ugeth->rx_recycle); 2207 2206 2208 2207 return 0; 2209 2208 } ··· 3213 3208 if (netif_msg_rx_err(ugeth)) 3214 3209 ugeth_err("%s, %d: ERROR!!! skb - 0x%08x", 3215 3210 __func__, __LINE__, (u32) skb); 3216 - if (skb) 3217 - dev_kfree_skb_any(skb); 3211 + if (skb) { 3212 + skb->data = skb->head + NET_SKB_PAD; 3213 + __skb_queue_head(&ugeth->rx_recycle, skb); 3214 + } 3218 3215 3219 3216 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL; 3220 3217 dev->stats.rx_dropped++; ··· 3274 3267 3275 3268 /* Normal processing. */ 3276 3269 while ((bd_status & T_R) == 0) { 3270 + struct sk_buff *skb; 3271 + 3277 3272 /* BD contains already transmitted buffer. 
*/ 3278 3273 /* Handle the transmitted buffer and release */ 3279 3274 /* the BD to be used with the current frame */ ··· 3285 3276 3286 3277 dev->stats.tx_packets++; 3287 3278 3288 - /* Free the sk buffer associated with this TxBD */ 3289 - dev_kfree_skb(ugeth-> 3290 - tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]); 3279 + skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]]; 3280 + 3281 + if (skb_queue_len(&ugeth->rx_recycle) < RX_BD_RING_LEN && 3282 + skb_recycle_check(skb, 3283 + ugeth->ug_info->uf_info.max_rx_buf_length + 3284 + UCC_GETH_RX_DATA_BUF_ALIGNMENT)) 3285 + __skb_queue_head(&ugeth->rx_recycle, skb); 3286 + else 3287 + dev_kfree_skb(skb); 3288 + 3291 3289 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL; 3292 3290 ugeth->skb_dirtytx[txQ] = 3293 3291 (ugeth->skb_dirtytx[txQ] + ··· 3323 3307 3324 3308 ug_info = ugeth->ug_info; 3325 3309 3326 - howmany = 0; 3327 - for (i = 0; i < ug_info->numQueuesRx; i++) 3328 - howmany += ucc_geth_rx(ugeth, i, budget - howmany); 3329 - 3330 3310 /* Tx event processing */ 3331 3311 spin_lock(&ugeth->lock); 3332 3312 for (i = 0; i < ug_info->numQueuesTx; i++) 3333 3313 ucc_geth_tx(ugeth->ndev, i); 3334 3314 spin_unlock(&ugeth->lock); 3315 + 3316 + howmany = 0; 3317 + for (i = 0; i < ug_info->numQueuesRx; i++) 3318 + howmany += ucc_geth_rx(ugeth, i, budget - howmany); 3335 3319 3336 3320 if (howmany < budget) { 3337 3321 napi_complete(napi);
+2
drivers/net/ucc_geth.h
··· 1212 1212 /* index of the first skb which hasn't been transmitted yet. */ 1213 1213 u16 skb_dirtytx[NUM_TX_QUEUES]; 1214 1214 1215 + struct sk_buff_head rx_recycle; 1216 + 1215 1217 struct ugeth_mii_info *mii_info; 1216 1218 struct phy_device *phydev; 1217 1219 phy_interface_t phy_interface;