Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: mhi-net: Add re-aggregation of fragmented packets

When device side MTU is larger than host side MTU, the packets
(typically rmnet packets) are split over multiple MHI transfers.
In that case, fragments must be re-aggregated to recover the packet
before forwarding to upper layer.

A fragmented packet results in an -EOVERFLOW MHI transaction status for
each of its fragments, except the final one. Such a transfer was
previously considered an error, and the fragments were simply dropped.

This change adds a re-aggregation mechanism using skb chaining, via
the skb frag_list.

A warning (once) is printed since this behavior usually comes from
a misconfiguration of the device (e.g. modem MTU).

Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Acked-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Link: https://lore.kernel.org/r/1612428002-12333-1-git-send-email-loic.poulain@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Loic Poulain and committed by
Jakub Kicinski
c1fcda2b d698e6a0

+64 -10
+64 -10
drivers/net/mhi_net.c
··· 32 32 struct mhi_net_dev { 33 33 struct mhi_device *mdev; 34 34 struct net_device *ndev; 35 + struct sk_buff *skbagg_head; 36 + struct sk_buff *skbagg_tail; 35 37 struct delayed_work rx_refill; 36 38 struct mhi_net_stats stats; 37 39 u32 rx_queue_sz; ··· 134 132 ndev->tx_queue_len = 1000; 135 133 } 136 134 135 + static struct sk_buff *mhi_net_skb_agg(struct mhi_net_dev *mhi_netdev, 136 + struct sk_buff *skb) 137 + { 138 + struct sk_buff *head = mhi_netdev->skbagg_head; 139 + struct sk_buff *tail = mhi_netdev->skbagg_tail; 140 + 141 + /* This is non-paged skb chaining using frag_list */ 142 + if (!head) { 143 + mhi_netdev->skbagg_head = skb; 144 + return skb; 145 + } 146 + 147 + if (!skb_shinfo(head)->frag_list) 148 + skb_shinfo(head)->frag_list = skb; 149 + else 150 + tail->next = skb; 151 + 152 + head->len += skb->len; 153 + head->data_len += skb->len; 154 + head->truesize += skb->truesize; 155 + 156 + mhi_netdev->skbagg_tail = skb; 157 + 158 + return mhi_netdev->skbagg_head; 159 + } 160 + 137 161 static void mhi_net_dl_callback(struct mhi_device *mhi_dev, 138 162 struct mhi_result *mhi_res) 139 163 { ··· 170 142 free_desc_count = mhi_get_free_desc_count(mhi_dev, DMA_FROM_DEVICE); 171 143 172 144 if (unlikely(mhi_res->transaction_status)) { 173 - dev_kfree_skb_any(skb); 174 - 175 - /* MHI layer stopping/resetting the DL channel */ 176 - if (mhi_res->transaction_status == -ENOTCONN) 145 + switch (mhi_res->transaction_status) { 146 + case -EOVERFLOW: 147 + /* Packet can not fit in one MHI buffer and has been 148 + * split over multiple MHI transfers, do re-aggregation. 149 + * That usually means the device side MTU is larger than 150 + * the host side MTU/MRU. Since this is not optimal, 151 + * print a warning (once). 
152 + */ 153 + netdev_warn_once(mhi_netdev->ndev, 154 + "Fragmented packets received, fix MTU?\n"); 155 + skb_put(skb, mhi_res->bytes_xferd); 156 + mhi_net_skb_agg(mhi_netdev, skb); 157 + break; 158 + case -ENOTCONN: 159 + /* MHI layer stopping/resetting the DL channel */ 160 + dev_kfree_skb_any(skb); 177 161 return; 178 - 179 - u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); 180 - u64_stats_inc(&mhi_netdev->stats.rx_errors); 181 - u64_stats_update_end(&mhi_netdev->stats.rx_syncp); 162 + default: 163 + /* Unknown error, simply drop */ 164 + dev_kfree_skb_any(skb); 165 + u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); 166 + u64_stats_inc(&mhi_netdev->stats.rx_errors); 167 + u64_stats_update_end(&mhi_netdev->stats.rx_syncp); 168 + } 182 169 } else { 170 + skb_put(skb, mhi_res->bytes_xferd); 171 + 172 + if (mhi_netdev->skbagg_head) { 173 + /* Aggregate the final fragment */ 174 + skb = mhi_net_skb_agg(mhi_netdev, skb); 175 + mhi_netdev->skbagg_head = NULL; 176 + } 177 + 183 178 u64_stats_update_begin(&mhi_netdev->stats.rx_syncp); 184 179 u64_stats_inc(&mhi_netdev->stats.rx_packets); 185 - u64_stats_add(&mhi_netdev->stats.rx_bytes, mhi_res->bytes_xferd); 180 + u64_stats_add(&mhi_netdev->stats.rx_bytes, skb->len); 186 181 u64_stats_update_end(&mhi_netdev->stats.rx_syncp); 187 182 188 183 switch (skb->data[0] & 0xf0) { ··· 220 169 break; 221 170 } 222 171 223 - skb_put(skb, mhi_res->bytes_xferd); 224 172 netif_rx(skb); 225 173 } 226 174 ··· 317 267 dev_set_drvdata(dev, mhi_netdev); 318 268 mhi_netdev->ndev = ndev; 319 269 mhi_netdev->mdev = mhi_dev; 270 + mhi_netdev->skbagg_head = NULL; 320 271 SET_NETDEV_DEV(ndev, &mhi_dev->dev); 321 272 SET_NETDEV_DEVTYPE(ndev, &wwan_type); 322 273 ··· 351 300 unregister_netdev(mhi_netdev->ndev); 352 301 353 302 mhi_unprepare_from_transfer(mhi_netdev->mdev); 303 + 304 + if (mhi_netdev->skbagg_head) 305 + kfree_skb(mhi_netdev->skbagg_head); 354 306 355 307 free_netdev(mhi_netdev->ndev); 356 308 }