Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

caif-hsi: robust frame aggregation for HSI

Implement an aggregation algorithm, combining more data into a single
HSI transfer. 4 different traffic categories are supported:
1. TC_PRIO_CONTROL .. TC_PRIO_MAX (CTL)
2. TC_PRIO_INTERACTIVE (VO)
3. TC_PRIO_INTERACTIVE_BULK (VI)
4. TC_PRIO_BESTEFFORT, TC_PRIO_BULK, TC_PRIO_FILLER (BEBK)

Signed-off-by: Dmitry Tarnyagin <dmitry.tarnyagin@stericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Dmitry Tarnyagin; committed by David S. Miller.
ece367d5 44764812

+205 -57
+188 -55
drivers/net/caif/caif_hsi.c
··· 19 19 #include <linux/if_arp.h> 20 20 #include <linux/timer.h> 21 21 #include <linux/rtnetlink.h> 22 + #include <linux/pkt_sched.h> 22 23 #include <net/caif/caif_layer.h> 23 24 #include <net/caif/caif_hsi.h> 24 25 ··· 34 33 static int inactivity_timeout = 1000; 35 34 module_param(inactivity_timeout, int, S_IRUGO | S_IWUSR); 36 35 MODULE_PARM_DESC(inactivity_timeout, "Inactivity timeout on HSI, ms."); 36 + 37 + static int aggregation_timeout = 1; 38 + module_param(aggregation_timeout, int, S_IRUGO | S_IWUSR); 39 + MODULE_PARM_DESC(aggregation_timeout, "Aggregation timeout on HSI, ms."); 37 40 38 41 /* 39 42 * HSI padding options. ··· 91 86 queue_work(cfhsi->wq, &cfhsi->wake_down_work); 92 87 } 93 88 89 + static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi, 90 + const struct sk_buff *skb, 91 + int direction) 92 + { 93 + struct caif_payload_info *info; 94 + int hpad, tpad, len; 95 + 96 + info = (struct caif_payload_info *)&skb->cb; 97 + hpad = 1 + PAD_POW2((info->hdr_len + 1), hsi_head_align); 98 + tpad = PAD_POW2((skb->len + hpad), hsi_tail_align); 99 + len = skb->len + hpad + tpad; 100 + 101 + if (direction > 0) 102 + cfhsi->aggregation_len += len; 103 + else if (direction < 0) 104 + cfhsi->aggregation_len -= len; 105 + } 106 + 107 + static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi) 108 + { 109 + int i; 110 + 111 + if (cfhsi->aggregation_timeout < 0) 112 + return true; 113 + 114 + for (i = 0; i < CFHSI_PRIO_BEBK; ++i) { 115 + if (cfhsi->qhead[i].qlen) 116 + return true; 117 + } 118 + 119 + /* TODO: Use aggregation_len instead */ 120 + if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS) 121 + return true; 122 + 123 + return false; 124 + } 125 + 126 + static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi) 127 + { 128 + struct sk_buff *skb; 129 + int i; 130 + 131 + for (i = 0; i < CFHSI_PRIO_LAST; ++i) { 132 + skb = skb_dequeue(&cfhsi->qhead[i]); 133 + if (skb) 134 + break; 135 + } 136 + 137 + return skb; 138 + } 139 + 140 + static int 
cfhsi_tx_queue_len(struct cfhsi *cfhsi) 141 + { 142 + int i, len = 0; 143 + for (i = 0; i < CFHSI_PRIO_LAST; ++i) 144 + len += skb_queue_len(&cfhsi->qhead[i]); 145 + return len; 146 + } 147 + 94 148 static void cfhsi_abort_tx(struct cfhsi *cfhsi) 95 149 { 96 150 struct sk_buff *skb; 97 151 98 152 for (;;) { 99 153 spin_lock_bh(&cfhsi->lock); 100 - skb = skb_dequeue(&cfhsi->qhead); 154 + skb = cfhsi_dequeue(cfhsi); 101 155 if (!skb) 102 156 break; 103 157 104 158 cfhsi->ndev->stats.tx_errors++; 105 159 cfhsi->ndev->stats.tx_dropped++; 160 + cfhsi_update_aggregation_stats(cfhsi, skb, -1); 106 161 spin_unlock_bh(&cfhsi->lock); 107 162 kfree_skb(skb); 108 163 } 109 164 cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 110 165 if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 111 - mod_timer(&cfhsi->timer, 166 + mod_timer(&cfhsi->inactivity_timer, 112 167 jiffies + cfhsi->inactivity_timeout); 113 168 spin_unlock_bh(&cfhsi->lock); 114 169 } ··· 234 169 struct sk_buff *skb; 235 170 u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; 236 171 237 - skb = skb_dequeue(&cfhsi->qhead); 172 + skb = cfhsi_dequeue(cfhsi); 238 173 if (!skb) 239 174 return 0; 240 175 ··· 261 196 pemb += hpad; 262 197 263 198 /* Update network statistics. */ 199 + spin_lock_bh(&cfhsi->lock); 264 200 cfhsi->ndev->stats.tx_packets++; 265 201 cfhsi->ndev->stats.tx_bytes += skb->len; 202 + cfhsi_update_aggregation_stats(cfhsi, skb, -1); 203 + spin_unlock_bh(&cfhsi->lock); 266 204 267 205 /* Copy in embedded CAIF frame. */ 268 206 skb_copy_bits(skb, 0, pemb, skb->len); 207 + 208 + /* Consume the SKB */ 269 209 consume_skb(skb); 270 210 skb = NULL; 271 211 } ··· 284 214 int tpad = 0; 285 215 286 216 if (!skb) 287 - skb = skb_dequeue(&cfhsi->qhead); 217 + skb = cfhsi_dequeue(cfhsi); 288 218 289 219 if (!skb) 290 220 break; ··· 303 233 pfrm += hpad; 304 234 305 235 /* Update network statistics. 
*/ 236 + spin_lock_bh(&cfhsi->lock); 306 237 cfhsi->ndev->stats.tx_packets++; 307 238 cfhsi->ndev->stats.tx_bytes += skb->len; 239 + cfhsi_update_aggregation_stats(cfhsi, skb, -1); 240 + spin_unlock_bh(&cfhsi->lock); 308 241 309 242 /* Copy in CAIF frame. */ 310 243 skb_copy_bits(skb, 0, pfrm, skb->len); ··· 317 244 318 245 /* Update frame pointer. */ 319 246 pfrm += skb->len + tpad; 247 + 248 + /* Consume the SKB */ 320 249 consume_skb(skb); 321 250 skb = NULL; 322 251 ··· 333 258 } 334 259 335 260 /* Check if we can piggy-back another descriptor. */ 336 - skb = skb_peek(&cfhsi->qhead); 337 - if (skb) 261 + if (cfhsi_can_send_aggregate(cfhsi)) 338 262 desc->header |= CFHSI_PIGGY_DESC; 339 263 else 340 264 desc->header &= ~CFHSI_PIGGY_DESC; ··· 341 267 return CFHSI_DESC_SZ + pld_len; 342 268 } 343 269 344 - static void cfhsi_tx_done(struct cfhsi *cfhsi) 270 + static void cfhsi_start_tx(struct cfhsi *cfhsi) 345 271 { 346 - struct cfhsi_desc *desc = NULL; 347 - int len = 0; 348 - int res; 272 + struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; 273 + int len, res; 349 274 350 275 dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 351 276 352 277 if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 353 278 return; 354 279 355 - desc = (struct cfhsi_desc *)cfhsi->tx_buf; 356 - 357 280 do { 358 - /* 359 - * Send flow on if flow off has been previously signalled 360 - * and number of packets is below low water mark. 361 - */ 362 - spin_lock_bh(&cfhsi->lock); 363 - if (cfhsi->flow_off_sent && 364 - cfhsi->qhead.qlen <= cfhsi->q_low_mark && 365 - cfhsi->cfdev.flowctrl) { 366 - 367 - cfhsi->flow_off_sent = 0; 368 - cfhsi->cfdev.flowctrl(cfhsi->ndev, ON); 369 - } 370 - spin_unlock_bh(&cfhsi->lock); 371 - 372 281 /* Create HSI frame. 
*/ 373 - do { 374 - len = cfhsi_tx_frm(desc, cfhsi); 375 - if (!len) { 376 - spin_lock_bh(&cfhsi->lock); 377 - if (unlikely(skb_peek(&cfhsi->qhead))) { 378 - spin_unlock_bh(&cfhsi->lock); 379 - continue; 380 - } 381 - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 382 - /* Start inactivity timer. */ 383 - mod_timer(&cfhsi->timer, 384 - jiffies + cfhsi->inactivity_timeout); 282 + len = cfhsi_tx_frm(desc, cfhsi); 283 + if (!len) { 284 + spin_lock_bh(&cfhsi->lock); 285 + if (unlikely(cfhsi_tx_queue_len(cfhsi))) { 385 286 spin_unlock_bh(&cfhsi->lock); 386 - goto done; 287 + res = -EAGAIN; 288 + continue; 387 289 } 388 - } while (!len); 290 + cfhsi->tx_state = CFHSI_TX_STATE_IDLE; 291 + /* Start inactivity timer. */ 292 + mod_timer(&cfhsi->inactivity_timer, 293 + jiffies + cfhsi->inactivity_timeout); 294 + spin_unlock_bh(&cfhsi->lock); 295 + break; 296 + } 389 297 390 298 /* Set up new transfer. */ 391 299 res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev); 392 - if (WARN_ON(res < 0)) { 300 + if (WARN_ON(res < 0)) 393 301 dev_err(&cfhsi->ndev->dev, "%s: TX error %d.\n", 394 302 __func__, res); 395 - } 396 303 } while (res < 0); 304 + } 397 305 398 - done: 306 + static void cfhsi_tx_done(struct cfhsi *cfhsi) 307 + { 308 + dev_dbg(&cfhsi->ndev->dev, "%s.\n", __func__); 309 + 310 + if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) 311 + return; 312 + 313 + /* 314 + * Send flow on if flow off has been previously signalled 315 + * and number of packets is below low water mark. 
316 + */ 317 + spin_lock_bh(&cfhsi->lock); 318 + if (cfhsi->flow_off_sent && 319 + cfhsi_tx_queue_len(cfhsi) <= cfhsi->q_low_mark && 320 + cfhsi->cfdev.flowctrl) { 321 + 322 + cfhsi->flow_off_sent = 0; 323 + cfhsi->cfdev.flowctrl(cfhsi->ndev, ON); 324 + } 325 + 326 + if (cfhsi_can_send_aggregate(cfhsi)) { 327 + spin_unlock_bh(&cfhsi->lock); 328 + cfhsi_start_tx(cfhsi); 329 + } else { 330 + mod_timer(&cfhsi->aggregation_timer, 331 + jiffies + cfhsi->aggregation_timeout); 332 + spin_unlock_bh(&cfhsi->lock); 333 + } 334 + 399 335 return; 400 336 } 401 337 ··· 644 560 645 561 /* Update inactivity timer if pending. */ 646 562 spin_lock_bh(&cfhsi->lock); 647 - mod_timer_pending(&cfhsi->timer, 563 + mod_timer_pending(&cfhsi->inactivity_timer, 648 564 jiffies + cfhsi->inactivity_timeout); 649 565 spin_unlock_bh(&cfhsi->lock); 650 566 ··· 877 793 878 794 spin_lock_bh(&cfhsi->lock); 879 795 880 - /* Resume transmit if queue is not empty. */ 881 - if (!skb_peek(&cfhsi->qhead)) { 796 + /* Resume transmit if queues are not empty. */ 797 + if (!cfhsi_tx_queue_len(cfhsi)) { 882 798 dev_dbg(&cfhsi->ndev->dev, "%s: Peer wake, start timer.\n", 883 799 __func__); 884 800 /* Start inactivity timer. 
*/ 885 - mod_timer(&cfhsi->timer, 801 + mod_timer(&cfhsi->inactivity_timer, 886 802 jiffies + cfhsi->inactivity_timeout); 887 803 spin_unlock_bh(&cfhsi->lock); 888 804 return; ··· 1018 934 wake_up_interruptible(&cfhsi->wake_down_wait); 1019 935 } 1020 936 937 + static void cfhsi_aggregation_tout(unsigned long arg) 938 + { 939 + struct cfhsi *cfhsi = (struct cfhsi *)arg; 940 + 941 + dev_dbg(&cfhsi->ndev->dev, "%s.\n", 942 + __func__); 943 + 944 + cfhsi_start_tx(cfhsi); 945 + } 946 + 1021 947 static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) 1022 948 { 1023 949 struct cfhsi *cfhsi = NULL; 1024 950 int start_xfer = 0; 1025 951 int timer_active; 952 + int prio; 1026 953 1027 954 if (!dev) 1028 955 return -EINVAL; 1029 956 1030 957 cfhsi = netdev_priv(dev); 1031 958 959 + switch (skb->priority) { 960 + case TC_PRIO_BESTEFFORT: 961 + case TC_PRIO_FILLER: 962 + case TC_PRIO_BULK: 963 + prio = CFHSI_PRIO_BEBK; 964 + break; 965 + case TC_PRIO_INTERACTIVE_BULK: 966 + prio = CFHSI_PRIO_VI; 967 + break; 968 + case TC_PRIO_INTERACTIVE: 969 + prio = CFHSI_PRIO_VO; 970 + break; 971 + case TC_PRIO_CONTROL: 972 + default: 973 + prio = CFHSI_PRIO_CTL; 974 + break; 975 + } 976 + 1032 977 spin_lock_bh(&cfhsi->lock); 1033 978 1034 - skb_queue_tail(&cfhsi->qhead, skb); 979 + /* Update aggregation statistics */ 980 + cfhsi_update_aggregation_stats(cfhsi, skb, 1); 981 + 982 + /* Queue the SKB */ 983 + skb_queue_tail(&cfhsi->qhead[prio], skb); 1035 984 1036 985 /* Sanity check; xmit should not be called after unregister_netdev */ 1037 986 if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { ··· 1075 958 1076 959 /* Send flow off if number of packets is above high water mark. 
*/ 1077 960 if (!cfhsi->flow_off_sent && 1078 - cfhsi->qhead.qlen > cfhsi->q_high_mark && 961 + cfhsi_tx_queue_len(cfhsi) > cfhsi->q_high_mark && 1079 962 cfhsi->cfdev.flowctrl) { 1080 963 cfhsi->flow_off_sent = 1; 1081 964 cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); ··· 1087 970 } 1088 971 1089 972 if (!start_xfer) { 973 + /* Send aggregate if it is possible */ 974 + bool aggregate_ready = 975 + cfhsi_can_send_aggregate(cfhsi) && 976 + del_timer(&cfhsi->aggregation_timer) > 0; 1090 977 spin_unlock_bh(&cfhsi->lock); 978 + if (aggregate_ready) 979 + cfhsi_start_tx(cfhsi); 1091 980 return 0; 1092 981 } 1093 982 1094 983 /* Delete inactivity timer if started. */ 1095 - timer_active = del_timer_sync(&cfhsi->timer); 984 + timer_active = del_timer_sync(&cfhsi->inactivity_timer); 1096 985 1097 986 spin_unlock_bh(&cfhsi->lock); 1098 987 ··· 1149 1026 1150 1027 static void cfhsi_setup(struct net_device *dev) 1151 1028 { 1029 + int i; 1152 1030 struct cfhsi *cfhsi = netdev_priv(dev); 1153 1031 dev->features = 0; 1154 1032 dev->netdev_ops = &cfhsi_ops; ··· 1158 1034 dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; 1159 1035 dev->tx_queue_len = 0; 1160 1036 dev->destructor = free_netdev; 1161 - skb_queue_head_init(&cfhsi->qhead); 1037 + for (i = 0; i < CFHSI_PRIO_LAST; ++i) 1038 + skb_queue_head_init(&cfhsi->qhead[i]); 1162 1039 cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; 1163 1040 cfhsi->cfdev.use_frag = false; 1164 1041 cfhsi->cfdev.use_stx = false; ··· 1236 1111 cfhsi->inactivity_timeout = NEXT_TIMER_MAX_DELTA; 1237 1112 } 1238 1113 1114 + /* Initialize aggregation timeout */ 1115 + cfhsi->aggregation_timeout = aggregation_timeout; 1116 + 1239 1117 /* Initialize recieve vaiables. */ 1240 1118 cfhsi->rx_ptr = cfhsi->rx_buf; 1241 1119 cfhsi->rx_len = CFHSI_DESC_SZ; ··· 1278 1150 init_waitqueue_head(&cfhsi->flush_fifo_wait); 1279 1151 1280 1152 /* Setup the inactivity timer. 
*/ 1281 - init_timer(&cfhsi->timer); 1282 - cfhsi->timer.data = (unsigned long)cfhsi; 1283 - cfhsi->timer.function = cfhsi_inactivity_tout; 1153 + init_timer(&cfhsi->inactivity_timer); 1154 + cfhsi->inactivity_timer.data = (unsigned long)cfhsi; 1155 + cfhsi->inactivity_timer.function = cfhsi_inactivity_tout; 1284 1156 /* Setup the slowpath RX timer. */ 1285 1157 init_timer(&cfhsi->rx_slowpath_timer); 1286 1158 cfhsi->rx_slowpath_timer.data = (unsigned long)cfhsi; 1287 1159 cfhsi->rx_slowpath_timer.function = cfhsi_rx_slowpath; 1160 + /* Setup the aggregation timer. */ 1161 + init_timer(&cfhsi->aggregation_timer); 1162 + cfhsi->aggregation_timer.data = (unsigned long)cfhsi; 1163 + cfhsi->aggregation_timer.function = cfhsi_aggregation_tout; 1288 1164 1289 1165 /* Add CAIF HSI device to list. */ 1290 1166 spin_lock(&cfhsi_list_lock); ··· 1354 1222 flush_workqueue(cfhsi->wq); 1355 1223 1356 1224 /* Delete timers if pending */ 1357 - del_timer_sync(&cfhsi->timer); 1225 + del_timer_sync(&cfhsi->inactivity_timer); 1358 1226 del_timer_sync(&cfhsi->rx_slowpath_timer); 1227 + del_timer_sync(&cfhsi->aggregation_timer); 1359 1228 1360 1229 /* Cancel pending RX request (if any) */ 1361 1230 cfhsi->dev->cfhsi_rx_cancel(cfhsi->dev);
+17 -2
include/net/caif/caif_hsi.h
··· 123 123 bool piggy_desc; 124 124 }; 125 125 126 + /* Priority mapping */ 127 + enum { 128 + CFHSI_PRIO_CTL = 0, 129 + CFHSI_PRIO_VI, 130 + CFHSI_PRIO_VO, 131 + CFHSI_PRIO_BEBK, 132 + CFHSI_PRIO_LAST, 133 + }; 134 + 126 135 /* Structure implemented by CAIF HSI drivers. */ 127 136 struct cfhsi { 128 137 struct caif_dev_common cfdev; 129 138 struct net_device *ndev; 130 139 struct platform_device *pdev; 131 - struct sk_buff_head qhead; 140 + struct sk_buff_head qhead[CFHSI_PRIO_LAST]; 132 141 struct cfhsi_drv drv; 133 142 struct cfhsi_dev *dev; 134 143 int tx_state; ··· 160 151 wait_queue_head_t wake_up_wait; 161 152 wait_queue_head_t wake_down_wait; 162 153 wait_queue_head_t flush_fifo_wait; 163 - struct timer_list timer; 154 + struct timer_list inactivity_timer; 164 155 struct timer_list rx_slowpath_timer; 156 + 157 + /* TX aggregation */ 158 + unsigned long aggregation_timeout; 159 + int aggregation_len; 160 + struct timer_list aggregation_timer; 161 + 165 162 unsigned long bits; 166 163 }; 167 164