Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

net_sched: Add accessor function for packet length for qdiscs

Signed-off-by: Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Jussi Kivilinna, committed by David S. Miller
0abf77e5 5f86173b
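
The change itself is mechanical: every direct read of skb->len in the schedulers and actions is routed through a new inline accessor, qdisc_pkt_len(), which for now still returns skb->len, so behavior is unchanged. The value of the indirection is presumably that later patches get a single place to redefine what "packet length" means to the qdiscs (for example, a value precomputed and stashed in the skb's control block). A minimal sketch of the calling pattern, assuming the header change below is applied; update_basic_stats() is a hypothetical function shown only for illustration:

#include <net/sch_generic.h>	/* qdisc_pkt_len(), struct Qdisc */

/* Hypothetical helper: byte/packet counters now go through the
 * accessor instead of dereferencing skb->len directly. */
static void update_basic_stats(struct Qdisc *sch, struct sk_buff *skb)
{
	sch->bstats.bytes += qdisc_pkt_len(skb);	/* was: skb->len */
	sch->bstats.packets++;
}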

+69 -67 (total, 22 files changed)
+11 -6
include/net/sch_generic.h
···
 	return true;
 }
 
+static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+{
+	return skb->len;
+}
+
 static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	return sch->enqueue(skb, sch);
···
 				      struct sk_buff_head *list)
 {
 	__skb_queue_tail(list, skb);
-	sch->qstats.backlog += skb->len;
-	sch->bstats.bytes += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
 
 	return NET_XMIT_SUCCESS;
···
 	struct sk_buff *skb = __skb_dequeue(list);
 
 	if (likely(skb != NULL))
-		sch->qstats.backlog -= skb->len;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 
 	return skb;
 }
···
 	struct sk_buff *skb = __skb_dequeue_tail(list);
 
 	if (likely(skb != NULL))
-		sch->qstats.backlog -= skb->len;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 
 	return skb;
 }
···
 				  struct sk_buff_head *list)
 {
 	__skb_queue_head(list, skb);
-	sch->qstats.backlog += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
 	sch->qstats.requeues++;
 
 	return NET_XMIT_SUCCESS;
···
 	struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
 
 	if (likely(skb != NULL)) {
-		unsigned int len = skb->len;
+		unsigned int len = qdisc_pkt_len(skb);
 		kfree_skb(skb);
 		return len;
 	}
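
With the helpers above, a byte-limited qdisc never inspects skb->len itself: the limit check and the backlog/byte accounting all go through qdisc_pkt_len(). A minimal sketch under an invented fixed limit; the in-tree version of this exact pattern is the net/sched/sch_fifo.c hunk further down:

/* Sketch of a byte-limited enqueue built on the helpers above.
 * BFIFO_LIMIT_SKETCH is a made-up constant; the real bfifo keeps its
 * limit in qdisc private data (see the sch_fifo.c hunk below). */
#define BFIFO_LIMIT_SKETCH	16384	/* bytes */

static int bfifo_enqueue_sketch(struct sk_buff *skb, struct Qdisc *sch)
{
	if (sch->qstats.backlog + qdisc_pkt_len(skb) <= BFIFO_LIMIT_SKETCH)
		return qdisc_enqueue_tail(skb, sch);	/* adds to backlog/bstats */

	return qdisc_reshape_fail(skb, sch);	/* over limit: drop or reshape */
}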
+1 -1
net/sched/act_gact.c
···
 #else
 	action = gact->tcf_action;
 #endif
-	gact->tcf_bstats.bytes += skb->len;
+	gact->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	gact->tcf_bstats.packets++;
 	if (action == TC_ACT_SHOT)
 		gact->tcf_qstats.drops++;
+1 -1
net/sched/act_ipt.c
···
 	spin_lock(&ipt->tcf_lock);
 
 	ipt->tcf_tm.lastuse = jiffies;
-	ipt->tcf_bstats.bytes += skb->len;
+	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	ipt->tcf_bstats.packets++;
 
 	/* yes, we have to worry about both in and out dev
+2 -2
net/sched/act_mirred.c
···
 		if (skb2 != NULL)
 			kfree_skb(skb2);
 		m->tcf_qstats.overlimits++;
-		m->tcf_bstats.bytes += skb->len;
+		m->tcf_bstats.bytes += qdisc_pkt_len(skb);
 		m->tcf_bstats.packets++;
 		spin_unlock(&m->tcf_lock);
 		/* should we be asking for packet to be dropped?
···
 		goto bad_mirred;
 	}
 
-	m->tcf_bstats.bytes += skb2->len;
+	m->tcf_bstats.bytes += qdisc_pkt_len(skb2);
 	m->tcf_bstats.packets++;
 	if (!(at & AT_EGRESS))
 		if (m->tcfm_ok_push)
+1 -1
net/sched/act_nat.c
···
 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
 	action = p->tcf_action;
 
-	p->tcf_bstats.bytes += skb->len;
+	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	p->tcf_bstats.packets++;
 
 	spin_unlock(&p->tcf_lock);
+1 -1
net/sched/act_pedit.c
···
 bad:
 	p->tcf_qstats.overlimits++;
 done:
-	p->tcf_bstats.bytes += skb->len;
+	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	p->tcf_bstats.packets++;
 	spin_unlock(&p->tcf_lock);
 	return p->tcf_action;
+4 -4
net/sched/act_police.c
···
 
 	spin_lock(&police->tcf_lock);
 
-	police->tcf_bstats.bytes += skb->len;
+	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	police->tcf_bstats.packets++;
 
 	if (police->tcfp_ewma_rate &&
···
 		return police->tcf_action;
 	}
 
-	if (skb->len <= police->tcfp_mtu) {
+	if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
 		if (police->tcfp_R_tab == NULL) {
 			spin_unlock(&police->tcf_lock);
 			return police->tcfp_result;
···
 			ptoks = toks + police->tcfp_ptoks;
 			if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
 				ptoks = (long)L2T_P(police, police->tcfp_mtu);
-			ptoks -= L2T_P(police, skb->len);
+			ptoks -= L2T_P(police, qdisc_pkt_len(skb));
 		}
 		toks += police->tcfp_toks;
 		if (toks > (long)police->tcfp_burst)
 			toks = police->tcfp_burst;
-		toks -= L2T(police, skb->len);
+		toks -= L2T(police, qdisc_pkt_len(skb));
 		if ((toks|ptoks) >= 0) {
 			police->tcfp_t_c = now;
 			police->tcfp_toks = toks;
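
For context on the act_police hunk above: toks and ptoks are two token buckets (committed rate and optional peak rate), and L2T()/L2T_P() translate a packet length into token cost via the configured rate tables, so switching to qdisc_pkt_len() changes what the policer charges for, not how it charges. The (toks|ptoks) >= 0 test passes only if neither bucket went negative, since a negative long sets the sign bit of the OR. A toy model with invented numbers (cost_committed/cost_peak stand in for the L2T()/L2T_P() lookups):

#include <stdio.h>

int main(void)
{
	long toks = 1500, ptoks = 300;			/* invented bucket levels */
	long cost_committed = 1000, cost_peak = 400;	/* invented packet costs */

	toks -= cost_committed;		/* 500: committed bucket still fine */
	ptoks -= cost_peak;		/* -100: peak bucket overdrawn */

	/* The sign bit of either value propagates into the OR. */
	if ((toks | ptoks) >= 0)
		printf("conforms\n");
	else
		printf("exceeds: count overlimit\n");	/* taken here */
	return 0;
}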
+1 -1
net/sched/act_simple.c
···
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += skb->len;
+	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
 	d->tcf_bstats.packets++;
 
 	/* print policy string followed by _ then packet count
+2 -2
net/sched/sch_atm.c
···
 		flow->qstats.drops++;
 		return ret;
 	}
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
-	flow->bstats.bytes += skb->len;
+	flow->bstats.bytes += qdisc_pkt_len(skb);
 	flow->bstats.packets++;
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
+6 -8
net/sched/sch_cbq.c
···
 cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
-	int len = skb->len;
 	int uninitialized_var(ret);
 	struct cbq_class *cl = cbq_classify(skb, sch, &ret);
···
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
 		sch->bstats.packets++;
-		sch->bstats.bytes+=len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
···
 #ifdef CONFIG_NET_CLS_ACT
 static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 {
-	int len = skb->len;
 	struct Qdisc *sch = child->__parent;
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = q->rx_class;
···
 	if (qdisc_enqueue(skb, cl->q) == 0) {
 		sch->q.qlen++;
 		sch->bstats.packets++;
-		sch->bstats.bytes+=len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
 		return 0;
···
 		if (skb == NULL)
 			goto skip_class;
 
-		cl->deficit -= skb->len;
+		cl->deficit -= qdisc_pkt_len(skb);
 		q->tx_class = cl;
 		q->tx_borrowed = borrow;
 		if (borrow != cl) {
···
 			borrow->xstats.borrows++;
 			cl->xstats.borrows++;
 #else
-			borrow->xstats.borrows += skb->len;
-			cl->xstats.borrows += skb->len;
+			borrow->xstats.borrows += qdisc_pkt_len(skb);
+			cl->xstats.borrows += qdisc_pkt_len(skb);
 #endif
 		}
-		q->tx_len = skb->len;
+		q->tx_len = qdisc_pkt_len(skb);
 
 		if (cl->deficit <= 0) {
 			q->active[prio] = cl;
+1 -1
net/sched/sch_dsmark.c
···
 		return err;
 	}
 
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
 	sch->q.qlen++;
 
+1 -1
net/sched/sch_fifo.c
···
 {
 	struct fifo_sched_data *q = qdisc_priv(sch);
 
-	if (likely(sch->qstats.backlog + skb->len <= q->limit))
+	if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= q->limit))
 		return qdisc_enqueue_tail(skb, sch);
 
 	return qdisc_reshape_fail(skb, sch);
+6 -6
net/sched/sch_gred.c
···
 	}
 
 	q->packetsin++;
-	q->bytesin += skb->len;
+	q->bytesin += qdisc_pkt_len(skb);
 
 	if (gred_wred_mode(t))
 		gred_load_wred_set(t, q);
···
 			break;
 	}
 
-	if (q->backlog + skb->len <= q->limit) {
-		q->backlog += skb->len;
+	if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
+		q->backlog += qdisc_pkt_len(skb);
 		return qdisc_enqueue_tail(skb, sch);
 	}
 
···
 	} else {
 		if (red_is_idling(&q->parms))
 			red_end_of_idle_period(&q->parms);
-		q->backlog += skb->len;
+		q->backlog += qdisc_pkt_len(skb);
 	}
 
 	return qdisc_requeue(skb, sch);
···
 				   "VQ 0x%x after dequeue, screwing up "
 				   "backlog.\n", tc_index_to_dp(skb));
 		} else {
-			q->backlog -= skb->len;
+			q->backlog -= qdisc_pkt_len(skb);
 
 			if (!q->backlog && !gred_wred_mode(t))
 				red_start_of_idle_period(&q->parms);
···
 
 	skb = qdisc_dequeue_tail(sch);
 	if (skb) {
-		unsigned int len = skb->len;
+		unsigned int len = qdisc_pkt_len(skb);
 		struct gred_sched_data *q;
 		u16 dp = tc_index_to_dp(skb);
 
+6 -8
net/sched/sch_hfsc.c
···
 			printk("qdisc_peek_len: non work-conserving qdisc ?\n");
 		return 0;
 	}
-	len = skb->len;
+	len = qdisc_pkt_len(skb);
 	if (unlikely(sch->ops->requeue(skb, sch) != NET_XMIT_SUCCESS)) {
 		if (net_ratelimit())
 			printk("qdisc_peek_len: failed to requeue\n");
···
 hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct hfsc_class *cl;
-	unsigned int len;
 	int err;
 
 	cl = hfsc_classify(skb, sch, &err);
···
 		return err;
 	}
 
-	len = skb->len;
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		cl->qstats.drops++;
···
 	}
 
 	if (cl->qdisc->q.qlen == 1)
-		set_active(cl, len);
+		set_active(cl, qdisc_pkt_len(skb));
 
 	cl->bstats.packets++;
-	cl->bstats.bytes += len;
+	cl->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
-	sch->bstats.bytes += len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
···
 		return NULL;
 	}
 
-	update_vf(cl, skb->len, cur_time);
+	update_vf(cl, qdisc_pkt_len(skb), cur_time);
 	if (realtime)
-		cl->cl_cumul += skb->len;
+		cl->cl_cumul += qdisc_pkt_len(skb);
 
 	if (cl->qdisc->q.qlen != 0) {
 		if (cl->cl_flags & HFSC_RSC) {
+5 -4
net/sched/sch_htb.c
···
 	} else {
 		cl->bstats.packets +=
 			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += skb->len;
+		cl->bstats.bytes += qdisc_pkt_len(skb);
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
 	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	return NET_XMIT_SUCCESS;
 }
···
 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 			     int level, struct sk_buff *skb)
 {
-	int bytes = skb->len;
+	int bytes = qdisc_pkt_len(skb);
 	long toks, diff;
 	enum htb_cmode old_mode;
···
 	} while (cl != start);
 
 	if (likely(skb != NULL)) {
-		if ((cl->un.leaf.deficit[level] -= skb->len) < 0) {
+		cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
+		if (cl->un.leaf.deficit[level] < 0) {
 			cl->un.leaf.deficit[level] += cl->un.leaf.quantum;
 			htb_next_rb_node((level ? cl->parent->un.inner.ptr : q->
 					  ptr[0]) + prio);
+1 -1
net/sched/sch_ingress.c
···
 	result = tc_classify(skb, p->filter_list, &res);
 
 	sch->bstats.packets++;
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
+3 -3
net/sched/sch_netem.c
···
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 	} else
 		sch->qstats.drops++;
···
 
 	__skb_queue_after(list, skb, nskb);
 
-	sch->qstats.backlog += nskb->len;
-	sch->bstats.bytes += nskb->len;
+	sch->qstats.backlog += qdisc_pkt_len(nskb);
+	sch->bstats.bytes += qdisc_pkt_len(nskb);
 	sch->bstats.packets++;
 
 	return NET_XMIT_SUCCESS;
+1 -1
net/sched/sch_prio.c
···
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
+1 -1
net/sched/sch_red.c
···
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		sch->q.qlen++;
 	} else {
+8 -8
net/sched/sch_sfq.c
···
 	if (d > 1) {
 		sfq_index x = q->dep[d + SFQ_DEPTH].next;
 		skb = q->qs[x].prev;
-		len = skb->len;
+		len = qdisc_pkt_len(skb);
 		__skb_unlink(skb, &q->qs[x]);
 		kfree_skb(skb);
 		sfq_dec(q, x);
···
 		q->next[q->tail] = q->next[d];
 		q->allot[q->next[d]] += q->quantum;
 		skb = q->qs[d].prev;
-		len = skb->len;
+		len = qdisc_pkt_len(skb);
 		__skb_unlink(skb, &q->qs[d]);
 		kfree_skb(skb);
 		sfq_dec(q, d);
···
 	if (q->qs[x].qlen >= q->limit)
 		return qdisc_drop(skb, sch);
 
-	sch->qstats.backlog += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
 	__skb_queue_tail(&q->qs[x], skb);
 	sfq_inc(q, x);
 	if (q->qs[x].qlen == 1) {		/* The flow is new */
···
 		}
 	}
 	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		return 0;
 	}
···
 		q->hash[x] = hash;
 	}
 
-	sch->qstats.backlog += skb->len;
+	sch->qstats.backlog += qdisc_pkt_len(skb);
 	__skb_queue_head(&q->qs[x], skb);
 	/* If selected queue has length q->limit+1, this means that
 	 * all another queues are empty and we do simple tail drop.
···
 		skb = q->qs[x].prev;
 		__skb_unlink(skb, &q->qs[x]);
 		sch->qstats.drops++;
-		sch->qstats.backlog -= skb->len;
+		sch->qstats.backlog -= qdisc_pkt_len(skb);
 		kfree_skb(skb);
 		return NET_XMIT_CN;
 	}
···
 	skb = __skb_dequeue(&q->qs[a]);
 	sfq_dec(q, a);
 	sch->q.qlen--;
-	sch->qstats.backlog -= skb->len;
+	sch->qstats.backlog -= qdisc_pkt_len(skb);
 
 	/* Is the slot empty? */
 	if (q->qs[a].qlen == 0) {
···
 		}
 		q->next[q->tail] = a;
 		q->allot[a] += q->quantum;
-	} else if ((q->allot[a] -= skb->len) <= 0) {
+	} else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
 		q->tail = a;
 		a = q->next[a];
 		q->allot[a] += q->quantum;
+3 -3
net/sched/sch_tbf.c
···
 	struct tbf_sched_data *q = qdisc_priv(sch);
 	int ret;
 
-	if (skb->len > q->max_size) {
+	if (qdisc_pkt_len(skb) > q->max_size) {
 		sch->qstats.drops++;
 #ifdef CONFIG_NET_CLS_ACT
 		if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
···
 	}
 
 	sch->q.qlen++;
-	sch->bstats.bytes += skb->len;
+	sch->bstats.bytes += qdisc_pkt_len(skb);
 	sch->bstats.packets++;
 	return 0;
 }
···
 		psched_time_t now;
 		long toks;
 		long ptoks = 0;
-		unsigned int len = skb->len;
+		unsigned int len = qdisc_pkt_len(skb);
 
 		now = psched_get_time();
 		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);
+3 -3
net/sched/sch_teql.c
···
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += skb->len;
+		sch->bstats.bytes += qdisc_pkt_len(skb);
 		sch->bstats.packets++;
 		return 0;
 	}
···
 	struct Qdisc *start, *q;
 	int busy;
 	int nores;
-	int len = skb->len;
 	int subq = skb_get_queue_mapping(skb);
 	struct sk_buff *skb_res = NULL;
···
 					master->slaves = NEXT_SLAVE(q);
 					netif_wake_queue(dev);
 					master->stats.tx_packets++;
-					master->stats.tx_bytes += len;
+					master->stats.tx_bytes +=
+						qdisc_pkt_len(skb);
 					return 0;
 				}
 				netif_tx_unlock(slave);