Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PKT_SCHED] netem: packet corruption option

Here is a new feature for netem in 2.6.16. It adds the ability to
randomly corrupt packets with netem. A version was done by
Hagen Paul Pfeifer, but I redid it to handle the cases of backwards
compatibility with netlink interface and presence of hardware checksum
offload. It is useful for testing hardware offload in devices.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Stephen Hemminger; committed by David S. Miller.
c865e5d9 8cbb512e

+53 -3
+7
include/linux/pkt_sched.h
··· 429 429 TCA_NETEM_CORR, 430 430 TCA_NETEM_DELAY_DIST, 431 431 TCA_NETEM_REORDER, 432 + TCA_NETEM_CORRUPT, 432 433 __TCA_NETEM_MAX, 433 434 }; 434 435 ··· 453 452 }; 454 453 455 454 struct tc_netem_reorder 455 + { 456 + __u32 probability; 457 + __u32 correlation; 458 + }; 459 + 460 + struct tc_netem_corrupt 456 461 { 457 462 __u32 probability; 458 463 __u32 correlation;
+46 -3
net/sched/sch_netem.c
··· 25 25 26 26 #include <net/pkt_sched.h> 27 27 28 - #define VERSION "1.1" 28 + #define VERSION "1.2" 29 29 30 30 /* Network Emulation Queuing algorithm. 31 31 ==================================== ··· 65 65 u32 jitter; 66 66 u32 duplicate; 67 67 u32 reorder; 68 + u32 corrupt; 68 69 69 70 struct crndstate { 70 71 unsigned long last; 71 72 unsigned long rho; 72 - } delay_cor, loss_cor, dup_cor, reorder_cor; 73 + } delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor; 73 74 74 75 struct disttable { 75 76 u32 size; ··· 182 181 183 182 rootq->enqueue(skb2, rootq); 184 183 q->duplicate = dupsave; 184 + } 185 + 186 + /* 187 + * Randomized packet corruption. 188 + * Make copy if needed since we are modifying 189 + * If packet is going to be hardware checksummed, then 190 + * do it now in software before we mangle it. 191 + */ 192 + if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) { 193 + if (!(skb = skb_unshare(skb, GFP_ATOMIC)) 194 + || (skb->ip_summed == CHECKSUM_HW 195 + && skb_checksum_help(skb, 0))) { 196 + sch->qstats.drops++; 197 + return NET_XMIT_DROP; 198 + } 199 + 200 + skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8); 185 201 } 186 202 187 203 if (q->gap == 0 /* not doing reordering */ ··· 400 382 return 0; 401 383 } 402 384 385 + static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr) 386 + { 387 + struct netem_sched_data *q = qdisc_priv(sch); 388 + const struct tc_netem_corrupt *r = RTA_DATA(attr); 389 + 390 + if (RTA_PAYLOAD(attr) != sizeof(*r)) 391 + return -EINVAL; 392 + 393 + q->corrupt = r->probability; 394 + init_crandom(&q->corrupt_cor, r->correlation); 395 + return 0; 396 + } 397 + 398 + /* Parse netlink message to set options */ 403 399 static int netem_change(struct Qdisc *sch, struct rtattr *opt) 404 400 { 405 401 struct netem_sched_data *q = qdisc_priv(sch); ··· 464 432 if (ret) 465 433 return ret; 466 434 } 435 + 467 436 if (tb[TCA_NETEM_REORDER-1]) { 468 437 ret = get_reorder(sch, 
tb[TCA_NETEM_REORDER-1]); 469 438 if (ret) 470 439 return ret; 471 440 } 472 - } 473 441 442 + if (tb[TCA_NETEM_CORRUPT-1]) { 443 + ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]); 444 + if (ret) 445 + return ret; 446 + } 447 + } 474 448 475 449 return 0; 476 450 } ··· 602 564 struct tc_netem_qopt qopt; 603 565 struct tc_netem_corr cor; 604 566 struct tc_netem_reorder reorder; 567 + struct tc_netem_corrupt corrupt; 605 568 606 569 qopt.latency = q->latency; 607 570 qopt.jitter = q->jitter; ··· 620 581 reorder.probability = q->reorder; 621 582 reorder.correlation = q->reorder_cor.rho; 622 583 RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder); 584 + 585 + corrupt.probability = q->corrupt; 586 + corrupt.correlation = q->corrupt_cor.rho; 587 + RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); 623 588 624 589 rta->rta_len = skb->tail - b; 625 590