Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sched: Dump configuration and statistics of dualpi2 qdisc

The configuration and statistics dump of the DualPI2 Qdisc provides
information related to both queues, such as packet numbers and queuing
delays in the L-queue and C-queue, as well as general information such as
probability value, WRR credits, memory usage, packet marking counters, max
queue size, etc.

The following patch includes enqueue/dequeue for DualPI2.

Signed-off-by: Chia-Yu Chang <chia-yu.chang@nokia-bell-labs.com>
Link: https://patch.msgid.link/20250722095915.24485-3-chia-yu.chang@nokia-bell-labs.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

authored by

Chia-Yu Chang and committed by
Jakub Kicinski
d4de8bff 320d031a

+152 -17
+15
include/uapi/linux/pkt_sched.h
··· 1264 1264 1265 1265 #define TCA_DUALPI2_MAX (__TCA_DUALPI2_MAX - 1) 1266 1266 1267 + struct tc_dualpi2_xstats { 1268 + __u32 prob; /* current probability */ 1269 + __u32 delay_c; /* current delay in C queue */ 1270 + __u32 delay_l; /* current delay in L queue */ 1271 + __u32 packets_in_c; /* number of packets enqueued in C queue */ 1272 + __u32 packets_in_l; /* number of packets enqueued in L queue */ 1273 + __u32 maxq; /* maximum queue size */ 1274 + __u32 ecn_mark; /* packets marked with ECN */ 1275 + __u32 step_marks; /* ECN marks due to the step AQM */ 1276 + __s32 credit; /* current c_protection credit */ 1277 + __u32 memory_used; /* Memory used by both queues */ 1278 + __u32 max_memory_used; /* Maximum used memory */ 1279 + __u32 memory_limit; /* Memory limit of both queues */ 1280 + }; 1281 + 1267 1282 #endif
+137 -17
net/sched/sch_dualpi2.c
··· 123 123 return tmp; 124 124 } 125 125 126 + static u32 dualpi2_unscale_alpha_beta(u32 param) 127 + { 128 + u64 tmp = ((u64)param * NSEC_PER_SEC << ALPHA_BETA_SCALING); 129 + 130 + do_div(tmp, MAX_PROB); 131 + return tmp; 132 + } 133 + 126 134 static ktime_t next_pi2_timeout(struct dualpi2_sched_data *q) 127 135 { 128 136 return ktime_add_ns(ktime_get_ns(), q->pi2_tupdate); ··· 235 227 return lower_32_bits(ns); 236 228 } 237 229 230 + static u32 convert_ns_to_usec(u64 ns) 231 + { 232 + do_div(ns, NSEC_PER_USEC); 233 + if (upper_32_bits(ns)) 234 + return U32_MAX; 235 + 236 + return lower_32_bits(ns); 237 + } 238 + 238 239 static enum hrtimer_restart dualpi2_timer(struct hrtimer *timer) 239 240 { 240 241 struct dualpi2_sched_data *q = timer_container_of(q, timer, pi2_timer); ··· 321 304 if (tb[TCA_DUALPI2_LIMIT]) { 322 305 u32 limit = nla_get_u32(tb[TCA_DUALPI2_LIMIT]); 323 306 324 - sch->limit = limit; 325 - q->memory_limit = get_memory_limit(sch, limit); 307 + WRITE_ONCE(sch->limit, limit); 308 + WRITE_ONCE(q->memory_limit, get_memory_limit(sch, limit)); 326 309 } 327 310 328 311 if (tb[TCA_DUALPI2_MEMORY_LIMIT]) 329 - q->memory_limit = nla_get_u32(tb[TCA_DUALPI2_MEMORY_LIMIT]); 312 + WRITE_ONCE(q->memory_limit, 313 + nla_get_u32(tb[TCA_DUALPI2_MEMORY_LIMIT])); 330 314 331 315 if (tb[TCA_DUALPI2_TARGET]) { 332 316 u64 target = nla_get_u32(tb[TCA_DUALPI2_TARGET]); 333 317 334 - q->pi2_target = target * NSEC_PER_USEC; 318 + WRITE_ONCE(q->pi2_target, target * NSEC_PER_USEC); 335 319 } 336 320 337 321 if (tb[TCA_DUALPI2_TUPDATE]) { 338 322 u64 tupdate = nla_get_u32(tb[TCA_DUALPI2_TUPDATE]); 339 323 340 - q->pi2_tupdate = convert_us_to_nsec(tupdate); 324 + WRITE_ONCE(q->pi2_tupdate, convert_us_to_nsec(tupdate)); 341 325 } 342 326 343 327 if (tb[TCA_DUALPI2_ALPHA]) { 344 328 u32 alpha = nla_get_u32(tb[TCA_DUALPI2_ALPHA]); 345 329 346 - q->pi2_alpha = dualpi2_scale_alpha_beta(alpha); 330 + WRITE_ONCE(q->pi2_alpha, dualpi2_scale_alpha_beta(alpha)); 347 331 } 348 332 349 
333 if (tb[TCA_DUALPI2_BETA]) { 350 334 u32 beta = nla_get_u32(tb[TCA_DUALPI2_BETA]); 351 335 352 - q->pi2_beta = dualpi2_scale_alpha_beta(beta); 336 + WRITE_ONCE(q->pi2_beta, dualpi2_scale_alpha_beta(beta)); 353 337 } 354 338 355 339 if (tb[TCA_DUALPI2_STEP_THRESH_PKTS]) { 356 340 u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_PKTS]); 357 341 358 - q->step_in_packets = true; 359 - q->step_thresh = step_th; 342 + WRITE_ONCE(q->step_in_packets, true); 343 + WRITE_ONCE(q->step_thresh, step_th); 360 344 } else if (tb[TCA_DUALPI2_STEP_THRESH_US]) { 361 345 u32 step_th = nla_get_u32(tb[TCA_DUALPI2_STEP_THRESH_US]); 362 346 363 - q->step_in_packets = false; 364 - q->step_thresh = convert_us_to_nsec(step_th); 347 + WRITE_ONCE(q->step_in_packets, false); 348 + WRITE_ONCE(q->step_thresh, convert_us_to_nsec(step_th)); 365 349 } 366 350 367 351 if (tb[TCA_DUALPI2_MIN_QLEN_STEP]) 368 - q->min_qlen_step = nla_get_u32(tb[TCA_DUALPI2_MIN_QLEN_STEP]); 352 + WRITE_ONCE(q->min_qlen_step, 353 + nla_get_u32(tb[TCA_DUALPI2_MIN_QLEN_STEP])); 369 354 370 355 if (tb[TCA_DUALPI2_COUPLING]) { 371 356 u8 coupling = nla_get_u8(tb[TCA_DUALPI2_COUPLING]); 372 357 373 - q->coupling_factor = coupling; 358 + WRITE_ONCE(q->coupling_factor, coupling); 374 359 } 375 360 376 361 if (tb[TCA_DUALPI2_DROP_OVERLOAD]) { 377 362 u8 drop_overload = nla_get_u8(tb[TCA_DUALPI2_DROP_OVERLOAD]); 378 363 379 - q->drop_overload = (bool)drop_overload; 364 + WRITE_ONCE(q->drop_overload, (bool)drop_overload); 380 365 } 381 366 382 367 if (tb[TCA_DUALPI2_DROP_EARLY]) { 383 368 u8 drop_early = nla_get_u8(tb[TCA_DUALPI2_DROP_EARLY]); 384 369 385 - q->drop_early = (bool)drop_early; 370 + WRITE_ONCE(q->drop_early, (bool)drop_early); 386 371 } 387 372 388 373 if (tb[TCA_DUALPI2_C_PROTECTION]) { ··· 396 377 if (tb[TCA_DUALPI2_ECN_MASK]) { 397 378 u8 ecn_mask = nla_get_u8(tb[TCA_DUALPI2_ECN_MASK]); 398 379 399 - q->ecn_mask = ecn_mask; 380 + WRITE_ONCE(q->ecn_mask, ecn_mask); 400 381 } 401 382 402 383 if 
(tb[TCA_DUALPI2_SPLIT_GSO]) { 403 384 u8 split_gso = nla_get_u8(tb[TCA_DUALPI2_SPLIT_GSO]); 404 385 405 - q->split_gso = (bool)split_gso; 386 + WRITE_ONCE(q->split_gso, (bool)split_gso); 406 387 } 407 388 408 389 old_qlen = qdisc_qlen(sch); ··· 477 458 hrtimer_start(&q->pi2_timer, next_pi2_timeout(q), 478 459 HRTIMER_MODE_ABS_PINNED); 479 460 return 0; 461 + } 462 + 463 + static int dualpi2_dump(struct Qdisc *sch, struct sk_buff *skb) 464 + { 465 + struct dualpi2_sched_data *q = qdisc_priv(sch); 466 + struct nlattr *opts; 467 + bool step_in_pkts; 468 + u32 step_th; 469 + 470 + step_in_pkts = READ_ONCE(q->step_in_packets); 471 + step_th = READ_ONCE(q->step_thresh); 472 + 473 + opts = nla_nest_start_noflag(skb, TCA_OPTIONS); 474 + if (!opts) 475 + goto nla_put_failure; 476 + 477 + if (step_in_pkts && 478 + (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) || 479 + nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT, 480 + READ_ONCE(q->memory_limit)) || 481 + nla_put_u32(skb, TCA_DUALPI2_TARGET, 482 + convert_ns_to_usec(READ_ONCE(q->pi2_target))) || 483 + nla_put_u32(skb, TCA_DUALPI2_TUPDATE, 484 + convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) || 485 + nla_put_u32(skb, TCA_DUALPI2_ALPHA, 486 + dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) || 487 + nla_put_u32(skb, TCA_DUALPI2_BETA, 488 + dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) || 489 + nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_PKTS, step_th) || 490 + nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP, 491 + READ_ONCE(q->min_qlen_step)) || 492 + nla_put_u8(skb, TCA_DUALPI2_COUPLING, 493 + READ_ONCE(q->coupling_factor)) || 494 + nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD, 495 + READ_ONCE(q->drop_overload)) || 496 + nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY, 497 + READ_ONCE(q->drop_early)) || 498 + nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION, 499 + READ_ONCE(q->c_protection_wc)) || 500 + nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) || 501 + nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, 
READ_ONCE(q->split_gso)))) 502 + goto nla_put_failure; 503 + 504 + if (!step_in_pkts && 505 + (nla_put_u32(skb, TCA_DUALPI2_LIMIT, READ_ONCE(sch->limit)) || 506 + nla_put_u32(skb, TCA_DUALPI2_MEMORY_LIMIT, 507 + READ_ONCE(q->memory_limit)) || 508 + nla_put_u32(skb, TCA_DUALPI2_TARGET, 509 + convert_ns_to_usec(READ_ONCE(q->pi2_target))) || 510 + nla_put_u32(skb, TCA_DUALPI2_TUPDATE, 511 + convert_ns_to_usec(READ_ONCE(q->pi2_tupdate))) || 512 + nla_put_u32(skb, TCA_DUALPI2_ALPHA, 513 + dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_alpha))) || 514 + nla_put_u32(skb, TCA_DUALPI2_BETA, 515 + dualpi2_unscale_alpha_beta(READ_ONCE(q->pi2_beta))) || 516 + nla_put_u32(skb, TCA_DUALPI2_STEP_THRESH_US, 517 + convert_ns_to_usec(step_th)) || 518 + nla_put_u32(skb, TCA_DUALPI2_MIN_QLEN_STEP, 519 + READ_ONCE(q->min_qlen_step)) || 520 + nla_put_u8(skb, TCA_DUALPI2_COUPLING, 521 + READ_ONCE(q->coupling_factor)) || 522 + nla_put_u8(skb, TCA_DUALPI2_DROP_OVERLOAD, 523 + READ_ONCE(q->drop_overload)) || 524 + nla_put_u8(skb, TCA_DUALPI2_DROP_EARLY, 525 + READ_ONCE(q->drop_early)) || 526 + nla_put_u8(skb, TCA_DUALPI2_C_PROTECTION, 527 + READ_ONCE(q->c_protection_wc)) || 528 + nla_put_u8(skb, TCA_DUALPI2_ECN_MASK, READ_ONCE(q->ecn_mask)) || 529 + nla_put_u8(skb, TCA_DUALPI2_SPLIT_GSO, READ_ONCE(q->split_gso)))) 530 + goto nla_put_failure; 531 + 532 + return nla_nest_end(skb, opts); 533 + 534 + nla_put_failure: 535 + nla_nest_cancel(skb, opts); 536 + return -1; 537 + } 538 + 539 + static int dualpi2_dump_stats(struct Qdisc *sch, struct gnet_dump *d) 540 + { 541 + struct dualpi2_sched_data *q = qdisc_priv(sch); 542 + struct tc_dualpi2_xstats st = { 543 + .prob = q->pi2_prob, 544 + .packets_in_c = q->packets_in_c, 545 + .packets_in_l = q->packets_in_l, 546 + .maxq = q->maxq, 547 + .ecn_mark = q->ecn_mark, 548 + .credit = q->c_protection_credit, 549 + .step_marks = q->step_marks, 550 + .memory_used = q->memory_used, 551 + .max_memory_used = q->max_memory_used, 552 + .memory_limit = 
q->memory_limit, 553 + }; 554 + u64 qc, ql; 555 + 556 + get_queue_delays(q, &qc, &ql); 557 + st.delay_l = convert_ns_to_usec(ql); 558 + st.delay_c = convert_ns_to_usec(qc); 559 + return gnet_stats_copy_app(d, &st, sizeof(st)); 480 560 } 481 561 482 562 /* Reset both L-queue and C-queue, internal packet counters, PI probability, ··· 682 564 .destroy = dualpi2_destroy, 683 565 .reset = dualpi2_reset, 684 566 .change = dualpi2_change, 567 + .dump = dualpi2_dump, 568 + .dump_stats = dualpi2_dump_stats, 685 569 .owner = THIS_MODULE, 686 570 }; 687 571