Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/sched: Add drop reasons for AQM-based qdiscs

Now that we have generic QDISC_CONGESTED and QDISC_OVERLIMIT drop
reasons, let's have all the qdiscs that contain an AQM apply them
consistently when dropping packets.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://patch.msgid.link/20241214-fq-codel-drop-reasons-v1-1-2a814e884c37@redhat.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Authored by Toke Høiland-Jørgensen and committed by Paolo Abeni.
ff9f17ce 963b7895

+21 -10
+3 -2
net/sched/sch_codel.c
··· 52 52 { 53 53 struct Qdisc *sch = ctx; 54 54 55 - kfree_skb(skb); 55 + kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED); 56 56 qdisc_qstats_drop(sch); 57 57 } 58 58 ··· 89 89 } 90 90 q = qdisc_priv(sch); 91 91 q->drop_overlimit++; 92 - return qdisc_drop(skb, sch, to_free); 92 + return qdisc_drop_reason(skb, sch, to_free, 93 + SKB_DROP_REASON_QDISC_OVERLIMIT); 93 94 } 94 95 95 96 static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
+2 -1
net/sched/sch_fq_codel.c
··· 168 168 skb = dequeue_head(flow); 169 169 len += qdisc_pkt_len(skb); 170 170 mem += get_codel_cb(skb)->mem_usage; 171 + tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT); 171 172 __qdisc_drop(skb, to_free); 172 173 } while (++i < max_packets && len < threshold); 173 174 ··· 275 274 { 276 275 struct Qdisc *sch = ctx; 277 276 278 - kfree_skb(skb); 277 + kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED); 279 278 qdisc_qstats_drop(sch); 280 279 } 281 280
+4 -2
net/sched/sch_fq_pie.c
··· 130 130 static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, 131 131 struct sk_buff **to_free) 132 132 { 133 + enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT; 133 134 struct fq_pie_sched_data *q = qdisc_priv(sch); 134 135 struct fq_pie_flow *sel_flow; 135 136 int ret; ··· 161 160 } else if (unlikely(memory_limited)) { 162 161 q->overmemory++; 163 162 } 163 + 164 + reason = SKB_DROP_REASON_QDISC_CONGESTED; 164 165 165 166 if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars, 166 167 sel_flow->backlog, skb->len)) { ··· 201 198 out: 202 199 q->stats.dropped++; 203 200 sel_flow->vars.accu_prob = 0; 204 - __qdisc_drop(skb, to_free); 205 - qdisc_qstats_drop(sch); 201 + qdisc_drop_reason(skb, sch, to_free, reason); 206 202 return NET_XMIT_CN; 207 203 } 208 204
+2 -2
net/sched/sch_gred.c
··· 251 251 252 252 q->stats.pdrop++; 253 253 drop: 254 - return qdisc_drop(skb, sch, to_free); 254 + return qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT); 255 255 256 256 congestion_drop: 257 - qdisc_drop(skb, sch, to_free); 257 + qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_CONGESTED); 258 258 return NET_XMIT_CN; 259 259 } 260 260
+4 -1
net/sched/sch_pie.c
··· 85 85 static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, 86 86 struct sk_buff **to_free) 87 87 { 88 + enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT; 88 89 struct pie_sched_data *q = qdisc_priv(sch); 89 90 bool enqueue = false; 90 91 ··· 93 92 q->stats.overlimit++; 94 93 goto out; 95 94 } 95 + 96 + reason = SKB_DROP_REASON_QDISC_CONGESTED; 96 97 97 98 if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog, 98 99 skb->len)) { ··· 124 121 out: 125 122 q->stats.dropped++; 126 123 q->vars.accu_prob = 0; 127 - return qdisc_drop(skb, sch, to_free); 124 + return qdisc_drop_reason(skb, sch, to_free, reason); 128 125 } 129 126 130 127 static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
+3 -1
net/sched/sch_red.c
··· 70 70 static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, 71 71 struct sk_buff **to_free) 72 72 { 73 + enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_CONGESTED; 73 74 struct red_sched_data *q = qdisc_priv(sch); 74 75 struct Qdisc *child = q->qdisc; 75 76 unsigned int len; ··· 108 107 break; 109 108 110 109 case RED_HARD_MARK: 110 + reason = SKB_DROP_REASON_QDISC_OVERLIMIT; 111 111 qdisc_qstats_overlimit(sch); 112 112 if (red_use_harddrop(q) || !red_use_ecn(q)) { 113 113 q->stats.forced_drop++; ··· 145 143 if (!skb) 146 144 return NET_XMIT_CN | ret; 147 145 148 - qdisc_drop(skb, sch, to_free); 146 + qdisc_drop_reason(skb, sch, to_free, reason); 149 147 return NET_XMIT_CN; 150 148 } 151 149
+3 -1
net/sched/sch_sfb.c
··· 280 280 struct sk_buff **to_free) 281 281 { 282 282 283 + enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT; 283 284 struct sfb_sched_data *q = qdisc_priv(sch); 284 285 unsigned int len = qdisc_pkt_len(skb); 285 286 struct Qdisc *child = q->qdisc; ··· 381 380 } 382 381 383 382 r = get_random_u16() & SFB_MAX_PROB; 383 + reason = SKB_DROP_REASON_QDISC_CONGESTED; 384 384 385 385 if (unlikely(r < p_min)) { 386 386 if (unlikely(p_min > SFB_MAX_PROB / 2)) { ··· 416 414 return ret; 417 415 418 416 drop: 419 - qdisc_drop(skb, sch, to_free); 417 + qdisc_drop_reason(skb, sch, to_free, reason); 420 418 return NET_XMIT_CN; 421 419 other_drop: 422 420 if (ret & __NET_XMIT_BYPASS)