Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/sched: sch_cake: Factor out config variables into separate struct

Factor out all the user-configurable variables into a separate struct
and embed it into struct cake_sched_data. This is done in preparation
for sharing the configuration across multiple instances of cake in an mq
setup.

No functional change is intended with this patch.

Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Reviewed-by: Willem de Bruijn <willemb@google.com>
Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://patch.msgid.link/20260109-mq-cake-sub-qdisc-v8-2-8d613fece5d8@redhat.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Authored by Toke Høiland-Jørgensen and committed by Paolo Abeni.
bc0ce2ba 8b27fd66

+133 -112
net/sched/sch_cake.c
··· 197 197 u32 way_collisions; 198 198 }; /* number of tins is small, so size of this struct doesn't matter much */ 199 199 200 + struct cake_sched_config { 201 + u64 rate_bps; 202 + u64 interval; 203 + u64 target; 204 + u32 buffer_config_limit; 205 + u32 fwmark_mask; 206 + u16 fwmark_shft; 207 + s16 rate_overhead; 208 + u16 rate_mpu; 209 + u16 rate_flags; 210 + u8 tin_mode; 211 + u8 flow_mode; 212 + u8 atm_mode; 213 + u8 ack_filter; 214 + }; 215 + 200 216 struct cake_sched_data { 201 217 struct tcf_proto __rcu *filter_list; /* optional external classifier */ 202 218 struct tcf_block *block; 203 219 struct cake_tin_data *tins; 220 + struct cake_sched_config *config; 204 221 205 222 struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS]; 206 - u16 overflow_timeout; 207 - 208 - u16 tin_cnt; 209 - u8 tin_mode; 210 - u8 flow_mode; 211 - u8 ack_filter; 212 - u8 atm_mode; 213 - 214 - u32 fwmark_mask; 215 - u16 fwmark_shft; 216 223 217 224 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */ 218 - u16 rate_shft; 219 225 ktime_t time_next_packet; 220 226 ktime_t failsafe_next_packet; 221 227 u64 rate_ns; 222 - u64 rate_bps; 223 - u16 rate_flags; 224 - s16 rate_overhead; 225 - u16 rate_mpu; 226 - u64 interval; 227 - u64 target; 228 + u16 rate_shft; 229 + u16 overflow_timeout; 230 + u16 tin_cnt; 228 231 229 232 /* resource tracking */ 230 233 u32 buffer_used; 231 234 u32 buffer_max_used; 232 235 u32 buffer_limit; 233 - u32 buffer_config_limit; 234 236 235 237 /* indices for dequeue */ 236 238 u16 cur_tin; ··· 1200 1198 static struct sk_buff *cake_ack_filter(struct cake_sched_data *q, 1201 1199 struct cake_flow *flow) 1202 1200 { 1203 - bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE; 1201 + bool aggressive = q->config->ack_filter == CAKE_ACK_AGGRESSIVE; 1204 1202 struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL; 1205 1203 struct sk_buff *skb_check, *skb_prev = NULL; 1206 1204 const struct ipv6hdr *ipv6h, *ipv6h_check; ··· 1360 1358 return 
avg; 1361 1359 } 1362 1360 1363 - static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off) 1361 + static u32 cake_calc_overhead(struct cake_sched_data *qd, u32 len, u32 off) 1364 1362 { 1363 + struct cake_sched_config *q = qd->config; 1364 + 1365 1365 if (q->rate_flags & CAKE_FLAG_OVERHEAD) 1366 1366 len -= off; 1367 1367 1368 - if (q->max_netlen < len) 1369 - q->max_netlen = len; 1370 - if (q->min_netlen > len) 1371 - q->min_netlen = len; 1368 + if (qd->max_netlen < len) 1369 + qd->max_netlen = len; 1370 + if (qd->min_netlen > len) 1371 + qd->min_netlen = len; 1372 1372 1373 1373 len += q->rate_overhead; 1374 1374 ··· 1389 1385 len += (len + 63) / 64; 1390 1386 } 1391 1387 1392 - if (q->max_adjlen < len) 1393 - q->max_adjlen = len; 1394 - if (q->min_adjlen > len) 1395 - q->min_adjlen = len; 1388 + if (qd->max_adjlen < len) 1389 + qd->max_adjlen = len; 1390 + if (qd->min_adjlen > len) 1391 + qd->min_adjlen = len; 1396 1392 1397 1393 return len; 1398 1394 } ··· 1590 1586 flow->dropped++; 1591 1587 b->tin_dropped++; 1592 1588 1593 - if (q->rate_flags & CAKE_FLAG_INGRESS) 1589 + if (q->config->rate_flags & CAKE_FLAG_INGRESS) 1594 1590 cake_advance_shaper(q, b, skb, now, true); 1595 1591 1596 1592 qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT); ··· 1660 1656 static struct cake_tin_data *cake_select_tin(struct Qdisc *sch, 1661 1657 struct sk_buff *skb) 1662 1658 { 1663 - struct cake_sched_data *q = qdisc_priv(sch); 1659 + struct cake_sched_data *qd = qdisc_priv(sch); 1660 + struct cake_sched_config *q = qd->config; 1664 1661 u32 tin, mark; 1665 1662 bool wash; 1666 1663 u8 dscp; ··· 1678 1673 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) 1679 1674 tin = 0; 1680 1675 1681 - else if (mark && mark <= q->tin_cnt) 1682 - tin = q->tin_order[mark - 1]; 1676 + else if (mark && mark <= qd->tin_cnt) 1677 + tin = qd->tin_order[mark - 1]; 1683 1678 1684 1679 else if (TC_H_MAJ(skb->priority) == sch->handle && 1685 1680 TC_H_MIN(skb->priority) 
> 0 && 1686 - TC_H_MIN(skb->priority) <= q->tin_cnt) 1687 - tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; 1681 + TC_H_MIN(skb->priority) <= qd->tin_cnt) 1682 + tin = qd->tin_order[TC_H_MIN(skb->priority) - 1]; 1688 1683 1689 1684 else { 1690 1685 if (!wash) 1691 1686 dscp = cake_handle_diffserv(skb, wash); 1692 - tin = q->tin_index[dscp]; 1687 + tin = qd->tin_index[dscp]; 1693 1688 1694 - if (unlikely(tin >= q->tin_cnt)) 1689 + if (unlikely(tin >= qd->tin_cnt)) 1695 1690 tin = 0; 1696 1691 } 1697 1692 1698 - return &q->tins[tin]; 1693 + return &qd->tins[tin]; 1699 1694 } 1700 1695 1701 1696 static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data **t, ··· 1751 1746 bool same_flow = false; 1752 1747 1753 1748 /* choose flow to insert into */ 1754 - idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); 1749 + idx = cake_classify(sch, &b, skb, q->config->flow_mode, &ret); 1755 1750 if (idx == 0) { 1756 1751 if (ret & __NET_XMIT_BYPASS) 1757 1752 qdisc_qstats_drop(sch); ··· 1786 1781 if (unlikely(len > b->max_skblen)) 1787 1782 b->max_skblen = len; 1788 1783 1789 - if (qdisc_pkt_segs(skb) > 1 && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { 1784 + if (qdisc_pkt_segs(skb) > 1 && q->config->rate_flags & CAKE_FLAG_SPLIT_GSO) { 1790 1785 struct sk_buff *segs, *nskb; 1791 1786 netdev_features_t features = netif_skb_features(skb); 1792 1787 unsigned int slen = 0, numsegs = 0; ··· 1828 1823 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); 1829 1824 flow_queue_add(flow, skb); 1830 1825 1831 - if (q->ack_filter) 1826 + if (q->config->ack_filter) 1832 1827 ack = cake_ack_filter(q, flow); 1833 1828 1834 1829 if (ack) { ··· 1837 1832 ack_pkt_len = qdisc_pkt_len(ack); 1838 1833 b->bytes += ack_pkt_len; 1839 1834 q->buffer_used += skb->truesize - ack->truesize; 1840 - if (q->rate_flags & CAKE_FLAG_INGRESS) 1835 + if (q->config->rate_flags & CAKE_FLAG_INGRESS) 1841 1836 cake_advance_shaper(q, b, ack, now, true); 1842 1837 1843 1838 qdisc_tree_reduce_backlog(sch, 1, 
ack_pkt_len); ··· 1860 1855 cake_heapify_up(q, b->overflow_idx[idx]); 1861 1856 1862 1857 /* incoming bandwidth capacity estimate */ 1863 - if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) { 1858 + if (q->config->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) { 1864 1859 u64 packet_interval = \ 1865 1860 ktime_to_ns(ktime_sub(now, q->last_packet_time)); 1866 1861 ··· 1892 1887 if (ktime_after(now, 1893 1888 ktime_add_ms(q->last_reconfig_time, 1894 1889 250))) { 1895 - q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4; 1890 + q->config->rate_bps = (q->avg_peak_bandwidth * 15) >> 4; 1896 1891 cake_reconfigure(sch); 1897 1892 } 1898 1893 } ··· 1912 1907 flow->set = CAKE_SET_SPARSE; 1913 1908 b->sparse_flow_count++; 1914 1909 1915 - flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode); 1910 + flow->deficit = cake_get_flow_quantum(b, flow, q->config->flow_mode); 1916 1911 } else if (flow->set == CAKE_SET_SPARSE_WAIT) { 1917 1912 /* this flow was empty, accounted as a sparse flow, but actually 1918 1913 * in the bulk rotation. 
··· 1921 1916 b->sparse_flow_count--; 1922 1917 b->bulk_flow_count++; 1923 1918 1924 - cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode); 1925 - cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode); 1919 + cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode); 1920 + cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode); 1926 1921 } 1927 1922 1928 1923 if (q->buffer_used > q->buffer_max_used) ··· 2109 2104 b->sparse_flow_count--; 2110 2105 b->bulk_flow_count++; 2111 2106 2112 - cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode); 2113 - cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode); 2107 + cake_inc_srchost_bulk_flow_count(b, flow, q->config->flow_mode); 2108 + cake_inc_dsthost_bulk_flow_count(b, flow, q->config->flow_mode); 2114 2109 2115 2110 flow->set = CAKE_SET_BULK; 2116 2111 } else { ··· 2122 2117 } 2123 2118 } 2124 2119 2125 - flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode); 2120 + flow->deficit += cake_get_flow_quantum(b, flow, q->config->flow_mode); 2126 2121 list_move_tail(&flow->flowchain, &b->old_flows); 2127 2122 2128 2123 goto retry; ··· 2146 2141 if (flow->set == CAKE_SET_BULK) { 2147 2142 b->bulk_flow_count--; 2148 2143 2149 - cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode); 2150 - cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode); 2144 + cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode); 2145 + cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode); 2151 2146 2152 2147 b->decaying_flow_count++; 2153 2148 } else if (flow->set == CAKE_SET_SPARSE || ··· 2165 2160 else if (flow->set == CAKE_SET_BULK) { 2166 2161 b->bulk_flow_count--; 2167 2162 2168 - cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode); 2169 - cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode); 2163 + cake_dec_srchost_bulk_flow_count(b, flow, q->config->flow_mode); 2164 + cake_dec_dsthost_bulk_flow_count(b, flow, q->config->flow_mode); 2170 2165 } else 2171 2166 
b->decaying_flow_count--; 2172 2167 ··· 2177 2172 2178 2173 reason = cobalt_should_drop(&flow->cvars, &b->cparams, now, skb, 2179 2174 (b->bulk_flow_count * 2180 - !!(q->rate_flags & 2175 + !!(q->config->rate_flags & 2181 2176 CAKE_FLAG_INGRESS))); 2182 2177 /* Last packet in queue may be marked, shouldn't be dropped */ 2183 2178 if (reason == SKB_NOT_DROPPED_YET || !flow->head) 2184 2179 break; 2185 2180 2186 2181 /* drop this packet, get another one */ 2187 - if (q->rate_flags & CAKE_FLAG_INGRESS) { 2182 + if (q->config->rate_flags & CAKE_FLAG_INGRESS) { 2188 2183 len = cake_advance_shaper(q, b, skb, 2189 2184 now, true); 2190 2185 flow->deficit -= len; ··· 2195 2190 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); 2196 2191 qdisc_qstats_drop(sch); 2197 2192 qdisc_dequeue_drop(sch, skb, reason); 2198 - if (q->rate_flags & CAKE_FLAG_INGRESS) 2193 + if (q->config->rate_flags & CAKE_FLAG_INGRESS) 2199 2194 goto retry; 2200 2195 } 2201 2196 ··· 2317 2312 struct cake_sched_data *q = qdisc_priv(sch); 2318 2313 struct cake_tin_data *b = &q->tins[0]; 2319 2314 u32 mtu = psched_mtu(qdisc_dev(sch)); 2320 - u64 rate = q->rate_bps; 2315 + u64 rate = q->config->rate_bps; 2321 2316 2322 2317 q->tin_cnt = 1; 2323 2318 ··· 2325 2320 q->tin_order = normal_order; 2326 2321 2327 2322 cake_set_rate(b, rate, mtu, 2328 - us_to_ns(q->target), us_to_ns(q->interval)); 2323 + us_to_ns(q->config->target), us_to_ns(q->config->interval)); 2329 2324 b->tin_quantum = 65535; 2330 2325 2331 2326 return 0; ··· 2336 2331 /* convert high-level (user visible) parameters into internal format */ 2337 2332 struct cake_sched_data *q = qdisc_priv(sch); 2338 2333 u32 mtu = psched_mtu(qdisc_dev(sch)); 2339 - u64 rate = q->rate_bps; 2334 + u64 rate = q->config->rate_bps; 2340 2335 u32 quantum = 256; 2341 2336 u32 i; 2342 2337 ··· 2347 2342 for (i = 0; i < q->tin_cnt; i++) { 2348 2343 struct cake_tin_data *b = &q->tins[i]; 2349 2344 2350 - cake_set_rate(b, rate, mtu, us_to_ns(q->target), 2351 - 
us_to_ns(q->interval)); 2345 + cake_set_rate(b, rate, mtu, us_to_ns(q->config->target), 2346 + us_to_ns(q->config->interval)); 2352 2347 2353 2348 b->tin_quantum = max_t(u16, 1U, quantum); 2354 2349 ··· 2425 2420 2426 2421 struct cake_sched_data *q = qdisc_priv(sch); 2427 2422 u32 mtu = psched_mtu(qdisc_dev(sch)); 2428 - u64 rate = q->rate_bps; 2423 + u64 rate = q->config->rate_bps; 2429 2424 u32 quantum = 256; 2430 2425 u32 i; 2431 2426 ··· 2439 2434 for (i = 0; i < q->tin_cnt; i++) { 2440 2435 struct cake_tin_data *b = &q->tins[i]; 2441 2436 2442 - cake_set_rate(b, rate, mtu, us_to_ns(q->target), 2443 - us_to_ns(q->interval)); 2437 + cake_set_rate(b, rate, mtu, us_to_ns(q->config->target), 2438 + us_to_ns(q->config->interval)); 2444 2439 2445 2440 b->tin_quantum = max_t(u16, 1U, quantum); 2446 2441 ··· 2469 2464 2470 2465 struct cake_sched_data *q = qdisc_priv(sch); 2471 2466 u32 mtu = psched_mtu(qdisc_dev(sch)); 2472 - u64 rate = q->rate_bps; 2467 + u64 rate = q->config->rate_bps; 2473 2468 u32 quantum = 1024; 2474 2469 2475 2470 q->tin_cnt = 4; ··· 2480 2475 2481 2476 /* class characteristics */ 2482 2477 cake_set_rate(&q->tins[0], rate, mtu, 2483 - us_to_ns(q->target), us_to_ns(q->interval)); 2478 + us_to_ns(q->config->target), us_to_ns(q->config->interval)); 2484 2479 cake_set_rate(&q->tins[1], rate >> 4, mtu, 2485 - us_to_ns(q->target), us_to_ns(q->interval)); 2480 + us_to_ns(q->config->target), us_to_ns(q->config->interval)); 2486 2481 cake_set_rate(&q->tins[2], rate >> 1, mtu, 2487 - us_to_ns(q->target), us_to_ns(q->interval)); 2482 + us_to_ns(q->config->target), us_to_ns(q->config->interval)); 2488 2483 cake_set_rate(&q->tins[3], rate >> 2, mtu, 2489 - us_to_ns(q->target), us_to_ns(q->interval)); 2484 + us_to_ns(q->config->target), us_to_ns(q->config->interval)); 2490 2485 2491 2486 /* bandwidth-sharing weights */ 2492 2487 q->tins[0].tin_quantum = quantum; ··· 2506 2501 */ 2507 2502 struct cake_sched_data *q = qdisc_priv(sch); 2508 2503 u32 mtu = 
psched_mtu(qdisc_dev(sch)); 2509 - u64 rate = q->rate_bps; 2504 + u64 rate = q->config->rate_bps; 2510 2505 u32 quantum = 1024; 2511 2506 2512 2507 q->tin_cnt = 3; ··· 2517 2512 2518 2513 /* class characteristics */ 2519 2514 cake_set_rate(&q->tins[0], rate, mtu, 2520 - us_to_ns(q->target), us_to_ns(q->interval)); 2515 + us_to_ns(q->config->target), us_to_ns(q->config->interval)); 2521 2516 cake_set_rate(&q->tins[1], rate >> 4, mtu, 2522 - us_to_ns(q->target), us_to_ns(q->interval)); 2517 + us_to_ns(q->config->target), us_to_ns(q->config->interval)); 2523 2518 cake_set_rate(&q->tins[2], rate >> 2, mtu, 2524 - us_to_ns(q->target), us_to_ns(q->interval)); 2519 + us_to_ns(q->config->target), us_to_ns(q->config->interval)); 2525 2520 2526 2521 /* bandwidth-sharing weights */ 2527 2522 q->tins[0].tin_quantum = quantum; ··· 2533 2528 2534 2529 static void cake_reconfigure(struct Qdisc *sch) 2535 2530 { 2536 - struct cake_sched_data *q = qdisc_priv(sch); 2531 + struct cake_sched_data *qd = qdisc_priv(sch); 2532 + struct cake_sched_config *q = qd->config; 2537 2533 int c, ft; 2538 2534 2539 2535 switch (q->tin_mode) { ··· 2560 2554 break; 2561 2555 } 2562 2556 2563 - for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { 2557 + for (c = qd->tin_cnt; c < CAKE_MAX_TINS; c++) { 2564 2558 cake_clear_tin(sch, c); 2565 - q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; 2559 + qd->tins[c].cparams.mtu_time = qd->tins[ft].cparams.mtu_time; 2566 2560 } 2567 2561 2568 - q->rate_ns = q->tins[ft].tin_rate_ns; 2569 - q->rate_shft = q->tins[ft].tin_rate_shft; 2562 + qd->rate_ns = qd->tins[ft].tin_rate_ns; 2563 + qd->rate_shft = qd->tins[ft].tin_rate_shft; 2570 2564 2571 2565 if (q->buffer_config_limit) { 2572 - q->buffer_limit = q->buffer_config_limit; 2566 + qd->buffer_limit = q->buffer_config_limit; 2573 2567 } else if (q->rate_bps) { 2574 2568 u64 t = q->rate_bps * q->interval; 2575 2569 2576 2570 do_div(t, USEC_PER_SEC / 4); 2577 - q->buffer_limit = max_t(u32, t, 4U << 20); 2571 + 
qd->buffer_limit = max_t(u32, t, 4U << 20); 2578 2572 } else { 2579 - q->buffer_limit = ~0; 2573 + qd->buffer_limit = ~0; 2580 2574 } 2581 2575 2582 2576 sch->flags &= ~TCQ_F_CAN_BYPASS; 2583 2577 2584 - q->buffer_limit = min(q->buffer_limit, 2585 - max(sch->limit * psched_mtu(qdisc_dev(sch)), 2586 - q->buffer_config_limit)); 2578 + qd->buffer_limit = min(qd->buffer_limit, 2579 + max(sch->limit * psched_mtu(qdisc_dev(sch)), 2580 + q->buffer_config_limit)); 2587 2581 } 2588 2582 2589 2583 static int cake_change(struct Qdisc *sch, struct nlattr *opt, 2590 2584 struct netlink_ext_ack *extack) 2591 2585 { 2592 - struct cake_sched_data *q = qdisc_priv(sch); 2586 + struct cake_sched_data *qd = qdisc_priv(sch); 2587 + struct cake_sched_config *q = qd->config; 2593 2588 struct nlattr *tb[TCA_CAKE_MAX + 1]; 2594 2589 u16 rate_flags; 2595 2590 u8 flow_mode; ··· 2644 2637 nla_get_s32(tb[TCA_CAKE_OVERHEAD])); 2645 2638 rate_flags |= CAKE_FLAG_OVERHEAD; 2646 2639 2647 - q->max_netlen = 0; 2648 - q->max_adjlen = 0; 2649 - q->min_netlen = ~0; 2650 - q->min_adjlen = ~0; 2640 + qd->max_netlen = 0; 2641 + qd->max_adjlen = 0; 2642 + qd->min_netlen = ~0; 2643 + qd->min_adjlen = ~0; 2651 2644 } 2652 2645 2653 2646 if (tb[TCA_CAKE_RAW]) { 2654 2647 rate_flags &= ~CAKE_FLAG_OVERHEAD; 2655 2648 2656 - q->max_netlen = 0; 2657 - q->max_adjlen = 0; 2658 - q->min_netlen = ~0; 2659 - q->min_adjlen = ~0; 2649 + qd->max_netlen = 0; 2650 + qd->max_adjlen = 0; 2651 + qd->min_netlen = ~0; 2652 + qd->min_adjlen = ~0; 2660 2653 } 2661 2654 2662 2655 if (tb[TCA_CAKE_MPU]) ··· 2712 2705 2713 2706 WRITE_ONCE(q->rate_flags, rate_flags); 2714 2707 WRITE_ONCE(q->flow_mode, flow_mode); 2715 - if (q->tins) { 2708 + if (qd->tins) { 2716 2709 sch_tree_lock(sch); 2717 2710 cake_reconfigure(sch); 2718 2711 sch_tree_unlock(sch); ··· 2728 2721 qdisc_watchdog_cancel(&q->watchdog); 2729 2722 tcf_block_put(q->block); 2730 2723 kvfree(q->tins); 2724 + kvfree(q->config); 2731 2725 } 2732 2726 2733 2727 static int 
cake_init(struct Qdisc *sch, struct nlattr *opt, 2734 2728 struct netlink_ext_ack *extack) 2735 2729 { 2736 - struct cake_sched_data *q = qdisc_priv(sch); 2730 + struct cake_sched_data *qd = qdisc_priv(sch); 2731 + struct cake_sched_config *q; 2737 2732 int i, j, err; 2733 + 2734 + q = kzalloc(sizeof(*q), GFP_KERNEL); 2735 + if (!q) 2736 + return -ENOMEM; 2738 2737 2739 2738 sch->limit = 10240; 2740 2739 sch->flags |= TCQ_F_DEQUEUE_DROPS; ··· 2755 2742 * for 5 to 10% of interval 2756 2743 */ 2757 2744 q->rate_flags |= CAKE_FLAG_SPLIT_GSO; 2758 - q->cur_tin = 0; 2759 - q->cur_flow = 0; 2745 + qd->cur_tin = 0; 2746 + qd->cur_flow = 0; 2747 + qd->config = q; 2760 2748 2761 - qdisc_watchdog_init(&q->watchdog, sch); 2749 + qdisc_watchdog_init(&qd->watchdog, sch); 2762 2750 2763 2751 if (opt) { 2764 2752 err = cake_change(sch, opt, extack); 2765 2753 2766 2754 if (err) 2767 - return err; 2755 + goto err; 2768 2756 } 2769 2757 2770 - err = tcf_block_get(&q->block, &q->filter_list, sch, extack); 2758 + err = tcf_block_get(&qd->block, &qd->filter_list, sch, extack); 2771 2759 if (err) 2772 - return err; 2760 + goto err; 2773 2761 2774 2762 quantum_div[0] = ~0; 2775 2763 for (i = 1; i <= CAKE_QUEUES; i++) 2776 2764 quantum_div[i] = 65535 / i; 2777 2765 2778 - q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), 2779 - GFP_KERNEL); 2780 - if (!q->tins) 2781 - return -ENOMEM; 2766 + qd->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), 2767 + GFP_KERNEL); 2768 + if (!qd->tins) { 2769 + err = -ENOMEM; 2770 + goto err; 2771 + } 2782 2772 2783 2773 for (i = 0; i < CAKE_MAX_TINS; i++) { 2784 - struct cake_tin_data *b = q->tins + i; 2774 + struct cake_tin_data *b = qd->tins + i; 2785 2775 2786 2776 INIT_LIST_HEAD(&b->new_flows); 2787 2777 INIT_LIST_HEAD(&b->old_flows); ··· 2800 2784 INIT_LIST_HEAD(&flow->flowchain); 2801 2785 cobalt_vars_init(&flow->cvars); 2802 2786 2803 - q->overflow_heap[k].t = i; 2804 - q->overflow_heap[k].b = j; 2787 + 
qd->overflow_heap[k].t = i; 2788 + qd->overflow_heap[k].b = j; 2805 2789 b->overflow_idx[j] = k; 2806 2790 } 2807 2791 } 2808 2792 2809 2793 cake_reconfigure(sch); 2810 - q->avg_peak_bandwidth = q->rate_bps; 2811 - q->min_netlen = ~0; 2812 - q->min_adjlen = ~0; 2794 + qd->avg_peak_bandwidth = q->rate_bps; 2795 + qd->min_netlen = ~0; 2796 + qd->min_adjlen = ~0; 2813 2797 return 0; 2798 + err: 2799 + kvfree(qd->config); 2800 + qd->config = NULL; 2801 + return err; 2814 2802 } 2815 2803 2816 2804 static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) 2817 2805 { 2818 - struct cake_sched_data *q = qdisc_priv(sch); 2806 + struct cake_sched_data *qd = qdisc_priv(sch); 2807 + struct cake_sched_config *q = qd->config; 2819 2808 struct nlattr *opts; 2820 2809 u16 rate_flags; 2821 2810 u8 flow_mode;