Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/sched: sch_cake: Share config across cake_mq sub-qdiscs

This adds support for configuring the cake_mq instance directly, sharing
the config across the cake sub-qdiscs.

Signed-off-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://patch.msgid.link/20260109-mq-cake-sub-qdisc-v8-4-8d613fece5d8@redhat.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Authored by Toke Høiland-Jørgensen; committed by Paolo Abeni.
87826c01 ebc65a87

+133 -40
+133 -40
net/sched/sch_cake.c
··· 212 212 u8 flow_mode; 213 213 u8 atm_mode; 214 214 u8 ack_filter; 215 + u8 is_shared; 215 216 }; 216 217 217 218 struct cake_sched_data { ··· 2588 2587 q->buffer_config_limit)); 2589 2588 } 2590 2589 2591 - static int cake_change(struct Qdisc *sch, struct nlattr *opt, 2592 - struct netlink_ext_ack *extack) 2590 + static int cake_config_change(struct cake_sched_config *q, struct nlattr *opt, 2591 + struct netlink_ext_ack *extack, bool *overhead_changed) 2593 2592 { 2594 - struct cake_sched_data *qd = qdisc_priv(sch); 2595 - struct cake_sched_config *q = qd->config; 2596 2593 struct nlattr *tb[TCA_CAKE_MAX + 1]; 2597 - u16 rate_flags; 2598 - u8 flow_mode; 2594 + u16 rate_flags = q->rate_flags; 2595 + u8 flow_mode = q->flow_mode; 2599 2596 int err; 2600 2597 2601 2598 err = nla_parse_nested_deprecated(tb, TCA_CAKE_MAX, opt, cake_policy, ··· 2601 2602 if (err < 0) 2602 2603 return err; 2603 2604 2604 - flow_mode = q->flow_mode; 2605 2605 if (tb[TCA_CAKE_NAT]) { 2606 2606 #if IS_ENABLED(CONFIG_NF_CONNTRACK) 2607 2607 flow_mode &= ~CAKE_FLOW_NAT_FLAG; ··· 2613 2615 #endif 2614 2616 } 2615 2617 2618 + if (tb[TCA_CAKE_AUTORATE]) { 2619 + if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE])) { 2620 + if (q->is_shared) { 2621 + NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_AUTORATE], 2622 + "Can't use autorate-ingress with cake_mq"); 2623 + return -EOPNOTSUPP; 2624 + } 2625 + rate_flags |= CAKE_FLAG_AUTORATE_INGRESS; 2626 + } else { 2627 + rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS; 2628 + } 2629 + } 2630 + 2616 2631 if (tb[TCA_CAKE_BASE_RATE64]) 2617 2632 WRITE_ONCE(q->rate_bps, 2618 2633 nla_get_u64(tb[TCA_CAKE_BASE_RATE64])); ··· 2634 2623 WRITE_ONCE(q->tin_mode, 2635 2624 nla_get_u32(tb[TCA_CAKE_DIFFSERV_MODE])); 2636 2625 2637 - rate_flags = q->rate_flags; 2638 2626 if (tb[TCA_CAKE_WASH]) { 2639 2627 if (!!nla_get_u32(tb[TCA_CAKE_WASH])) 2640 2628 rate_flags |= CAKE_FLAG_WASH; ··· 2654 2644 WRITE_ONCE(q->rate_overhead, 2655 2645 nla_get_s32(tb[TCA_CAKE_OVERHEAD])); 2656 2646 
rate_flags |= CAKE_FLAG_OVERHEAD; 2657 - 2658 - qd->max_netlen = 0; 2659 - qd->max_adjlen = 0; 2660 - qd->min_netlen = ~0; 2661 - qd->min_adjlen = ~0; 2647 + *overhead_changed = true; 2662 2648 } 2663 2649 2664 2650 if (tb[TCA_CAKE_RAW]) { 2665 2651 rate_flags &= ~CAKE_FLAG_OVERHEAD; 2666 - 2667 - qd->max_netlen = 0; 2668 - qd->max_adjlen = 0; 2669 - qd->min_netlen = ~0; 2670 - qd->min_adjlen = ~0; 2652 + *overhead_changed = true; 2671 2653 } 2672 2654 2673 2655 if (tb[TCA_CAKE_MPU]) ··· 2676 2674 u32 target = nla_get_u32(tb[TCA_CAKE_TARGET]); 2677 2675 2678 2676 WRITE_ONCE(q->target, max(target, 1U)); 2679 - } 2680 - 2681 - if (tb[TCA_CAKE_AUTORATE]) { 2682 - if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE])) 2683 - rate_flags |= CAKE_FLAG_AUTORATE_INGRESS; 2684 - else 2685 - rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS; 2686 2677 } 2687 2678 2688 2679 if (tb[TCA_CAKE_INGRESS]) { ··· 2708 2713 2709 2714 WRITE_ONCE(q->rate_flags, rate_flags); 2710 2715 WRITE_ONCE(q->flow_mode, flow_mode); 2716 + 2717 + return 0; 2718 + } 2719 + 2720 + static int cake_change(struct Qdisc *sch, struct nlattr *opt, 2721 + struct netlink_ext_ack *extack) 2722 + { 2723 + struct cake_sched_data *qd = qdisc_priv(sch); 2724 + struct cake_sched_config *q = qd->config; 2725 + bool overhead_changed = false; 2726 + int ret; 2727 + 2728 + if (q->is_shared) { 2729 + NL_SET_ERR_MSG(extack, "can't reconfigure cake_mq sub-qdiscs"); 2730 + return -EOPNOTSUPP; 2731 + } 2732 + 2733 + ret = cake_config_change(q, opt, extack, &overhead_changed); 2734 + if (ret) 2735 + return ret; 2736 + 2737 + if (overhead_changed) { 2738 + qd->max_netlen = 0; 2739 + qd->max_adjlen = 0; 2740 + qd->min_netlen = ~0; 2741 + qd->min_adjlen = ~0; 2742 + } 2743 + 2711 2744 if (qd->tins) { 2712 2745 sch_tree_lock(sch); 2713 2746 cake_reconfigure(sch); ··· 2752 2729 qdisc_watchdog_cancel(&q->watchdog); 2753 2730 tcf_block_put(q->block); 2754 2731 kvfree(q->tins); 2755 - kvfree(q->config); 2732 + if (q->config && !q->config->is_shared) 2733 
+ kvfree(q->config); 2734 + } 2735 + 2736 + static void cake_config_init(struct cake_sched_config *q, bool is_shared) 2737 + { 2738 + q->tin_mode = CAKE_DIFFSERV_DIFFSERV3; 2739 + q->flow_mode = CAKE_FLOW_TRIPLE; 2740 + 2741 + q->rate_bps = 0; /* unlimited by default */ 2742 + 2743 + q->interval = 100000; /* 100ms default */ 2744 + q->target = 5000; /* 5ms: codel RFC argues 2745 + * for 5 to 10% of interval 2746 + */ 2747 + q->rate_flags |= CAKE_FLAG_SPLIT_GSO; 2748 + q->is_shared = is_shared; 2756 2749 } 2757 2750 2758 2751 static int cake_init(struct Qdisc *sch, struct nlattr *opt, ··· 2782 2743 if (!q) 2783 2744 return -ENOMEM; 2784 2745 2746 + cake_config_init(q, false); 2747 + 2785 2748 sch->limit = 10240; 2786 2749 sch->flags |= TCQ_F_DEQUEUE_DROPS; 2787 2750 2788 - q->tin_mode = CAKE_DIFFSERV_DIFFSERV3; 2789 - q->flow_mode = CAKE_FLOW_TRIPLE; 2790 - 2791 - q->rate_bps = 0; /* unlimited by default */ 2792 - 2793 - q->interval = 100000; /* 100ms default */ 2794 - q->target = 5000; /* 5ms: codel RFC argues 2795 - * for 5 to 10% of interval 2796 - */ 2797 - q->rate_flags |= CAKE_FLAG_SPLIT_GSO; 2798 2751 qd->cur_tin = 0; 2799 2752 qd->cur_flow = 0; 2800 2753 qd->config = q; ··· 2849 2818 return err; 2850 2819 } 2851 2820 2852 - static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) 2821 + static void cake_config_replace(struct Qdisc *sch, struct cake_sched_config *cfg) 2853 2822 { 2854 2823 struct cake_sched_data *qd = qdisc_priv(sch); 2855 2824 struct cake_sched_config *q = qd->config; 2825 + 2826 + qd->config = cfg; 2827 + 2828 + if (!q->is_shared) 2829 + kvfree(q); 2830 + 2831 + cake_reconfigure(sch); 2832 + } 2833 + 2834 + static int cake_config_dump(struct cake_sched_config *q, struct sk_buff *skb) 2835 + { 2856 2836 struct nlattr *opts; 2857 2837 u16 rate_flags; 2858 2838 u8 flow_mode; ··· 2937 2895 2938 2896 nla_put_failure: 2939 2897 return -1; 2898 + } 2899 + 2900 + static int cake_dump(struct Qdisc *sch, struct sk_buff *skb) 2901 + { 2902 + 
struct cake_sched_data *qd = qdisc_priv(sch); 2903 + 2904 + return cake_config_dump(qd->config, skb); 2940 2905 } 2941 2906 2942 2907 static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d) ··· 3209 3160 3210 3161 struct cake_mq_sched { 3211 3162 struct mq_sched mq_priv; /* must be first */ 3163 + struct cake_sched_config cake_config; 3212 3164 }; 3213 3165 3214 3166 static void cake_mq_destroy(struct Qdisc *sch) ··· 3220 3170 static int cake_mq_init(struct Qdisc *sch, struct nlattr *opt, 3221 3171 struct netlink_ext_ack *extack) 3222 3172 { 3223 - int ret; 3173 + struct cake_mq_sched *priv = qdisc_priv(sch); 3174 + struct net_device *dev = qdisc_dev(sch); 3175 + int ret, ntx; 3176 + bool _unused; 3177 + 3178 + cake_config_init(&priv->cake_config, true); 3179 + if (opt) { 3180 + ret = cake_config_change(&priv->cake_config, opt, extack, &_unused); 3181 + if (ret) 3182 + return ret; 3183 + } 3224 3184 3225 3185 ret = mq_init_common(sch, opt, extack, &cake_qdisc_ops); 3226 3186 if (ret) 3227 3187 return ret; 3188 + 3189 + for (ntx = 0; ntx < dev->num_tx_queues; ntx++) 3190 + cake_config_replace(priv->mq_priv.qdiscs[ntx], &priv->cake_config); 3228 3191 3229 3192 return 0; 3230 3193 } 3231 3194 3232 3195 static int cake_mq_dump(struct Qdisc *sch, struct sk_buff *skb) 3233 3196 { 3197 + struct cake_mq_sched *priv = qdisc_priv(sch); 3198 + 3234 3199 mq_dump_common(sch, skb); 3235 - return 0; 3200 + return cake_config_dump(&priv->cake_config, skb); 3236 3201 } 3237 3202 3238 3203 static int cake_mq_change(struct Qdisc *sch, struct nlattr *opt, 3239 3204 struct netlink_ext_ack *extack) 3240 3205 { 3241 - return -EOPNOTSUPP; 3206 + struct cake_mq_sched *priv = qdisc_priv(sch); 3207 + struct net_device *dev = qdisc_dev(sch); 3208 + bool overhead_changed = false; 3209 + unsigned int ntx; 3210 + int ret; 3211 + 3212 + ret = cake_config_change(&priv->cake_config, opt, extack, &overhead_changed); 3213 + if (ret) 3214 + return ret; 3215 + 3216 + for (ntx = 0; ntx < 
dev->num_tx_queues; ntx++) { 3217 + struct Qdisc *chld = rtnl_dereference(netdev_get_tx_queue(dev, ntx)->qdisc_sleeping); 3218 + struct cake_sched_data *qd = qdisc_priv(chld); 3219 + 3220 + if (overhead_changed) { 3221 + qd->max_netlen = 0; 3222 + qd->max_adjlen = 0; 3223 + qd->min_netlen = ~0; 3224 + qd->min_adjlen = ~0; 3225 + } 3226 + 3227 + if (qd->tins) { 3228 + sch_tree_lock(chld); 3229 + cake_reconfigure(chld); 3230 + sch_tree_unlock(chld); 3231 + } 3232 + } 3233 + 3234 + return 0; 3242 3235 } 3243 3236 3244 3237 static int cake_mq_graft(struct Qdisc *sch, unsigned long cl, struct Qdisc *new,