Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'cxgb4-improve-and-tune-TC-MQPRIO-offload'

Rahul Lakkireddy says:

====================
cxgb4: improve and tune TC-MQPRIO offload

Patch 1 improves the Tx path's credit request and recovery mechanism
when running under heavy load.

Patch 2 adds the ability to tune the burst buffer sizes of all traffic
classes to improve performance for MTUs <= 1500 bytes, under heavy load.

Patch 3 adds support to track EOTIDs and dump software queue
contexts used by TC-MQPRIO offload.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+190 -61
+16 -14
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 1125 1125 * programmed with various parameters. 1126 1126 */ 1127 1127 struct ch_sched_params { 1128 - s8 type; /* packet or flow */ 1128 + u8 type; /* packet or flow */ 1129 1129 union { 1130 1130 struct { 1131 - s8 level; /* scheduler hierarchy level */ 1132 - s8 mode; /* per-class or per-flow */ 1133 - s8 rateunit; /* bit or packet rate */ 1134 - s8 ratemode; /* %port relative or kbps absolute */ 1135 - s8 channel; /* scheduler channel [0..N] */ 1136 - s8 class; /* scheduler class [0..N] */ 1137 - s32 minrate; /* minimum rate */ 1138 - s32 maxrate; /* maximum rate */ 1139 - s16 weight; /* percent weight */ 1140 - s16 pktsize; /* average packet size */ 1131 + u8 level; /* scheduler hierarchy level */ 1132 + u8 mode; /* per-class or per-flow */ 1133 + u8 rateunit; /* bit or packet rate */ 1134 + u8 ratemode; /* %port relative or kbps absolute */ 1135 + u8 channel; /* scheduler channel [0..N] */ 1136 + u8 class; /* scheduler class [0..N] */ 1137 + u32 minrate; /* minimum rate */ 1138 + u32 maxrate; /* maximum rate */ 1139 + u16 weight; /* percent weight */ 1140 + u16 pktsize; /* average packet size */ 1141 + u16 burstsize; /* burst buffer size */ 1141 1142 } params; 1142 1143 } u; 1143 1144 }; ··· 1953 1952 enum ctxt_type ctype, u32 *data); 1954 1953 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid, 1955 1954 enum ctxt_type ctype, u32 *data); 1956 - int t4_sched_params(struct adapter *adapter, int type, int level, int mode, 1957 - int rateunit, int ratemode, int channel, int class, 1958 - int minrate, int maxrate, int weight, int pktsize); 1955 + int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode, 1956 + u8 rateunit, u8 ratemode, u8 channel, u8 class, 1957 + u32 minrate, u32 maxrate, u16 weight, u16 pktsize, 1958 + u16 burstsize); 1959 1959 void t4_sge_decode_idma_state(struct adapter *adapter, int state); 1960 1960 void t4_idma_monitor_init(struct adapter *adapter, 1961 1961 struct sge_idma_monitor_state *idma);
+116 -28
drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
··· 49 49 #include "cudbg_lib_common.h" 50 50 #include "cudbg_entity.h" 51 51 #include "cudbg_lib.h" 52 + #include "cxgb4_tc_mqprio.h" 52 53 53 54 /* generic seq_file support for showing a table of size rows x width. */ 54 55 static void *seq_tab_get_idx(struct seq_tab *tb, loff_t pos) ··· 2658 2657 2659 2658 static int sge_qinfo_show(struct seq_file *seq, void *v) 2660 2659 { 2661 - int eth_entries, ctrl_entries, eo_entries = 0; 2660 + int eth_entries, ctrl_entries, eohw_entries = 0, eosw_entries = 0; 2662 2661 int uld_rxq_entries[CXGB4_ULD_MAX] = { 0 }; 2663 2662 int uld_ciq_entries[CXGB4_ULD_MAX] = { 0 }; 2664 2663 int uld_txq_entries[CXGB4_TX_MAX] = { 0 }; 2665 2664 const struct sge_uld_txq_info *utxq_info; 2666 2665 const struct sge_uld_rxq_info *urxq_info; 2666 + struct cxgb4_tc_port_mqprio *port_mqprio; 2667 2667 struct adapter *adap = seq->private; 2668 - int i, n, r = (uintptr_t)v - 1; 2668 + int i, j, n, r = (uintptr_t)v - 1; 2669 2669 struct sge *s = &adap->sge; 2670 2670 2671 2671 eth_entries = DIV_ROUND_UP(adap->sge.ethqsets, 4); 2672 2672 ctrl_entries = DIV_ROUND_UP(MAX_CTRL_QUEUES, 4); 2673 - if (adap->sge.eohw_txq) 2674 - eo_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4); 2675 - 2676 - mutex_lock(&uld_mutex); 2677 - if (s->uld_txq_info) 2678 - for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++) 2679 - uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i); 2680 - 2681 - if (s->uld_rxq_info) { 2682 - for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) { 2683 - uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i); 2684 - uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i); 2685 - } 2686 - } 2687 2673 2688 2674 if (r) 2689 2675 seq_putc(seq, '\n'); ··· 2747 2759 RL("FLLow:", fl.low); 2748 2760 RL("FLStarving:", fl.starving); 2749 2761 2750 - goto unlock; 2762 + goto out; 2751 2763 } 2752 2764 2753 2765 r -= eth_entries; 2754 - if (r < eo_entries) { 2766 + if (!adap->tc_mqprio) 2767 + goto skip_mqprio; 2768 + 2769 + 
mutex_lock(&adap->tc_mqprio->mqprio_mutex); 2770 + if (!refcount_read(&adap->tc_mqprio->refcnt)) { 2771 + mutex_unlock(&adap->tc_mqprio->mqprio_mutex); 2772 + goto skip_mqprio; 2773 + } 2774 + 2775 + eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4); 2776 + if (r < eohw_entries) { 2755 2777 int base_qset = r * 4; 2756 2778 const struct sge_ofld_rxq *rx = &s->eohw_rxq[base_qset]; 2757 2779 const struct sge_eohw_txq *tx = &s->eohw_txq[base_qset]; ··· 2806 2808 RL("FLLow:", fl.low); 2807 2809 RL("FLStarving:", fl.starving); 2808 2810 2809 - goto unlock; 2811 + mutex_unlock(&adap->tc_mqprio->mqprio_mutex); 2812 + goto out; 2810 2813 } 2811 2814 2812 - r -= eo_entries; 2815 + r -= eohw_entries; 2816 + for (j = 0; j < adap->params.nports; j++) { 2817 + int entries; 2818 + u8 tc; 2819 + 2820 + port_mqprio = &adap->tc_mqprio->port_mqprio[j]; 2821 + entries = 0; 2822 + for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++) 2823 + entries += port_mqprio->mqprio.qopt.count[tc]; 2824 + 2825 + if (!entries) 2826 + continue; 2827 + 2828 + eosw_entries = DIV_ROUND_UP(entries, 4); 2829 + if (r < eosw_entries) { 2830 + const struct sge_eosw_txq *tx; 2831 + 2832 + n = min(4, entries - 4 * r); 2833 + tx = &port_mqprio->eosw_txq[4 * r]; 2834 + 2835 + S("QType:", "EOSW-TXQ"); 2836 + S("Interface:", 2837 + adap->port[j] ? 
adap->port[j]->name : "N/A"); 2838 + T("EOTID:", hwtid); 2839 + T("HWQID:", hwqid); 2840 + T("State:", state); 2841 + T("Size:", ndesc); 2842 + T("In-Use:", inuse); 2843 + T("Credits:", cred); 2844 + T("Compl:", ncompl); 2845 + T("Last-Compl:", last_compl); 2846 + T("PIDX:", pidx); 2847 + T("Last-PIDX:", last_pidx); 2848 + T("CIDX:", cidx); 2849 + T("Last-CIDX:", last_cidx); 2850 + T("FLOWC-IDX:", flowc_idx); 2851 + 2852 + mutex_unlock(&adap->tc_mqprio->mqprio_mutex); 2853 + goto out; 2854 + } 2855 + 2856 + r -= eosw_entries; 2857 + } 2858 + mutex_unlock(&adap->tc_mqprio->mqprio_mutex); 2859 + 2860 + skip_mqprio: 2861 + if (!is_uld(adap)) 2862 + goto skip_uld; 2863 + 2864 + mutex_lock(&uld_mutex); 2865 + if (s->uld_txq_info) 2866 + for (i = 0; i < ARRAY_SIZE(uld_txq_entries); i++) 2867 + uld_txq_entries[i] = sge_qinfo_uld_txq_entries(adap, i); 2868 + 2869 + if (s->uld_rxq_info) { 2870 + for (i = 0; i < ARRAY_SIZE(uld_rxq_entries); i++) { 2871 + uld_rxq_entries[i] = sge_qinfo_uld_rxq_entries(adap, i); 2872 + uld_ciq_entries[i] = sge_qinfo_uld_ciq_entries(adap, i); 2873 + } 2874 + } 2875 + 2813 2876 if (r < uld_txq_entries[CXGB4_TX_OFLD]) { 2814 2877 const struct sge_uld_txq *tx; 2815 2878 ··· 3053 2994 } 3054 2995 3055 2996 r -= uld_txq_entries[CXGB4_TX_CRYPTO]; 2997 + mutex_unlock(&uld_mutex); 2998 + 2999 + skip_uld: 3056 3000 if (r < ctrl_entries) { 3057 3001 const struct sge_ctrl_txq *tx = &s->ctrlq[r * 4]; 3058 3002 ··· 3070 3008 TL("TxQFull:", q.stops); 3071 3009 TL("TxQRestarts:", q.restarts); 3072 3010 3073 - goto unlock; 3011 + goto out; 3074 3012 } 3075 3013 3076 3014 r -= ctrl_entries; ··· 3088 3026 seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:", 3089 3027 s->counter_val[evtq->pktcnt_idx]); 3090 3028 3091 - goto unlock; 3029 + goto out; 3092 3030 } 3093 3031 3094 - unlock: 3095 - mutex_unlock(&uld_mutex); 3096 3032 #undef R 3097 3033 #undef RL 3098 3034 #undef T ··· 3099 3039 #undef R3 3100 3040 #undef T3 3101 3041 #undef S3 3042 + out: 3043 + return 0; 
3044 + 3045 + unlock: 3046 + mutex_unlock(&uld_mutex); 3102 3047 return 0; 3103 3048 } 3104 3049 3105 3050 static int sge_queue_entries(const struct adapter *adap) 3106 3051 { 3107 - int tot_uld_entries = 0; 3108 - int i; 3052 + int i, tot_uld_entries = 0, eohw_entries = 0, eosw_entries = 0; 3053 + 3054 + if (adap->tc_mqprio) { 3055 + struct cxgb4_tc_port_mqprio *port_mqprio; 3056 + u8 tc; 3057 + 3058 + mutex_lock(&adap->tc_mqprio->mqprio_mutex); 3059 + if (adap->sge.eohw_txq) 3060 + eohw_entries = DIV_ROUND_UP(adap->sge.eoqsets, 4); 3061 + 3062 + for (i = 0; i < adap->params.nports; i++) { 3063 + u32 entries = 0; 3064 + 3065 + port_mqprio = &adap->tc_mqprio->port_mqprio[i]; 3066 + for (tc = 0; tc < port_mqprio->mqprio.qopt.num_tc; tc++) 3067 + entries += port_mqprio->mqprio.qopt.count[tc]; 3068 + 3069 + if (entries) 3070 + eosw_entries += DIV_ROUND_UP(entries, 4); 3071 + } 3072 + mutex_unlock(&adap->tc_mqprio->mqprio_mutex); 3073 + } 3109 3074 3110 3075 if (!is_uld(adap)) 3111 3076 goto lld_only; ··· 3147 3062 3148 3063 lld_only: 3149 3064 return DIV_ROUND_UP(adap->sge.ethqsets, 4) + 3150 - (adap->sge.eohw_txq ? DIV_ROUND_UP(adap->sge.eoqsets, 4) : 0) + 3151 - tot_uld_entries + 3065 + eohw_entries + eosw_entries + tot_uld_entries + 3152 3066 DIV_ROUND_UP(MAX_CTRL_QUEUES, 4) + 1; 3153 3067 } 3154 3068 ··· 3328 3244 if (t->nhpftids) 3329 3245 seq_printf(seq, "HPFTID range: %u..%u\n", t->hpftid_base, 3330 3246 t->hpftid_base + t->nhpftids - 1); 3247 + if (t->neotids) 3248 + seq_printf(seq, "EOTID range: %u..%u, in use: %u\n", 3249 + t->eotid_base, t->eotid_base + t->neotids - 1, 3250 + atomic_read(&t->eotids_in_use)); 3331 3251 if (t->ntids) 3332 3252 seq_printf(seq, "HW TID usage: %u IP users, %u IPv6 users\n", 3333 3253 t4_read_reg(adap, LE_DB_ACT_CNT_IPV4_A),
+2 -1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 1579 1579 atomic_set(&t->tids_in_use, 0); 1580 1580 atomic_set(&t->conns_in_use, 0); 1581 1581 atomic_set(&t->hash_tids_in_use, 0); 1582 + atomic_set(&t->eotids_in_use, 0); 1582 1583 1583 1584 /* Setup the free list for atid_tab and clear the stid bitmap. */ 1584 1585 if (natids) { ··· 3022 3021 SCHED_CLASS_RATEUNIT_BITS, 3023 3022 SCHED_CLASS_RATEMODE_ABS, 3024 3023 pi->tx_chan, class_id, 0, 3025 - max_tx_rate * 1000, 0, pktsize); 3024 + max_tx_rate * 1000, 0, pktsize, 0); 3026 3025 if (ret) { 3027 3026 dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n", 3028 3027 ret);
+17
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.c
··· 342 342 p.u.params.minrate = div_u64(mqprio->min_rate[i] * 8, 1000); 343 343 p.u.params.maxrate = div_u64(mqprio->max_rate[i] * 8, 1000); 344 344 345 + /* Request larger burst buffer for smaller MTU, so 346 + * that hardware can work on more data per burst 347 + * cycle. 348 + */ 349 + if (dev->mtu <= ETH_DATA_LEN) 350 + p.u.params.burstsize = 8 * dev->mtu; 351 + 345 352 e = cxgb4_sched_class_alloc(dev, &p); 346 353 if (!e) { 347 354 ret = -ENOMEM; ··· 574 567 int cxgb4_setup_tc_mqprio(struct net_device *dev, 575 568 struct tc_mqprio_qopt_offload *mqprio) 576 569 { 570 + struct adapter *adap = netdev2adap(dev); 577 571 bool needs_bring_up = false; 578 572 int ret; 579 573 580 574 ret = cxgb4_mqprio_validate(dev, mqprio); 581 575 if (ret) 582 576 return ret; 577 + 578 + mutex_lock(&adap->tc_mqprio->mqprio_mutex); 583 579 584 580 /* To configure tc params, the current allocated EOTIDs must 585 581 * be freed up. However, they can't be freed up if there's ··· 619 609 if (needs_bring_up) 620 610 cxgb_open(dev); 621 611 612 + mutex_unlock(&adap->tc_mqprio->mqprio_mutex); 622 613 return ret; 623 614 } 624 615 ··· 632 621 if (!adap->tc_mqprio || !adap->tc_mqprio->port_mqprio) 633 622 return; 634 623 624 + mutex_lock(&adap->tc_mqprio->mqprio_mutex); 635 625 for_each_port(adap, i) { 636 626 dev = adap->port[i]; 637 627 if (!dev) ··· 644 632 645 633 cxgb4_mqprio_disable_offload(dev); 646 634 } 635 + mutex_unlock(&adap->tc_mqprio->mqprio_mutex); 647 636 } 648 637 649 638 int cxgb4_init_tc_mqprio(struct adapter *adap) ··· 665 652 ret = -ENOMEM; 666 653 goto out_free_mqprio; 667 654 } 655 + 656 + mutex_init(&tc_mqprio->mqprio_mutex); 668 657 669 658 tc_mqprio->port_mqprio = tc_port_mqprio; 670 659 for (i = 0; i < adap->params.nports; i++) { ··· 702 687 u8 i; 703 688 704 689 if (adap->tc_mqprio) { 690 + mutex_lock(&adap->tc_mqprio->mqprio_mutex); 705 691 if (adap->tc_mqprio->port_mqprio) { 706 692 for (i = 0; i < adap->params.nports; i++) { 707 693 struct net_device *dev = 
adap->port[i]; ··· 714 698 } 715 699 kfree(adap->tc_mqprio->port_mqprio); 716 700 } 701 + mutex_unlock(&adap->tc_mqprio->mqprio_mutex); 717 702 kfree(adap->tc_mqprio); 718 703 } 719 704 }
+1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_mqprio.h
··· 33 33 34 34 struct cxgb4_tc_mqprio { 35 35 refcount_t refcnt; /* Refcount for adapter-wide resources */ 36 + struct mutex mqprio_mutex; /* Lock for accessing MQPRIO info */ 36 37 struct cxgb4_tc_port_mqprio *port_mqprio; /* Per port MQPRIO info */ 37 38 }; 38 39
+5
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
··· 147 147 /* TIDs in the HASH */ 148 148 atomic_t hash_tids_in_use; 149 149 atomic_t conns_in_use; 150 + /* ETHOFLD TIDs used for rate limiting */ 151 + atomic_t eotids_in_use; 152 + 150 153 /* lock for setting/clearing filter bitmap */ 151 154 spinlock_t ftid_lock; 152 155 ··· 224 221 { 225 222 set_bit(eotid, t->eotid_bmap); 226 223 t->eotid_tab[eotid].data = data; 224 + atomic_inc(&t->eotids_in_use); 227 225 } 228 226 229 227 static inline void cxgb4_free_eotid(struct tid_info *t, u32 eotid) 230 228 { 231 229 clear_bit(eotid, t->eotid_bmap); 232 230 t->eotid_tab[eotid].data = NULL; 231 + atomic_dec(&t->eotids_in_use); 233 232 } 234 233 235 234 int cxgb4_alloc_atid(struct tid_info *t, void *data);
+2 -1
drivers/net/ethernet/chelsio/cxgb4/sched.c
··· 57 57 p->u.params.ratemode, 58 58 p->u.params.channel, e->idx, 59 59 p->u.params.minrate, p->u.params.maxrate, 60 - p->u.params.weight, p->u.params.pktsize); 60 + p->u.params.weight, p->u.params.pktsize, 61 + p->u.params.burstsize); 61 62 break; 62 63 default: 63 64 err = -ENOTSUPP;
+26 -14
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 2091 2091 return flits + nsgl; 2092 2092 } 2093 2093 2094 - static inline void *write_eo_wr(struct adapter *adap, 2095 - struct sge_eosw_txq *eosw_txq, 2096 - struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, 2097 - u32 hdr_len, u32 wrlen) 2094 + static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq, 2095 + struct sk_buff *skb, struct fw_eth_tx_eo_wr *wr, 2096 + u32 hdr_len, u32 wrlen) 2098 2097 { 2099 2098 const struct skb_shared_info *ssi = skb_shinfo(skb); 2100 2099 struct cpl_tx_pkt_core *cpl; ··· 2112 2113 immd_len += hdr_len; 2113 2114 2114 2115 if (!eosw_txq->ncompl || 2115 - eosw_txq->last_compl >= adap->params.ofldq_wr_cred / 2) { 2116 + (eosw_txq->last_compl + wrlen16) >= 2117 + (adap->params.ofldq_wr_cred / 2)) { 2116 2118 compl = true; 2117 2119 eosw_txq->ncompl++; 2118 2120 eosw_txq->last_compl = 0; ··· 2153 2153 return cpl; 2154 2154 } 2155 2155 2156 - static void ethofld_hard_xmit(struct net_device *dev, 2157 - struct sge_eosw_txq *eosw_txq) 2156 + static int ethofld_hard_xmit(struct net_device *dev, 2157 + struct sge_eosw_txq *eosw_txq) 2158 2158 { 2159 2159 struct port_info *pi = netdev2pinfo(dev); 2160 2160 struct adapter *adap = netdev2adap(dev); ··· 2167 2167 bool skip_eotx_wr = false; 2168 2168 struct tx_sw_desc *d; 2169 2169 struct sk_buff *skb; 2170 + int left, ret = 0; 2170 2171 u8 flits, ndesc; 2171 - int left; 2172 2172 2173 2173 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; 2174 2174 spin_lock(&eohw_txq->lock); ··· 2198 2198 wrlen = flits * 8; 2199 2199 wrlen16 = DIV_ROUND_UP(wrlen, 16); 2200 2200 2201 - /* If there are no CPL credits, then wait for credits 2202 - * to come back and retry again 2201 + left = txq_avail(&eohw_txq->q) - ndesc; 2202 + 2203 + /* If there are no descriptors left in hardware queues or no 2204 + * CPL credits left in software queues, then wait for them 2205 + * to come back and retry again. 
Note that we always request 2206 + * for credits update via interrupt for every half credits 2207 + * consumed. So, the interrupt will eventually restore the 2208 + * credits and invoke the Tx path again. 2203 2209 */ 2204 - if (unlikely(wrlen16 > eosw_txq->cred)) 2210 + if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { 2211 + ret = -ENOMEM; 2205 2212 goto out_unlock; 2213 + } 2206 2214 2207 2215 if (unlikely(skip_eotx_wr)) { 2208 2216 start = (u64 *)wr; ··· 2239 2231 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start, 2240 2232 hdr_len); 2241 2233 if (data_len) { 2242 - if (unlikely(cxgb4_map_skb(adap->pdev_dev, skb, d->addr))) { 2234 + ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr); 2235 + if (unlikely(ret)) { 2243 2236 memset(d->addr, 0, sizeof(d->addr)); 2244 2237 eohw_txq->mapping_err++; 2245 2238 goto out_unlock; ··· 2286 2277 2287 2278 out_unlock: 2288 2279 spin_unlock(&eohw_txq->lock); 2280 + return ret; 2289 2281 } 2290 2282 2291 2283 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) 2292 2284 { 2293 2285 struct sk_buff *skb; 2294 - int pktcount; 2286 + int pktcount, ret; 2295 2287 2296 2288 switch (eosw_txq->state) { 2297 2289 case CXGB4_EO_STATE_ACTIVE: ··· 2317 2307 continue; 2318 2308 } 2319 2309 2320 - ethofld_hard_xmit(dev, eosw_txq); 2310 + ret = ethofld_hard_xmit(dev, eosw_txq); 2311 + if (ret) 2312 + break; 2321 2313 } 2322 2314 } 2323 2315
+5 -3
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 10361 10361 return ret; 10362 10362 } 10363 10363 10364 - int t4_sched_params(struct adapter *adapter, int type, int level, int mode, 10365 - int rateunit, int ratemode, int channel, int class, 10366 - int minrate, int maxrate, int weight, int pktsize) 10364 + int t4_sched_params(struct adapter *adapter, u8 type, u8 level, u8 mode, 10365 + u8 rateunit, u8 ratemode, u8 channel, u8 class, 10366 + u32 minrate, u32 maxrate, u16 weight, u16 pktsize, 10367 + u16 burstsize) 10367 10368 { 10368 10369 struct fw_sched_cmd cmd; 10369 10370 ··· 10386 10385 cmd.u.params.max = cpu_to_be32(maxrate); 10387 10386 cmd.u.params.weight = cpu_to_be16(weight); 10388 10387 cmd.u.params.pktsize = cpu_to_be16(pktsize); 10388 + cmd.u.params.burstsize = cpu_to_be16(burstsize); 10389 10389 10390 10390 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd), 10391 10391 NULL, 1);