Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'virtio-net-dynamic-coalescing-moderation'

Heng Qi says:

====================
virtio-net: support dynamic coalescing moderation

Now, virtio-net already supports per-queue moderation parameter
setting. Based on this, we use the Linux dimlib to support
dynamic coalescing moderation for virtio-net.

Due to some scheduling issues, we only support and test the rx dim.

Some test results:

I. Sockperf UDP
=================================================
1. Env
rxq_0 with affinity to cpu_0.

2. Cmd
client: taskset -c 0 sockperf tp -p 8989 -i $IP -t 10 -m 16B
server: taskset -c 0 sockperf sr -p 8989

3. Result
dim off: 1143277.00 rxpps, throughput 17.844 MBps, cpu is 100%.
dim on: 1124161.00 rxpps, throughput 17.610 MBps, cpu is 83.5%.
=================================================

II. Redis
=================================================
1. Env
There are 8 rxqs, and rxq_i with affinity to cpu_i.

2. Result
When all CPUs are at 100%, ops/sec of memtier_benchmark client is
dim off: 978437.23
dim on: 1143638.28
=================================================

III. Nginx
=================================================
1. Env
There are 8 rxqs and rxq_i with affinity to cpu_i.

2. Result
When all CPUs are at 100%, requests/sec of wrk client is
dim off: 877931.67
dim on: 1019160.31
=================================================

IV. Latency of sockperf udp
=================================================
1. Rx cmd
taskset -c 0 sockperf sr -p 8989

2. Tx cmd
taskset -c 0 sockperf pp -i ${ip} -p 8989 -t 10

After running this command 5 times and averaging the results:

3. Result
dim off: 17.7735 usec
dim on: 18.0110 usec
=================================================

Changelog:
v7->v8:
- Add select DIMLIB.

v6->v7:
- Drop the patch titled "spin lock for ctrl cmd access"
- Use rtnl_trylock to avoid the deadlock.

v5->v6:
- Add patch(4/5): spin lock for ctrl cmd access
- Patch(5/5):
- Use spin lock and cancel_work_sync to synchronize

v4->v5:
- Patch(4/4):
- Fix possible synchronization issues with cancel_work_sync.
- Reduce if/else nesting levels

v3->v4:
- Patch(5/5): drop.

v2->v3:
- Patch(4/5): some minor modifications.

v1->v2:
- Patch(2/5): a minor fix.
- Patch(4/5):
- improve the judgment of dim switch conditions.
- Cancel the work when vq reset.
- Patch(5/5): drop the tx dim implementation.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+249 -49
+1
drivers/net/Kconfig
··· 434 434 tristate "Virtio network driver" 435 435 depends on VIRTIO 436 436 select NET_FAILOVER 437 + select DIMLIB 437 438 help 438 439 This is the virtual network driver for virtio. It can be used with 439 440 QEMU based VMMs (like KVM or Xen). Say Y or M.
+248 -49
drivers/net/virtio_net.c
··· 19 19 #include <linux/average.h> 20 20 #include <linux/filter.h> 21 21 #include <linux/kernel.h> 22 + #include <linux/dim.h> 22 23 #include <net/route.h> 23 24 #include <net/xdp.h> 24 25 #include <net/net_failover.h> ··· 173 172 174 173 struct virtnet_rq_stats stats; 175 174 175 + /* The number of rx notifications */ 176 + u16 calls; 177 + 178 + /* Is dynamic interrupt moderation enabled? */ 179 + bool dim_enabled; 180 + 181 + /* Dynamic Interrupt Moderation */ 182 + struct dim dim; 183 + 184 + u32 packets_in_napi; 185 + 176 186 struct virtnet_interrupt_coalesce intr_coal; 177 187 178 188 /* Chain pages by the private ptr. */ ··· 317 305 u8 duplex; 318 306 u32 speed; 319 307 308 + /* Is rx dynamic interrupt moderation enabled? */ 309 + bool rx_dim_enabled; 310 + 320 311 /* Interrupt coalescing settings */ 321 312 struct virtnet_interrupt_coalesce intr_coal_tx; 322 313 struct virtnet_interrupt_coalesce intr_coal_rx; ··· 446 431 } 447 432 } 448 433 449 - static void virtqueue_napi_complete(struct napi_struct *napi, 434 + static bool virtqueue_napi_complete(struct napi_struct *napi, 450 435 struct virtqueue *vq, int processed) 451 436 { 452 437 int opaque; ··· 455 440 if (napi_complete_done(napi, processed)) { 456 441 if (unlikely(virtqueue_poll(vq, opaque))) 457 442 virtqueue_napi_schedule(napi, vq); 443 + else 444 + return true; 458 445 } else { 459 446 virtqueue_disable_cb(vq); 460 447 } 448 + 449 + return false; 461 450 } 462 451 463 452 static void skb_xmit_done(struct virtqueue *vq) ··· 2016 1997 struct virtnet_info *vi = rvq->vdev->priv; 2017 1998 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)]; 2018 1999 2000 + rq->calls++; 2019 2001 virtqueue_napi_schedule(&rq->napi, rvq); 2020 2002 } 2021 2003 ··· 2157 2137 } 2158 2138 } 2159 2139 2140 + static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue *rq) 2141 + { 2142 + struct dim_sample cur_sample = {}; 2143 + 2144 + if (!rq->packets_in_napi) 2145 + return; 2146 + 2147 + 
u64_stats_update_begin(&rq->stats.syncp); 2148 + dim_update_sample(rq->calls, 2149 + u64_stats_read(&rq->stats.packets), 2150 + u64_stats_read(&rq->stats.bytes), 2151 + &cur_sample); 2152 + u64_stats_update_end(&rq->stats.syncp); 2153 + 2154 + net_dim(&rq->dim, cur_sample); 2155 + rq->packets_in_napi = 0; 2156 + } 2157 + 2160 2158 static int virtnet_poll(struct napi_struct *napi, int budget) 2161 2159 { 2162 2160 struct receive_queue *rq = ··· 2183 2145 struct send_queue *sq; 2184 2146 unsigned int received; 2185 2147 unsigned int xdp_xmit = 0; 2148 + bool napi_complete; 2186 2149 2187 2150 virtnet_poll_cleantx(rq); 2188 2151 2189 2152 received = virtnet_receive(rq, budget, &xdp_xmit); 2153 + rq->packets_in_napi += received; 2190 2154 2191 2155 if (xdp_xmit & VIRTIO_XDP_REDIR) 2192 2156 xdp_do_flush(); 2193 2157 2194 2158 /* Out of packets? */ 2195 - if (received < budget) 2196 - virtqueue_napi_complete(napi, rq->vq, received); 2159 + if (received < budget) { 2160 + napi_complete = virtqueue_napi_complete(napi, rq->vq, received); 2161 + if (napi_complete && rq->dim_enabled) 2162 + virtnet_rx_dim_update(vi, rq); 2163 + } 2197 2164 2198 2165 if (xdp_xmit & VIRTIO_XDP_TX) { 2199 2166 sq = virtnet_xdp_get_sq(vi); ··· 2269 2226 disable_delayed_refill(vi); 2270 2227 cancel_delayed_work_sync(&vi->refill); 2271 2228 2272 - for (i--; i >= 0; i--) 2229 + for (i--; i >= 0; i--) { 2273 2230 virtnet_disable_queue_pair(vi, i); 2231 + cancel_work_sync(&vi->rq[i].dim.work); 2232 + } 2233 + 2274 2234 return err; 2275 2235 } 2276 2236 ··· 2435 2389 2436 2390 qindex = rq - vi->rq; 2437 2391 2438 - if (running) 2392 + if (running) { 2439 2393 napi_disable(&rq->napi); 2394 + cancel_work_sync(&rq->dim.work); 2395 + } 2440 2396 2441 2397 err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf); 2442 2398 if (err) ··· 2685 2637 /* Make sure refill_work doesn't re-enable napi! 
*/ 2686 2638 cancel_delayed_work_sync(&vi->refill); 2687 2639 2688 - for (i = 0; i < vi->max_queue_pairs; i++) 2640 + for (i = 0; i < vi->max_queue_pairs; i++) { 2689 2641 virtnet_disable_queue_pair(vi, i); 2642 + cancel_work_sync(&vi->rq[i].dim.work); 2643 + } 2690 2644 2691 2645 return 0; 2692 2646 } ··· 2895 2845 &vi->node_dead); 2896 2846 } 2897 2847 2848 + static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, 2849 + u16 vqn, u32 max_usecs, u32 max_packets) 2850 + { 2851 + struct scatterlist sgs; 2852 + 2853 + vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); 2854 + vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); 2855 + vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); 2856 + sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); 2857 + 2858 + if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 2859 + VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, 2860 + &sgs)) 2861 + return -EINVAL; 2862 + 2863 + return 0; 2864 + } 2865 + 2866 + static int virtnet_send_rx_ctrl_coal_vq_cmd(struct virtnet_info *vi, 2867 + u16 queue, u32 max_usecs, 2868 + u32 max_packets) 2869 + { 2870 + int err; 2871 + 2872 + err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), 2873 + max_usecs, max_packets); 2874 + if (err) 2875 + return err; 2876 + 2877 + vi->rq[queue].intr_coal.max_usecs = max_usecs; 2878 + vi->rq[queue].intr_coal.max_packets = max_packets; 2879 + 2880 + return 0; 2881 + } 2882 + 2883 + static int virtnet_send_tx_ctrl_coal_vq_cmd(struct virtnet_info *vi, 2884 + u16 queue, u32 max_usecs, 2885 + u32 max_packets) 2886 + { 2887 + int err; 2888 + 2889 + err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), 2890 + max_usecs, max_packets); 2891 + if (err) 2892 + return err; 2893 + 2894 + vi->sq[queue].intr_coal.max_usecs = max_usecs; 2895 + vi->sq[queue].intr_coal.max_packets = max_packets; 2896 + 2897 + return 0; 2898 + } 2899 + 2898 2900 static void virtnet_get_ringparam(struct net_device *dev, 2899 2901 struct ethtool_ringparam *ring, 2900 2902 
struct kernel_ethtool_ringparam *kernel_ring, ··· 2959 2857 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq); 2960 2858 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq); 2961 2859 } 2962 - 2963 - static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, 2964 - u16 vqn, u32 max_usecs, u32 max_packets); 2965 2860 2966 2861 static int virtnet_set_ringparam(struct net_device *dev, 2967 2862 struct ethtool_ringparam *ring, ··· 3001 2902 * through the VIRTIO_NET_CTRL_NOTF_COAL_TX_SET command, or, if the driver 3002 2903 * did not set any TX coalescing parameters, to 0. 3003 2904 */ 3004 - err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i), 3005 - vi->intr_coal_tx.max_usecs, 3006 - vi->intr_coal_tx.max_packets); 2905 + err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, i, 2906 + vi->intr_coal_tx.max_usecs, 2907 + vi->intr_coal_tx.max_packets); 3007 2908 if (err) 3008 2909 return err; 3009 - 3010 - vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs; 3011 - vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets; 3012 2910 } 3013 2911 3014 2912 if (ring->rx_pending != rx_pending) { ··· 3014 2918 return err; 3015 2919 3016 2920 /* The reason is same as the transmit virtqueue reset */ 3017 - err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i), 3018 - vi->intr_coal_rx.max_usecs, 3019 - vi->intr_coal_rx.max_packets); 2921 + err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, i, 2922 + vi->intr_coal_rx.max_usecs, 2923 + vi->intr_coal_rx.max_packets); 3020 2924 if (err) 3021 2925 return err; 3022 - 3023 - vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs; 3024 - vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets; 3025 2926 } 3026 2927 } 3027 2928 ··· 3355 3262 return 0; 3356 3263 } 3357 3264 3358 - static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, 3359 - struct ethtool_coalesce *ec) 3265 + static int virtnet_send_tx_notf_coal_cmds(struct virtnet_info *vi, 3266 + struct ethtool_coalesce *ec) 3360 3267 { 3361 - struct 
scatterlist sgs_tx, sgs_rx; 3268 + struct scatterlist sgs_tx; 3362 3269 int i; 3363 3270 3364 3271 vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs); ··· 3370 3277 &sgs_tx)) 3371 3278 return -EINVAL; 3372 3279 3373 - /* Save parameters */ 3374 3280 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs; 3375 3281 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames; 3376 3282 for (i = 0; i < vi->max_queue_pairs; i++) { ··· 3377 3285 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames; 3378 3286 } 3379 3287 3288 + return 0; 3289 + } 3290 + 3291 + static int virtnet_send_rx_notf_coal_cmds(struct virtnet_info *vi, 3292 + struct ethtool_coalesce *ec) 3293 + { 3294 + bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; 3295 + struct scatterlist sgs_rx; 3296 + int i; 3297 + 3298 + if (rx_ctrl_dim_on && !virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) 3299 + return -EOPNOTSUPP; 3300 + 3301 + if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != vi->intr_coal_rx.max_usecs || 3302 + ec->rx_max_coalesced_frames != vi->intr_coal_rx.max_packets)) 3303 + return -EINVAL; 3304 + 3305 + if (rx_ctrl_dim_on && !vi->rx_dim_enabled) { 3306 + vi->rx_dim_enabled = true; 3307 + for (i = 0; i < vi->max_queue_pairs; i++) 3308 + vi->rq[i].dim_enabled = true; 3309 + return 0; 3310 + } 3311 + 3312 + if (!rx_ctrl_dim_on && vi->rx_dim_enabled) { 3313 + vi->rx_dim_enabled = false; 3314 + for (i = 0; i < vi->max_queue_pairs; i++) 3315 + vi->rq[i].dim_enabled = false; 3316 + } 3317 + 3318 + /* Since the per-queue coalescing params can be set, 3319 + * we need apply the global new params even if they 3320 + * are not updated. 
3321 + */ 3380 3322 vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs); 3381 3323 vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames); 3382 3324 sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx)); ··· 3420 3294 &sgs_rx)) 3421 3295 return -EINVAL; 3422 3296 3423 - /* Save parameters */ 3424 3297 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs; 3425 3298 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames; 3426 3299 for (i = 0; i < vi->max_queue_pairs; i++) { ··· 3430 3305 return 0; 3431 3306 } 3432 3307 3433 - static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi, 3434 - u16 vqn, u32 max_usecs, u32 max_packets) 3308 + static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi, 3309 + struct ethtool_coalesce *ec) 3435 3310 { 3436 - struct scatterlist sgs; 3311 + int err; 3437 3312 3438 - vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn); 3439 - vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs); 3440 - vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets); 3441 - sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq)); 3313 + err = virtnet_send_tx_notf_coal_cmds(vi, ec); 3314 + if (err) 3315 + return err; 3442 3316 3443 - if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL, 3444 - VIRTIO_NET_CTRL_NOTF_COAL_VQ_SET, 3445 - &sgs)) 3317 + err = virtnet_send_rx_notf_coal_cmds(vi, ec); 3318 + if (err) 3319 + return err; 3320 + 3321 + return 0; 3322 + } 3323 + 3324 + static int virtnet_send_rx_notf_coal_vq_cmds(struct virtnet_info *vi, 3325 + struct ethtool_coalesce *ec, 3326 + u16 queue) 3327 + { 3328 + bool rx_ctrl_dim_on = !!ec->use_adaptive_rx_coalesce; 3329 + bool cur_rx_dim = vi->rq[queue].dim_enabled; 3330 + u32 max_usecs, max_packets; 3331 + int err; 3332 + 3333 + max_usecs = vi->rq[queue].intr_coal.max_usecs; 3334 + max_packets = vi->rq[queue].intr_coal.max_packets; 3335 + 3336 + if (rx_ctrl_dim_on && (ec->rx_coalesce_usecs != max_usecs || 3337 + 
ec->rx_max_coalesced_frames != max_packets)) 3446 3338 return -EINVAL; 3339 + 3340 + if (rx_ctrl_dim_on && !cur_rx_dim) { 3341 + vi->rq[queue].dim_enabled = true; 3342 + return 0; 3343 + } 3344 + 3345 + if (!rx_ctrl_dim_on && cur_rx_dim) 3346 + vi->rq[queue].dim_enabled = false; 3347 + 3348 + /* If no params are updated, userspace ethtool will 3349 + * reject the modification. 3350 + */ 3351 + err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, queue, 3352 + ec->rx_coalesce_usecs, 3353 + ec->rx_max_coalesced_frames); 3354 + if (err) 3355 + return err; 3447 3356 3448 3357 return 0; 3449 3358 } ··· 3488 3329 { 3489 3330 int err; 3490 3331 3491 - err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue), 3492 - ec->rx_coalesce_usecs, 3493 - ec->rx_max_coalesced_frames); 3332 + err = virtnet_send_rx_notf_coal_vq_cmds(vi, ec, queue); 3494 3333 if (err) 3495 3334 return err; 3496 3335 3497 - vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs; 3498 - vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames; 3499 - 3500 - err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue), 3501 - ec->tx_coalesce_usecs, 3502 - ec->tx_max_coalesced_frames); 3336 + err = virtnet_send_tx_ctrl_coal_vq_cmd(vi, queue, 3337 + ec->tx_coalesce_usecs, 3338 + ec->tx_max_coalesced_frames); 3503 3339 if (err) 3504 3340 return err; 3505 - 3506 - vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs; 3507 - vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames; 3508 3341 3509 3342 return 0; 3343 + } 3344 + 3345 + static void virtnet_rx_dim_work(struct work_struct *work) 3346 + { 3347 + struct dim *dim = container_of(work, struct dim, work); 3348 + struct receive_queue *rq = container_of(dim, 3349 + struct receive_queue, dim); 3350 + struct virtnet_info *vi = rq->vq->vdev->priv; 3351 + struct net_device *dev = vi->dev; 3352 + struct dim_cq_moder update_moder; 3353 + int i, qnum, err; 3354 + 3355 + if (!rtnl_trylock()) 3356 + return; 3357 + 3358 + /* Each rxq's work is queued by 
"net_dim()->schedule_work()" 3359 + * in response to NAPI traffic changes. Note that dim->profile_ix 3360 + * for each rxq is updated prior to the queuing action. 3361 + * So we only need to traverse and update profiles for all rxqs 3362 + * in the work which is holding rtnl_lock. 3363 + */ 3364 + for (i = 0; i < vi->curr_queue_pairs; i++) { 3365 + rq = &vi->rq[i]; 3366 + dim = &rq->dim; 3367 + qnum = rq - vi->rq; 3368 + 3369 + if (!rq->dim_enabled) 3370 + continue; 3371 + 3372 + update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); 3373 + if (update_moder.usec != rq->intr_coal.max_usecs || 3374 + update_moder.pkts != rq->intr_coal.max_packets) { 3375 + err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, 3376 + update_moder.usec, 3377 + update_moder.pkts); 3378 + if (err) 3379 + pr_debug("%s: Failed to send dim parameters on rxq%d\n", 3380 + dev->name, qnum); 3381 + dim->state = DIM_START_MEASURE; 3382 + } 3383 + } 3384 + 3385 + rtnl_unlock(); 3510 3386 } 3511 3387 3512 3388 static int virtnet_coal_params_supported(struct ethtool_coalesce *ec) ··· 3625 3431 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs; 3626 3432 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets; 3627 3433 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets; 3434 + ec->use_adaptive_rx_coalesce = vi->rx_dim_enabled; 3628 3435 } else { 3629 3436 ec->rx_max_coalesced_frames = 1; 3630 3437 ··· 3683 3488 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs; 3684 3489 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets; 3685 3490 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets; 3491 + ec->use_adaptive_rx_coalesce = vi->rq[queue].dim_enabled; 3686 3492 } else { 3687 3493 ec->rx_max_coalesced_frames = 1; 3688 3494 ··· 3809 3613 3810 3614 static const struct ethtool_ops virtnet_ethtool_ops = { 3811 3615 .supported_coalesce_params = ETHTOOL_COALESCE_MAX_FRAMES | 3812 - ETHTOOL_COALESCE_USECS, 3616 + ETHTOOL_COALESCE_USECS | 
ETHTOOL_COALESCE_USE_ADAPTIVE_RX, 3813 3617 .get_drvinfo = virtnet_get_drvinfo, 3814 3618 .get_link = ethtool_op_get_link, 3815 3619 .get_ringparam = virtnet_get_ringparam, ··· 4398 4202 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi, 4399 4203 virtnet_poll_tx, 4400 4204 napi_tx ? napi_weight : 0); 4205 + 4206 + INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work); 4207 + vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; 4401 4208 4402 4209 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); 4403 4210 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);