// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice.h"
#include "ice_vf_lib_private.h"
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
#include "ice_dcb_lib.h"
#include "ice_flow.h"
#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#include "ice_flex_pipe.h"
#include "ice_vf_vsi_vlan_ops.h"
#include "ice_vlan.h"

/**
 * ice_free_vf_entries - Free all VF entries from the hash table
 * @pf: pointer to the PF structure
 *
 * Iterate over the VF hash table, removing and releasing all VF entries.
 * Called during VF teardown or as cleanup during failed VF initialization.
 */
static void ice_free_vf_entries(struct ice_pf *pf)
{
	struct ice_vfs *vfs = &pf->vfs;
	struct hlist_node *tmp;
	struct ice_vf *vf;
	unsigned int bkt;

	/* Remove all VFs from the hash table and release their main
	 * reference. Once all references to the VF are dropped, ice_put_vf()
	 * will call ice_release_vf which will remove the VF memory.
	 */
	lockdep_assert_held(&vfs->table_lock);

	hash_for_each_safe(vfs->table, bkt, tmp, vf, entry) {
		hash_del_rcu(&vf->entry);
		ice_deinitialize_vf_entry(vf);
		ice_put_vf(vf);
	}
}

/**
 * ice_free_vf_res - Free a VF's resources
 * @vf: pointer to the VF info
 */
static void ice_free_vf_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	int i, last_vector_idx;

	/* First, disable VF's configuration API to prevent OS from
	 * accessing the VF's VSI after it's freed or invalidated.
	 */
	clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
	ice_vf_fdir_exit(vf);
	/* free VF control VSI */
	if (vf->ctrl_vsi_idx != ICE_NO_VSI)
		ice_vf_ctrl_vsi_release(vf);

	/* free VSI and disconnect it from the parent uplink */
	if (vf->lan_vsi_idx != ICE_NO_VSI) {
		ice_vf_vsi_release(vf);
		vf->num_mac = 0;
		vf->num_mac_lldp = 0;
	}

	last_vector_idx = vf->first_vector_idx + vf->num_msix - 1;

	/* clear VF MDD event information */
	memset(&vf->mdd_tx_events, 0, sizeof(vf->mdd_tx_events));
	memset(&vf->mdd_rx_events, 0, sizeof(vf->mdd_rx_events));

	/* Disable interrupts so that VF starts in a known state */
	for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
		wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
		ice_flush(&pf->hw);
	}
	/* reset some of the state variables keeping track of the resources */
	clear_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states);
	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}

/**
 * ice_dis_vf_mappings - disable VF MSI-X and queue mappings in hardware
 * @vf: pointer to the VF structure
 */
static void ice_dis_vf_mappings(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	struct device *dev;
	int first, last, v;
	struct ice_hw *hw;

	hw = &pf->hw;
	vsi = ice_get_vf_vsi(vf);
	if (WARN_ON(!vsi))
		return;

	dev = ice_pf_to_dev(pf);
	wr32(hw, VPINT_ALLOC(vf->vf_id), 0);
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), 0);

	first = vf->first_vector_idx;
	last = first + vf->num_msix - 1;
	for (v = first; v <= last; v++) {
		u32 reg;

		reg = FIELD_PREP(GLINT_VECT2FUNC_IS_PF_M, 1) |
		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");

	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
	else
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
 * ice_free_vfs - Free all VFs
 * @pf: pointer to the PF structure
 */
void ice_free_vfs(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_vfs *vfs = &pf->vfs;
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;

	if (!ice_has_vfs(pf))
		return;

	while (test_and_set_bit(ICE_VF_DIS, pf->state))
		usleep_range(1000, 2000);

	/* Disable IOV before freeing resources. This lets any VF drivers
	 * running in the host get themselves cleaned up before we yank
	 * the carpet out from underneath their feet.
	 */
	if (!pci_vfs_assigned(pf->pdev))
		pci_disable_sriov(pf->pdev);
	else
		dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");

	mutex_lock(&vfs->table_lock);

	ice_for_each_vf(pf, bkt, vf) {
		mutex_lock(&vf->cfg_lock);

		ice_eswitch_detach_vf(pf, vf);
		ice_dis_vf_qs(vf);
		ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);

		if (test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
			/* disable VF qp mappings and set VF disable state */
			ice_dis_vf_mappings(vf);
			set_bit(ICE_VF_STATE_DIS, vf->vf_states);
			ice_free_vf_res(vf);
		}

		if (!pci_vfs_assigned(pf->pdev)) {
			u32 reg_idx, bit_idx;

			reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
			bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
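			/* e.g. with vf_base_id 64 and vf_id 3 (illustrative
			 * values only), the absolute VF ID 67 lands in
			 * GLGEN_VFLRSTAT(2), bit 3 (67 / 32, 67 % 32)
			 */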
			wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
		}

		mutex_unlock(&vf->cfg_lock);
	}

	vfs->num_qps_per = 0;
	ice_free_vf_entries(pf);

	mutex_unlock(&vfs->table_lock);

	clear_bit(ICE_VF_DIS, pf->state);
	clear_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
}

/**
 * ice_vf_vsi_setup - Set up a VF VSI
 * @vf: VF to set up the VSI for
 *
 * Returns pointer to the successfully allocated VSI struct on success,
 * otherwise returns NULL on failure.
 */
static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
{
	struct ice_vsi_cfg_params params = {};
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;

	params.type = ICE_VSI_VF;
	params.port_info = ice_vf_get_port_info(vf);
	params.vf = vf;
	params.flags = ICE_VSI_FLAG_INIT;

	vsi = ice_vsi_setup(pf, &params);

	if (!vsi) {
		dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
		ice_vf_invalidate_vsi(vf);
		return NULL;
	}

	vf->lan_vsi_idx = vsi->idx;

	return vsi;
}

/**
 * ice_ena_vf_msix_mappings - enable VF MSIX mappings in hardware
 * @vf: VF to enable MSIX mappings for
 *
 * Some registers need to be indexed/configured using hardware global device
 * values, while other registers need 0-based values relative to the PF.
 */
static void ice_ena_vf_msix_mappings(struct ice_vf *vf)
{
	int device_based_first_msix, device_based_last_msix;
	int pf_based_first_msix, pf_based_last_msix, v;
	struct ice_pf *pf = vf->pf;
	int device_based_vf_id;
	struct ice_hw *hw;
	u32 reg;

	hw = &pf->hw;
	pf_based_first_msix = vf->first_vector_idx;
	pf_based_last_msix = (pf_based_first_msix + vf->num_msix) - 1;

	device_based_first_msix = pf_based_first_msix +
		pf->hw.func_caps.common_cap.msix_vector_first_id;
	device_based_last_msix =
		(device_based_first_msix + vf->num_msix) - 1;
	device_based_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	reg = FIELD_PREP(VPINT_ALLOC_FIRST_M, device_based_first_msix) |
	      FIELD_PREP(VPINT_ALLOC_LAST_M, device_based_last_msix) |
	      VPINT_ALLOC_VALID_M;
	wr32(hw, VPINT_ALLOC(vf->vf_id), reg);

	reg = FIELD_PREP(VPINT_ALLOC_PCI_FIRST_M, device_based_first_msix) |
	      FIELD_PREP(VPINT_ALLOC_PCI_LAST_M, device_based_last_msix) |
	      VPINT_ALLOC_PCI_VALID_M;
	wr32(hw, VPINT_ALLOC_PCI(vf->vf_id), reg);

	/* map the interrupts to their functions */
	for (v = pf_based_first_msix; v <= pf_based_last_msix; v++) {
		reg = FIELD_PREP(GLINT_VECT2FUNC_VF_NUM_M, device_based_vf_id) |
		      FIELD_PREP(GLINT_VECT2FUNC_PF_NUM_M, hw->pf_id);
		wr32(hw, GLINT_VECT2FUNC(v), reg);
	}

	/* Map mailbox interrupt to VF MSI-X vector 0 */
	wr32(hw, VPINT_MBX_CTL(device_based_vf_id), VPINT_MBX_CTL_CAUSE_ENA_M);
}
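
/* Illustration with made-up values: if msix_vector_first_id is 1,
 * vf_base_id is 64, vf->first_vector_idx is 8, vf->num_msix is 5 and
 * vf_id is 2, then the PF-based vector range is 8..12, VPINT_ALLOC and
 * VPINT_ALLOC_PCI are programmed with the device-based range 9..13, and
 * VPINT_MBX_CTL is indexed by the absolute VF ID 66.
 */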

/**
 * ice_ena_vf_q_mappings - enable Rx/Tx queue mappings for a VF
 * @vf: VF to enable the mappings for
 * @max_txq: max Tx queues allowed on the VF's VSI
 * @max_rxq: max Rx queues allowed on the VF's VSI
 */
static void ice_ena_vf_q_mappings(struct ice_vf *vf, u16 max_txq, u16 max_rxq)
{
	struct device *dev = ice_pf_to_dev(vf->pf);
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	if (WARN_ON(!vsi))
		return;

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_TXQ_MAPENA(vf->vf_id), VPLAN_TXQ_MAPENA_TX_ENA_M);

	/* VF Tx queues allocation */
	if (vsi->tx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Tx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = FIELD_PREP(VPLAN_TX_QBASE_VFFIRSTQ_M, vsi->txq_map[0]) |
		      FIELD_PREP(VPLAN_TX_QBASE_VFNUMQ_M, max_txq - 1);
		wr32(hw, VPLAN_TX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Tx queues is not yet implemented\n");
	}

	/* set regardless of mapping mode */
	wr32(hw, VPLAN_RXQ_MAPENA(vf->vf_id), VPLAN_RXQ_MAPENA_RX_ENA_M);

	/* VF Rx queues allocation */
	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG) {
		/* set the VF PF Rx queue range
		 * VFNUMQ value should be set to (number of queues - 1). A value
		 * of 0 means 1 queue and a value of 255 means 256 queues
		 */
		reg = FIELD_PREP(VPLAN_RX_QBASE_VFFIRSTQ_M, vsi->rxq_map[0]) |
		      FIELD_PREP(VPLAN_RX_QBASE_VFNUMQ_M, max_rxq - 1);
		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), reg);
	} else {
		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
	}
}

/**
 * ice_ena_vf_mappings - enable VF MSIX and queue mapping
 * @vf: pointer to the VF structure
 */
static void ice_ena_vf_mappings(struct ice_vf *vf)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);

	if (WARN_ON(!vsi))
		return;

	ice_ena_vf_msix_mappings(vf);
	ice_ena_vf_q_mappings(vf, vsi->alloc_txq, vsi->alloc_rxq);
}

/**
 * ice_calc_vf_reg_idx - Calculate the VF's register index in the PF space
 * @vf: VF to calculate the register index for
 * @q_vector: a q_vector associated to the VF
 */
void ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector)
{
	if (!vf || !q_vector)
		return;

	/* always add one to account for the OICR being the first MSIX */
	q_vector->vf_reg_idx = q_vector->v_idx + ICE_NONQ_VECS_VF;
	q_vector->reg_idx = vf->first_vector_idx + q_vector->vf_reg_idx;
}
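
/* Example with illustrative numbers, assuming ICE_NONQ_VECS_VF is 1
 * (the OICR): the VF's first data q_vector (v_idx 0) gets vf_reg_idx 1,
 * and with first_vector_idx 8 its PF-space reg_idx is 9.
 */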

/**
 * ice_set_per_vf_res - check if vectors and queues are available
 * @pf: pointer to the PF structure
 * @num_vfs: the number of SR-IOV VFs being configured
 *
 * First, determine HW interrupts from the common pool. If we allocate fewer
 * VFs, we get more vectors and can enable more queues per VF. Note that this
 * does not grab any vectors from the SW pool already allocated. Also note
 * that all vector counts include one for each VF's miscellaneous interrupt
 * vector (i.e. OICR).
 *
 * Minimum VFs - 2 vectors, 1 queue pair
 * Small VFs - 5 vectors, 4 queue pairs
 * Medium VFs - 17 vectors, 16 queue pairs
 *
 * Second, determine number of queue pairs per VF by starting with a pre-defined
 * maximum each VF supports. If this is not possible, then we adjust based on
 * queue pairs available on the device.
 *
 * Lastly, set queue and MSI-X VF variables tracked by the PF so it can be used
 * by each VF during VF initialization and reset.
 */
static int ice_set_per_vf_res(struct ice_pf *pf, u16 num_vfs)
{
	u16 num_msix_per_vf, num_txq, num_rxq, avail_qs;
	int msix_avail_per_vf, msix_avail_for_sriov;
	struct device *dev = ice_pf_to_dev(pf);

	lockdep_assert_held(&pf->vfs.table_lock);

	if (!num_vfs)
		return -EINVAL;

	/* determine MSI-X resources per VF */
	msix_avail_for_sriov = pf->virt_irq_tracker.num_entries;
	msix_avail_per_vf = msix_avail_for_sriov / num_vfs;
	if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MED) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MED;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_SMALL) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_SMALL;
	} else if (msix_avail_per_vf >= ICE_NUM_VF_MSIX_MULTIQ_MIN) {
		num_msix_per_vf = ICE_NUM_VF_MSIX_MULTIQ_MIN;
	} else if (msix_avail_per_vf >= ICE_MIN_INTR_PER_VF) {
		num_msix_per_vf = ICE_MIN_INTR_PER_VF;
	} else {
		dev_err(dev, "Only %d MSI-X interrupts available for SR-IOV. Not enough to support minimum of %d MSI-X interrupts per VF for %d VFs\n",
			msix_avail_for_sriov, ICE_MIN_INTR_PER_VF,
			num_vfs);
		return -ENOSPC;
	}

	num_txq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_txq_count(pf) / num_vfs;
	if (!avail_qs)
		num_txq = 0;
	else if (num_txq > avail_qs)
		num_txq = rounddown_pow_of_two(avail_qs);

	num_rxq = min_t(u16, num_msix_per_vf - ICE_NONQ_VECS_VF,
			ICE_MAX_RSS_QS_PER_VF);
	avail_qs = ice_get_avail_rxq_count(pf) / num_vfs;
	if (!avail_qs)
		num_rxq = 0;
	else if (num_rxq > avail_qs)
		num_rxq = rounddown_pow_of_two(avail_qs);

	if (num_txq < ICE_MIN_QS_PER_VF || num_rxq < ICE_MIN_QS_PER_VF) {
		dev_err(dev, "Not enough queues to support minimum of %d queue pairs per VF for %d VFs\n",
			ICE_MIN_QS_PER_VF, num_vfs);
		return -ENOSPC;
	}

	/* only allow equal Tx/Rx queue count (i.e. queue pairs) */
	pf->vfs.num_qps_per = min_t(int, num_txq, num_rxq);
	pf->vfs.num_msix_per = num_msix_per_vf;
	dev_info(dev, "Enabling %d VFs with %d vectors and %d queues per VF\n",
		 num_vfs, pf->vfs.num_msix_per, pf->vfs.num_qps_per);

	return 0;
}
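
/* Worked example (made-up numbers): with 40 MSI-X vectors available for
 * SR-IOV and 8 VFs, msix_avail_per_vf is 5, so each VF is given
 * ICE_NUM_VF_MSIX_SMALL (5) vectors: one OICR plus up to four queue
 * vectors, i.e. at most 4 queue pairs if enough Tx/Rx queues remain.
 */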

/**
 * ice_init_vf_vsi_res - initialize/setup VF VSI resources
 * @vf: VF to initialize/setup the VSI for
 *
 * This function creates a VSI for the VF, adds a VLAN 0 filter, and sets up
 * the VF VSI's broadcast filter. It is only used during initial VF creation.
 */
static int ice_init_vf_vsi_res(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct ice_vsi *vsi;
	int err;

	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0)
		return -ENOMEM;

	vsi = ice_vf_vsi_setup(vf);
	if (!vsi)
		return -ENOMEM;

	err = ice_vf_init_host_cfg(vf, vsi);
	if (err)
		goto release_vsi;

	return 0;

release_vsi:
	ice_vf_vsi_release(vf);
	return err;
}

/**
 * ice_start_vfs - start VFs so they are ready to be used by SR-IOV
 * @pf: PF the VFs are associated with
 */
static int ice_start_vfs(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	unsigned int bkt, it_cnt;
	struct ice_vf *vf;
	int retval;

	lockdep_assert_held(&pf->vfs.table_lock);

	it_cnt = 0;
	ice_for_each_vf(pf, bkt, vf) {
		vf->vf_ops->clear_reset_trigger(vf);

		retval = ice_init_vf_vsi_res(vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to initialize VSI resources for VF %d, error %d\n",
				vf->vf_id, retval);
			goto teardown;
		}

		retval = ice_eswitch_attach_vf(pf, vf);
		if (retval) {
			dev_err(ice_pf_to_dev(pf), "Failed to attach VF %d to eswitch, error %d",
				vf->vf_id, retval);
			ice_vf_vsi_release(vf);
			goto teardown;
		}

		set_bit(ICE_VF_STATE_INIT, vf->vf_states);
		ice_ena_vf_mappings(vf);
		wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
		it_cnt++;
	}

	ice_flush(hw);
	return 0;

teardown:
	ice_for_each_vf(pf, bkt, vf) {
		if (it_cnt == 0)
			break;

		ice_dis_vf_mappings(vf);
		ice_vf_vsi_release(vf);
		it_cnt--;
	}

	return retval;
}

/**
 * ice_sriov_free_vf - Free VF memory after all references are dropped
 * @vf: pointer to VF to free
 *
 * Called by ice_put_vf through ice_release_vf once the last reference to a VF
 * structure has been dropped.
 */
static void ice_sriov_free_vf(struct ice_vf *vf)
{
	mutex_destroy(&vf->cfg_lock);

	kfree_rcu(vf, rcu);
}

/**
 * ice_sriov_clear_reset_state - clears VF Reset status register
 * @vf: the vf to configure
 */
static void ice_sriov_clear_reset_state(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;

	/* Clear the reset status register so that VF immediately sees that
	 * the device is resetting, even if hardware hasn't yet gotten around
	 * to clearing VFGEN_RSTAT for us.
	 */
	wr32(hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_INPROGRESS);
}

/**
 * ice_sriov_clear_mbx_register - clears SRIOV VF's mailbox registers
 * @vf: the vf to configure
 */
static void ice_sriov_clear_mbx_register(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;

	wr32(&pf->hw, VF_MBX_ARQLEN(vf->vf_id), 0);
	wr32(&pf->hw, VF_MBX_ATQLEN(vf->vf_id), 0);
}

/**
 * ice_sriov_trigger_reset_register - trigger VF reset for SRIOV VF
 * @vf: pointer to VF structure
 * @is_vflr: true if reset occurred due to VFLR
 *
 * Trigger and cleanup after a VF reset for a SR-IOV VF.
 */
static void ice_sriov_trigger_reset_register(struct ice_vf *vf, bool is_vflr)
{
	struct ice_pf *pf = vf->pf;
	u32 reg, reg_idx, bit_idx;
	unsigned int vf_abs_id, i;
	struct device *dev;
	struct ice_hw *hw;

	dev = ice_pf_to_dev(pf);
	hw = &pf->hw;
	vf_abs_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* In the case of a VFLR, HW has already reset the VF and we just need
	 * to clean up. Otherwise we must first trigger the reset using the
	 * VFRTRIG register.
	 */
	if (!is_vflr) {
		reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
		reg |= VPGEN_VFRTRIG_VFSWR_M;
		wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	}

	/* clear the VFLR bit in GLGEN_VFLRSTAT */
	reg_idx = (vf_abs_id) / 32;
	bit_idx = (vf_abs_id) % 32;
	wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
	ice_flush(hw);

	wr32(hw, PF_PCI_CIAA,
	     VF_DEVICE_STATUS | (vf_abs_id << PF_PCI_CIAA_VF_NUM_S));
	for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
		reg = rd32(hw, PF_PCI_CIAD);
		/* no transactions pending so stop polling */
		if ((reg & VF_TRANS_PENDING_M) == 0)
			break;

		dev_err(dev, "VF %u PCI transactions stuck\n", vf->vf_id);
		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
	}
}

/**
 * ice_sriov_poll_reset_status - poll SRIOV VF reset status
 * @vf: pointer to VF structure
 *
 * Returns true when reset is successful, else returns false
 */
static bool ice_sriov_poll_reset_status(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	unsigned int i;
	u32 reg;

	for (i = 0; i < 10; i++) {
		/* VF reset requires driver to first reset the VF and then
		 * poll the status register to make sure that the reset
		 * completed successfully.
		 */
		reg = rd32(&pf->hw, VPGEN_VFRSTAT(vf->vf_id));
		if (reg & VPGEN_VFRSTAT_VFRD_M)
			return true;

		/* only sleep if the reset is not done */
		usleep_range(10, 20);
	}
	return false;
}

/**
 * ice_sriov_clear_reset_trigger - enable VF to access hardware
 * @vf: VF to enable hardware access for
 */
static void ice_sriov_clear_reset_trigger(struct ice_vf *vf)
{
	struct ice_hw *hw = &vf->pf->hw;
	u32 reg;

	reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_id));
	reg &= ~VPGEN_VFRTRIG_VFSWR_M;
	wr32(hw, VPGEN_VFRTRIG(vf->vf_id), reg);
	ice_flush(hw);
}

/**
 * ice_sriov_post_vsi_rebuild - tasks to do after the VF's VSI has been rebuilt
 * @vf: VF to perform tasks on
 */
static void ice_sriov_post_vsi_rebuild(struct ice_vf *vf)
{
	ice_ena_vf_mappings(vf);
	wr32(&vf->pf->hw, VFGEN_RSTAT(vf->vf_id), VIRTCHNL_VFR_VFACTIVE);
}

static const struct ice_vf_ops ice_sriov_vf_ops = {
	.reset_type = ICE_VF_RESET,
	.free = ice_sriov_free_vf,
	.clear_reset_state = ice_sriov_clear_reset_state,
	.clear_mbx_register = ice_sriov_clear_mbx_register,
	.trigger_reset_register = ice_sriov_trigger_reset_register,
	.poll_reset_status = ice_sriov_poll_reset_status,
	.clear_reset_trigger = ice_sriov_clear_reset_trigger,
	.irq_close = NULL,
	.post_vsi_rebuild = ice_sriov_post_vsi_rebuild,
};
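
/* SR-IOV implementation of the generic VF operations. VF code that is
 * shared across VF types dispatches through these callbacks; for
 * example, ice_start_vfs() above invokes
 * vf->vf_ops->clear_reset_trigger().
 */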

/**
 * ice_create_vf_entries - Allocate and insert VF entries
 * @pf: pointer to the PF structure
 * @num_vfs: the number of VFs to allocate
 *
 * Allocate new VF entries and insert them into the hash table. Set some
 * basic default fields for initializing the new VFs.
 *
 * After this function exits, the hash table will have num_vfs entries
 * inserted.
 *
 * Returns 0 on success or an integer error code on failure.
 */
static int ice_create_vf_entries(struct ice_pf *pf, u16 num_vfs)
{
	struct pci_dev *pdev = pf->pdev;
	struct ice_vfs *vfs = &pf->vfs;
	struct pci_dev *vfdev = NULL;
	struct ice_vf *vf;
	u16 vf_pdev_id;
	int err, pos;

	lockdep_assert_held(&vfs->table_lock);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_pdev_id);

	for (u16 vf_id = 0; vf_id < num_vfs; vf_id++) {
		vf = kzalloc(sizeof(*vf), GFP_KERNEL);
		if (!vf) {
			err = -ENOMEM;
			goto err_free_entries;
		}
		kref_init(&vf->refcnt);

		vf->pf = pf;
		vf->vf_id = vf_id;

		/* set sriov vf ops for VFs created during SRIOV flow */
		vf->vf_ops = &ice_sriov_vf_ops;

		ice_initialize_vf_entry(vf);

		do {
			vfdev = pci_get_device(pdev->vendor, vf_pdev_id, vfdev);
		} while (vfdev && vfdev->physfn != pdev);
		vf->vfdev = vfdev;
		vf->vf_sw_id = pf->first_sw;

		pci_dev_get(vfdev);

		hash_add_rcu(vfs->table, &vf->entry, vf_id);
	}

	/* Decrement of refcount done by pci_get_device() inside the loop does
	 * not touch the last iteration's vfdev, so it has to be done manually
	 * to balance pci_dev_get() added within the loop.
	 */
	pci_dev_put(vfdev);

	return 0;

err_free_entries:
	ice_free_vf_entries(pf);
	return err;
}

/**
 * ice_ena_vfs - enable VFs so they are ready to be used
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to enable
 */
static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	struct ice_hw *hw = &pf->hw;
	int ret;

	/* Disable global interrupt 0 so we don't try to handle the VFLR. */
	wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),
	     ICE_ITR_NONE << GLINT_DYN_CTL_ITR_INDX_S);
	set_bit(ICE_OICR_INTR_DIS, pf->state);
	ice_flush(hw);

	ret = pci_enable_sriov(pf->pdev, num_vfs);
	if (ret)
		goto err_unroll_intr;

	mutex_lock(&pf->vfs.table_lock);

	ret = ice_set_per_vf_res(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Not enough resources for %d VFs, err %d. Try with fewer VFs\n",
			num_vfs, ret);
		goto err_unroll_sriov;
	}

	ret = ice_create_vf_entries(pf, num_vfs);
	if (ret) {
		dev_err(dev, "Failed to allocate VF entries for %d VFs\n",
			num_vfs);
		goto err_unroll_sriov;
	}

	ret = ice_start_vfs(pf);
	if (ret) {
		dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);
		ret = -EAGAIN;
		goto err_unroll_vf_entries;
	}

	clear_bit(ICE_VF_DIS, pf->state);

	/* rearm global interrupts */
	if (test_and_clear_bit(ICE_OICR_INTR_DIS, pf->state))
		ice_irq_dynamic_ena(hw, NULL, NULL);

	mutex_unlock(&pf->vfs.table_lock);

	return 0;

err_unroll_vf_entries:
	ice_free_vf_entries(pf);
err_unroll_sriov:
	mutex_unlock(&pf->vfs.table_lock);
	pci_disable_sriov(pf->pdev);
err_unroll_intr:
	/* rearm interrupts here */
	ice_irq_dynamic_ena(hw, NULL, NULL);
	clear_bit(ICE_OICR_INTR_DIS, pf->state);
	return ret;
}

/**
 * ice_pci_sriov_ena - Enable or change number of VFs
 * @pf: pointer to the PF structure
 * @num_vfs: number of VFs to allocate
 *
 * Returns 0 on success and negative on failure
 */
static int ice_pci_sriov_ena(struct ice_pf *pf, int num_vfs)
{
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	if (!num_vfs) {
		ice_free_vfs(pf);
		return 0;
	}

	if (num_vfs > pf->vfs.num_supported) {
		dev_err(dev, "Can't enable %d VFs, max VFs supported is %d\n",
			num_vfs, pf->vfs.num_supported);
		return -EOPNOTSUPP;
	}

	dev_info(dev, "Enabling %d VFs\n", num_vfs);
	err = ice_ena_vfs(pf, num_vfs);
	if (err) {
		dev_err(dev, "Failed to enable SR-IOV: %d\n", err);
		return err;
	}

	set_bit(ICE_FLAG_SRIOV_ENA, pf->flags);
	return 0;
}

/**
 * ice_check_sriov_allowed - check if SR-IOV is allowed based on various checks
 * @pf: PF to enable SR-IOV on
 */
static int ice_check_sriov_allowed(struct ice_pf *pf)
{
	struct device *dev = ice_pf_to_dev(pf);

	if (!test_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags)) {
		dev_err(dev, "This device is not capable of SR-IOV\n");
		return -EOPNOTSUPP;
	}

	if (ice_is_safe_mode(pf)) {
		dev_err(dev, "SR-IOV cannot be configured - Device is in Safe Mode\n");
		return -EOPNOTSUPP;
	}

	if (!ice_pf_state_is_nominal(pf)) {
		dev_err(dev, "Cannot enable SR-IOV, device not ready\n");
		return -EBUSY;
	}

	return 0;
}

/**
 * ice_sriov_get_vf_total_msix - return number of MSI-X used by VFs
 * @pdev: pointer to pci_dev struct
 *
 * This function is called via the PCI sysfs ops.
 */
u32 ice_sriov_get_vf_total_msix(struct pci_dev *pdev)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);

	return pf->virt_irq_tracker.num_entries;
}
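
/* The value above is what the PCI core reports through the standard
 * "sriov_vf_total_msix" sysfs attribute, e.g. (device path shown for
 * illustration only):
 *
 *   cat /sys/bus/pci/devices/<PF BDF>/sriov_vf_total_msix
 */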

static void ice_sriov_remap_vectors(struct ice_pf *pf, u16 restricted_id)
{
	u16 vf_ids[ICE_MAX_SRIOV_VFS];
	struct ice_vf *tmp_vf;
	int to_remap = 0, bkt;

	/* For better IRQ usage, try to remap the IRQs of VFs that aren't
	 * running yet
	 */
	ice_for_each_vf(pf, bkt, tmp_vf) {
		/* skip the VF which is changing the number of MSI-X */
		if (restricted_id == tmp_vf->vf_id ||
		    test_bit(ICE_VF_STATE_ACTIVE, tmp_vf->vf_states))
			continue;

		ice_dis_vf_mappings(tmp_vf);
		ice_virt_free_irqs(pf, tmp_vf->first_vector_idx,
				   tmp_vf->num_msix);

		vf_ids[to_remap] = tmp_vf->vf_id;
		to_remap += 1;
	}

	for (int i = 0; i < to_remap; i++) {
		tmp_vf = ice_get_vf_by_id(pf, vf_ids[i]);
		if (!tmp_vf)
			continue;

		tmp_vf->first_vector_idx =
			ice_virt_get_irqs(pf, tmp_vf->num_msix);
		/* there is no need to rebuild the VSI as we are only changing
		 * the vector indexes, not the number of MSI-X vectors or
		 * queues
		 */
		ice_ena_vf_mappings(tmp_vf);
		ice_put_vf(tmp_vf);
	}
}

/**
 * ice_sriov_set_msix_vec_count - set MSI-X vector count on a VF
 * @vf_dev: pointer to pci_dev struct of VF device
 * @msix_vec_count: new value for MSI-X amount on this VF
 *
 * Set requested MSI-X, queues and registers for @vf_dev.
 *
 * First do some sanity checks like if there are any VFs, if the new value
 * is correct etc. Then disable old mapping (MSI-X and queues registers), change
 * MSI-X and queues, rebuild VSI and enable new mapping.
 *
 * If it is possible (no driver bound to the VF), also try to remap other VFs
 * to linearize the IRQ register usage.
 */
int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
{
	struct pci_dev *pdev = pci_physfn(vf_dev);
	struct ice_pf *pf = pci_get_drvdata(pdev);
	u16 prev_msix, prev_queues, queues;
	bool needs_rebuild = false;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int id;

	if (!ice_get_num_vfs(pf))
		return -ENOENT;

	if (!msix_vec_count)
		return 0;

	queues = msix_vec_count;
	/* add 1 MSI-X vector for the OICR */
	msix_vec_count += 1;

	if (queues > min(ice_get_avail_txq_count(pf),
			 ice_get_avail_rxq_count(pf)))
		return -EINVAL;

	if (msix_vec_count < ICE_MIN_INTR_PER_VF)
		return -EINVAL;

	/* Translate the PCI VF function number to the VF ID */
	for (id = 0; id < pci_num_vf(pdev); id++) {
		if (vf_dev->devfn == pci_iov_virtfn_devfn(pdev, id))
			break;
	}

	if (id == pci_num_vf(pdev))
		return -ENOENT;

	vf = ice_get_vf_by_id(pf, id);

	if (!vf)
		return -ENOENT;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ice_put_vf(vf);
		return -ENOENT;
	}

	prev_msix = vf->num_msix;
	prev_queues = vf->num_vf_qs;

	ice_dis_vf_mappings(vf);
	ice_virt_free_irqs(pf, vf->first_vector_idx, vf->num_msix);

	/* Remap all VFs besides the one now being configured */
	ice_sriov_remap_vectors(pf, vf->vf_id);

	vf->num_msix = msix_vec_count;
	vf->num_vf_qs = queues;
	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0)
		goto unroll;

	vsi->req_txq = queues;
	vsi->req_rxq = queues;

	if (ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT)) {
		/* Try to rebuild with previous values */
		needs_rebuild = true;
		goto unroll;
	}

	dev_info(ice_pf_to_dev(pf),
		 "Changing VF %d resources to %d vectors and %d queues\n",
		 vf->vf_id, vf->num_msix, vf->num_vf_qs);

	ice_ena_vf_mappings(vf);
	ice_put_vf(vf);

	return 0;

unroll:
	dev_info(ice_pf_to_dev(pf),
		 "Can't set %d vectors on VF %d, falling back to %d\n",
		 vf->num_msix, vf->vf_id, prev_msix);

	vf->num_msix = prev_msix;
	vf->num_vf_qs = prev_queues;

	vf->first_vector_idx = ice_virt_get_irqs(pf, vf->num_msix);
	if (vf->first_vector_idx < 0) {
		ice_put_vf(vf);
		return -EINVAL;
	}

	if (needs_rebuild) {
		vsi->req_txq = prev_queues;
		vsi->req_rxq = prev_queues;

		ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT);
	}

	ice_ena_vf_mappings(vf);
	ice_put_vf(vf);

	return -EINVAL;
}
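
/* The callback above backs the PCI core's "sriov_vf_msix_count" sysfs
 * attribute, so an administrator would typically resize a VF like this
 * (device path shown for illustration only):
 *
 *   echo 17 > /sys/bus/pci/devices/<VF BDF>/sriov_vf_msix_count
 */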

/**
 * ice_sriov_configure - Enable or change number of VFs via sysfs
 * @pdev: pointer to a pci_dev structure
 * @num_vfs: number of VFs to allocate or 0 to free VFs
 *
 * This function is called when the user updates the number of VFs in sysfs. On
 * success return whatever num_vfs was set to by the caller. Return negative on
 * failure.
 */
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct ice_pf *pf = pci_get_drvdata(pdev);
	struct device *dev = ice_pf_to_dev(pf);
	int err;

	err = ice_check_sriov_allowed(pf);
	if (err)
		return err;

	if (!num_vfs) {
		if (!pci_vfs_assigned(pdev)) {
			ice_free_vfs(pf);
			return 0;
		}

		dev_err(dev, "can't free VFs because some are assigned to VMs.\n");
		return -EBUSY;
	}

	err = ice_pci_sriov_ena(pf, num_vfs);
	if (err)
		return err;

	return num_vfs;
}
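
/* Standard SR-IOV flow from user space, which reaches this function
 * through the PCI core (device path shown for illustration only):
 *
 *   echo 8 > /sys/bus/pci/devices/<PF BDF>/sriov_numvfs   # enable 8 VFs
 *   echo 0 > /sys/bus/pci/devices/<PF BDF>/sriov_numvfs   # free all VFs
 */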

/**
 * ice_process_vflr_event - Free VF resources via IRQ calls
 * @pf: pointer to the PF structure
 *
 * Called from the VFLR IRQ handler to free up VF resources and state
 * variables.
 */
void ice_process_vflr_event(struct ice_pf *pf)
{
	struct ice_hw *hw = &pf->hw;
	struct ice_vf *vf;
	unsigned int bkt;
	u32 reg;

	if (!test_and_clear_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
	    !ice_has_vfs(pf))
		return;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		u32 reg_idx, bit_idx;

		reg_idx = (hw->func_caps.vf_base_id + vf->vf_id) / 32;
		bit_idx = (hw->func_caps.vf_base_id + vf->vf_id) % 32;
		/* read GLGEN_VFLRSTAT register to find out the flr VFs */
		reg = rd32(hw, GLGEN_VFLRSTAT(reg_idx));
		if (reg & BIT(bit_idx))
			/* GLGEN_VFLRSTAT bit will be cleared in ice_reset_vf */
			ice_reset_vf(vf, ICE_VF_RESET_VFLR | ICE_VF_RESET_LOCK);
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_get_vf_from_pfq - get the VF who owns the PF space queue passed in
 * @pf: PF used to index all VFs
 * @pfq: queue index relative to the PF's function space
 *
 * If no VF is found who owns the pfq then return NULL, otherwise return a
 * pointer to the VF who owns the pfq
 *
 * If this function returns non-NULL, it acquires a reference count of the VF
 * structure. The caller is responsible for calling ice_put_vf() to drop this
 * reference.
 */
static struct ice_vf *ice_get_vf_from_pfq(struct ice_pf *pf, u16 pfq)
{
	struct ice_vf *vf;
	unsigned int bkt;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf) {
		struct ice_vsi *vsi;
		u16 rxq_idx;

		vsi = ice_get_vf_vsi(vf);
		if (!vsi)
			continue;

		ice_for_each_rxq(vsi, rxq_idx)
			if (vsi->rxq_map[rxq_idx] == pfq) {
				struct ice_vf *found;

				if (kref_get_unless_zero(&vf->refcnt))
					found = vf;
				else
					found = NULL;
				rcu_read_unlock();
				return found;
			}
	}
	rcu_read_unlock();

	return NULL;
}

/**
 * ice_globalq_to_pfq - convert from global queue index to PF space queue index
 * @pf: PF used for conversion
 * @globalq: global queue index used to convert to PF space queue index
 */
static u32 ice_globalq_to_pfq(struct ice_pf *pf, u32 globalq)
{
	return globalq - pf->hw.func_caps.common_cap.rxq_first_id;
}

/**
 * ice_vf_lan_overflow_event - handle LAN overflow event for a VF
 * @pf: PF that the LAN overflow event happened on
 * @event: structure holding the event information for the LAN overflow event
 *
 * Determine if the LAN overflow event was caused by a VF queue. If it was not
 * caused by a VF, do nothing. If a VF caused this LAN overflow event, trigger
 * a reset on the offending VF.
 */
void
ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event)
{
	u32 gldcb_rtctq, queue;
	struct ice_vf *vf;

	gldcb_rtctq = le32_to_cpu(event->desc.params.lan_overflow.prtdcb_ruptq);
	dev_dbg(ice_pf_to_dev(pf), "GLDCB_RTCTQ: 0x%08x\n", gldcb_rtctq);

	/* event returns device global Rx queue number */
	queue = FIELD_GET(GLDCB_RTCTQ_RXQNUM_M, gldcb_rtctq);

	vf = ice_get_vf_from_pfq(pf, ice_globalq_to_pfq(pf, queue));
	if (!vf)
		return;

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY | ICE_VF_RESET_LOCK);
	ice_put_vf(vf);
}

/**
 * ice_set_vf_spoofchk - enable or disable VF spoof checking
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ena: flag to enable or disable feature
 *
 * Enable or disable VF spoof checking.
 */
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_pf *pf = np->vsi->back;
	struct ice_vsi *vf_vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vf_vsi = ice_get_vf_vsi(vf);
	if (!vf_vsi) {
		netdev_err(netdev, "VSI %d for VF %d is null\n",
			   vf->lan_vsi_idx, vf->vf_id);
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf_vsi->type != ICE_VSI_VF) {
		netdev_err(netdev, "Type %d of VSI %d for VF %d is not ICE_VSI_VF\n",
			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
		ret = -ENODEV;
		goto out_put_vf;
	}

	if (ena == vf->spoofchk) {
		dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_vsi_apply_spoofchk(vf_vsi, ena);
	if (ret)
		dev_err(dev, "Failed to set spoofchk %s for VF %d VSI %d, error %d\n",
			ena ? "ON" : "OFF", vf->vf_id, vf_vsi->vsi_num, ret);
	else
		vf->spoofchk = ena;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_cfg - return VF configuration
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @ivi: VF configuration structure
 *
 * Return the VF configuration in @ivi.
 */
int
ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	ivi->vf = vf_id;
	ether_addr_copy(ivi->mac, vf->hw_lan_addr);

	/* VF configuration for VLAN and applicable QoS */
	ivi->vlan = ice_vf_get_port_vlan_id(vf);
	ivi->qos = ice_vf_get_port_vlan_prio(vf);
	if (ice_vf_is_port_vlan_ena(vf))
		ivi->vlan_proto = cpu_to_be16(ice_vf_get_port_vlan_tpid(vf));

	ivi->trusted = vf->trusted;
	ivi->spoofchk = vf->spoofchk;
	if (!vf->link_forced)
		ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
	else
		ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
	ivi->max_tx_rate = vf->max_tx_rate;
	ivi->min_tx_rate = vf->min_tx_rate;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * __ice_set_vf_mac - program VF MAC address
 * @pf: PF to configure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * Program the VF MAC address.
 * Return: zero on success or an error code on failure
 */
int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac)
{
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);
	if (is_multicast_ether_addr(mac)) {
		dev_err(dev, "%pM not a valid unicast address\n", mac);
		return -EINVAL;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	/* nothing left to do, unicast MAC already set */
	if (ether_addr_equal(vf->dev_lan_addr, mac) &&
	    ether_addr_equal(vf->hw_lan_addr, mac)) {
		ret = 0;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	mutex_lock(&vf->cfg_lock);

	/* VF is notified of its new MAC via the PF's response to the
	 * VIRTCHNL_OP_GET_VF_RESOURCES message after the VF has been reset
	 */
	ether_addr_copy(vf->dev_lan_addr, mac);
	ether_addr_copy(vf->hw_lan_addr, mac);
	if (is_zero_ether_addr(mac)) {
		/* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */
		vf->pf_set_mac = false;
		dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n",
			 vf->vf_id);
	} else {
		/* PF will add MAC rule for the VF */
		vf->pf_set_mac = true;
		dev_info(dev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n",
			 mac, vf_id);
	}

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_mac - .ndo_set_vf_mac handler
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @mac: MAC address
 *
 * Program the VF MAC address.
 * Return: zero on success or an error code on failure
 */
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
	return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac);
}

/**
 * ice_set_vf_trust - enable or disable a given VF as trusted
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @trusted: Boolean value to enable/disable trusted VF
 *
 * Enable or disable a given VF as trusted.
 */
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	if (ice_is_eswitch_mode_switchdev(pf)) {
		dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
		/* drop the reference taken by ice_get_vf_by_id() */
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	/* Check if already trusted */
	if (trusted == vf->trusted) {
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	while (!trusted && vf->num_mac_lldp)
		ice_vf_update_mac_lldp_num(vf, ice_get_vf_vsi(vf), false);

	vf->trusted = trusted;
	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
		 vf_id, trusted ? "" : "un");

	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_set_vf_link_state - set VF's link state
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @link_state: required link state
 *
 * Set VF's link state, irrespective of physical link state status.
 */
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	switch (link_state) {
	case IFLA_VF_LINK_STATE_AUTO:
		vf->link_forced = false;
		break;
	case IFLA_VF_LINK_STATE_ENABLE:
		vf->link_forced = true;
		vf->link_up = true;
		break;
	case IFLA_VF_LINK_STATE_DISABLE:
		vf->link_forced = true;
		vf->link_up = false;
		break;
	default:
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_vc_notify_vf_link_state(vf);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
 * @pf: PF associated with VFs
 */
static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;
	int rate = 0;

	rcu_read_lock();
	ice_for_each_vf_rcu(pf, bkt, vf)
		rate += vf->min_tx_rate;
	rcu_read_unlock();

	return rate;
}

/**
 * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
 * @vf: VF trying to configure min_tx_rate
 * @min_tx_rate: min Tx rate in Mbps
 *
 * Check if the min_tx_rate being passed in will cause oversubscription of the
 * total min_tx_rate based on the current link speed and all other VFs'
 * configured min_tx_rate
 *
 * Return true if the passed min_tx_rate would cause oversubscription, else
 * return false
 */
static bool
ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
{
	struct ice_vsi *vsi = ice_get_vf_vsi(vf);
	int all_vfs_min_tx_rate;
	int link_speed_mbps;

	if (WARN_ON(!vsi))
		return false;

	link_speed_mbps = ice_get_link_speed_mbps(vsi);
	all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);

	/* this VF's previous rate is being overwritten */
	all_vfs_min_tx_rate -= vf->min_tx_rate;

	if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
		dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
			min_tx_rate, vf->vf_id,
			all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
			link_speed_mbps);
		return true;
	}

	return false;
}
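
/* Worked example (made-up numbers): on a 10000 Mbps link where other
 * VFs already guarantee a combined 8000 Mbps, requesting a min_tx_rate
 * of 3000 Mbps would oversubscribe the link by 1000 Mbps and is
 * therefore rejected.
 */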

/**
 * ice_set_vf_bw - set min/max VF bandwidth
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @min_tx_rate: Minimum Tx rate in Mbps
 * @max_tx_rate: Maximum Tx rate in Mbps
 */
int
ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
	      int max_tx_rate)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_vsi *vsi;
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (min_tx_rate && ice_is_dcb_active(pf)) {
		dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
		ret = -EOPNOTSUPP;
		goto out_put_vf;
	}

	if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate)) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
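		/* rate parameters are in Mbps while the BW limit helpers
		 * appear to take Kbps, hence the * 1000 below
		 */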
		ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->min_tx_rate = min_tx_rate;
	}

	if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
		ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
		if (ret) {
			dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
				vf->vf_id);
			goto out_put_vf;
		}

		vf->max_tx_rate = max_tx_rate;
	}

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_get_vf_stats - populate some stats for the VF
 * @netdev: the netdev of the PF
 * @vf_id: the host OS identifier (0-255)
 * @vf_stats: pointer to the OS memory to be initialized
 */
int ice_get_vf_stats(struct net_device *netdev, int vf_id,
		     struct ifla_vf_stats *vf_stats)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	struct ice_eth_stats *stats;
	struct ice_vsi *vsi;
	struct ice_vf *vf;
	int ret;

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	vsi = ice_get_vf_vsi(vf);
	if (!vsi) {
		ret = -EINVAL;
		goto out_put_vf;
	}

	ice_update_eth_stats(vsi);
	stats = &vsi->eth_stats;

	memset(vf_stats, 0, sizeof(*vf_stats));

	vf_stats->rx_packets = stats->rx_unicast + stats->rx_broadcast +
		stats->rx_multicast;
	vf_stats->tx_packets = stats->tx_unicast + stats->tx_broadcast +
		stats->tx_multicast;
	vf_stats->rx_bytes = stats->rx_bytes;
	vf_stats->tx_bytes = stats->tx_bytes;
	vf_stats->broadcast = stats->rx_broadcast;
	vf_stats->multicast = stats->rx_multicast;
	vf_stats->rx_dropped = stats->rx_discards;
	vf_stats->tx_dropped = stats->tx_discards;

out_put_vf:
	ice_put_vf(vf);
	return ret;
}

/**
 * ice_is_supported_port_vlan_proto - make sure the vlan_proto is supported
 * @hw: hardware structure used to check the VLAN mode
 * @vlan_proto: VLAN TPID being checked
 *
 * If the device is configured in Double VLAN Mode (DVM), then both ETH_P_8021Q
 * and ETH_P_8021AD are supported. If the device is configured in Single VLAN
 * Mode (SVM), then only ETH_P_8021Q is supported.
 */
static bool
ice_is_supported_port_vlan_proto(struct ice_hw *hw, u16 vlan_proto)
{
	bool is_supported = false;

	switch (vlan_proto) {
	case ETH_P_8021Q:
		is_supported = true;
		break;
	case ETH_P_8021AD:
		if (ice_is_dvm_ena(hw))
			is_supported = true;
		break;
	}

	return is_supported;
}

/**
 * ice_set_vf_port_vlan - program VF port VLAN ID and/or QoS
 * @netdev: network interface device structure
 * @vf_id: VF identifier
 * @vlan_id: VLAN ID being set
 * @qos: priority setting
 * @vlan_proto: VLAN protocol
 *
 * Program the VF port VLAN ID and/or QoS.
 */
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
		     __be16 vlan_proto)
{
	struct ice_pf *pf = ice_netdev_to_pf(netdev);
	u16 local_vlan_proto = ntohs(vlan_proto);
	struct device *dev;
	struct ice_vf *vf;
	int ret;

	dev = ice_pf_to_dev(pf);

	if (vlan_id >= VLAN_N_VID || qos > 7) {
		dev_err(dev, "Invalid Port VLAN parameters for VF %d, ID %d, QoS %d\n",
			vf_id, vlan_id, qos);
		return -EINVAL;
	}

	if (!ice_is_supported_port_vlan_proto(&pf->hw, local_vlan_proto)) {
		dev_err(dev, "VF VLAN protocol 0x%04x is not supported\n",
			local_vlan_proto);
		return -EPROTONOSUPPORT;
	}

	vf = ice_get_vf_by_id(pf, vf_id);
	if (!vf)
		return -EINVAL;

	ret = ice_check_vf_ready_for_cfg(vf);
	if (ret)
		goto out_put_vf;

	if (ice_vf_get_port_vlan_prio(vf) == qos &&
	    ice_vf_get_port_vlan_tpid(vf) == local_vlan_proto &&
	    ice_vf_get_port_vlan_id(vf) == vlan_id) {
		/* duplicate request, so just return success */
		dev_dbg(dev, "Duplicate port VLAN %u, QoS %u, TPID 0x%04x request\n",
			vlan_id, qos, local_vlan_proto);
		ret = 0;
		goto out_put_vf;
	}

	mutex_lock(&vf->cfg_lock);

	vf->port_vlan_info = ICE_VLAN(local_vlan_proto, vlan_id, qos);
	if (ice_vf_is_port_vlan_ena(vf))
		dev_info(dev, "Setting VLAN %u, QoS %u, TPID 0x%04x on VF %d\n",
			 vlan_id, qos, local_vlan_proto, vf_id);
	else
		dev_info(dev, "Clearing port VLAN on VF %d\n", vf_id);

	ice_reset_vf(vf, ICE_VF_RESET_NOTIFY);
	mutex_unlock(&vf->cfg_lock);

out_put_vf:
	ice_put_vf(vf);
	return ret;
}
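
/* Typical user-space configuration via iproute2 (interface name and
 * values illustrative); the 802.1ad protocol is only accepted when the
 * device runs in double VLAN mode:
 *
 *   ip link set eth0 vf 2 vlan 100 qos 3 proto 802.1ad
 */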

/**
 * ice_print_vf_rx_mdd_event - print VF Rx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_rx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Rx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_rx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vf_tx_mdd_event - print VF Tx malicious driver detect event
 * @vf: pointer to the VF structure
 */
void ice_print_vf_tx_mdd_event(struct ice_vf *vf)
{
	struct ice_pf *pf = vf->pf;
	struct device *dev;

	dev = ice_pf_to_dev(pf);

	dev_info(dev, "%d Tx Malicious Driver Detection events detected on PF %d VF %d MAC %pM. mdd-auto-reset-vfs=%s\n",
		 vf->mdd_tx_events.count, pf->hw.pf_id, vf->vf_id,
		 vf->dev_lan_addr,
		 test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags)
		 ? "on" : "off");
}

/**
 * ice_print_vfs_mdd_events - print VFs malicious driver detect event
 * @pf: pointer to the PF structure
 *
 * Called from ice_handle_mdd_event to rate limit and print VFs MDD events.
 */
void ice_print_vfs_mdd_events(struct ice_pf *pf)
{
	struct ice_vf *vf;
	unsigned int bkt;

	/* check that there are pending MDD events to print */
	if (!test_and_clear_bit(ICE_MDD_VF_PRINT_PENDING, pf->state))
		return;

	/* VF MDD event logs are rate limited to one second intervals */
	if (time_is_after_jiffies(pf->vfs.last_printed_mdd_jiffies + HZ * 1))
		return;

	pf->vfs.last_printed_mdd_jiffies = jiffies;

	mutex_lock(&pf->vfs.table_lock);
	ice_for_each_vf(pf, bkt, vf) {
		/* only print Rx MDD event message if there are new events */
		if (vf->mdd_rx_events.count != vf->mdd_rx_events.last_printed) {
			vf->mdd_rx_events.last_printed =
				vf->mdd_rx_events.count;
			ice_print_vf_rx_mdd_event(vf);
		}

		/* only print Tx MDD event message if there are new events */
		if (vf->mdd_tx_events.count != vf->mdd_tx_events.last_printed) {
			vf->mdd_tx_events.last_printed =
				vf->mdd_tx_events.count;
			ice_print_vf_tx_mdd_event(vf);
		}
	}
	mutex_unlock(&pf->vfs.table_lock);
}

/**
 * ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
 * @pf: pointer to the PF structure
 *
 * Called when recovering from a PF FLR to restore interrupt capability to
 * the VFs.
 */
void ice_restore_all_vfs_msi_state(struct ice_pf *pf)
{
	struct ice_vf *vf;
	u32 bkt;

	ice_for_each_vf(pf, bkt, vf)
		pci_restore_msi_state(vf->vfdev);
}