Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ixgbe: Add support for SR-IOV w/ DCB or RSS

This change essentially makes it so that we can enable almost all of the
features all at once. This patch allows for the combination of SR-IOV,
DCB, and FCoE in the case of the x540. It also beefs up SR-IOV by
adding support for RSS to the PF.

The testing matrix gets to be very complex for this patch as there are a
number of different features and subsets for queueing options. I tried to
narrow the matrix down a bit by restricting the PF to supporting only 4TC DCB
when it is enabled in addition to SR-IOV.

Cc: Greg Rose <gregory.v.rose@intel.com>
Cc: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

authored by

Alexander Duyck and committed by
Jeff Kirsher
73079ea0 435b19f6

+426 -50
+4
drivers/net/ethernet/intel/ixgbe/ixgbe.h
··· 284 284 u16 offset; /* offset to start of feature */ 285 285 } ____cacheline_internodealigned_in_smp; 286 286 287 + #define IXGBE_82599_VMDQ_8Q_MASK 0x78 288 + #define IXGBE_82599_VMDQ_4Q_MASK 0x7C 289 + #define IXGBE_82599_VMDQ_2Q_MASK 0x7E 290 + 287 291 /* 288 292 * FCoE requires that all Rx buffers be over 2200 bytes in length. Since 289 293 * this is twice the size of a half page we need to double the page order
+357 -26
drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
··· 29 29 #include "ixgbe_sriov.h" 30 30 31 31 #ifdef CONFIG_IXGBE_DCB 32 + /** 33 + * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV 34 + * @adapter: board private structure to initialize 35 + * 36 + * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It 37 + * will also try to cache the proper offsets if RSS/FCoE are enabled along 38 + * with VMDq. 39 + * 40 + **/ 41 + static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) 42 + { 43 + #ifdef IXGBE_FCOE 44 + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; 45 + #endif /* IXGBE_FCOE */ 46 + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 47 + int i; 48 + u16 reg_idx; 49 + u8 tcs = netdev_get_num_tc(adapter->netdev); 50 + 51 + /* verify we have DCB queueing enabled before proceeding */ 52 + if (tcs <= 1) 53 + return false; 54 + 55 + /* verify we have VMDq enabled before proceeding */ 56 + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 57 + return false; 58 + 59 + /* start at VMDq register offset for SR-IOV enabled setups */ 60 + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 61 + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { 62 + /* If we are greater than indices move to next pool */ 63 + if ((reg_idx & ~vmdq->mask) >= tcs) 64 + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); 65 + adapter->rx_ring[i]->reg_idx = reg_idx; 66 + } 67 + 68 + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 69 + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { 70 + /* If we are greater than indices move to next pool */ 71 + if ((reg_idx & ~vmdq->mask) >= tcs) 72 + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); 73 + adapter->tx_ring[i]->reg_idx = reg_idx; 74 + } 75 + 76 + #ifdef IXGBE_FCOE 77 + /* nothing to do if FCoE is disabled */ 78 + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) 79 + return true; 80 + 81 + /* The work is already done if the FCoE ring is shared */ 82 + if (fcoe->offset < tcs) 
83 + return true; 84 + 85 + /* The FCoE rings exist separately, we need to move their reg_idx */ 86 + if (fcoe->indices) { 87 + u16 queues_per_pool = __ALIGN_MASK(1, ~vmdq->mask); 88 + u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); 89 + 90 + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; 91 + for (i = fcoe->offset; i < adapter->num_rx_queues; i++) { 92 + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; 93 + adapter->rx_ring[i]->reg_idx = reg_idx; 94 + reg_idx++; 95 + } 96 + 97 + reg_idx = (vmdq->offset + vmdq->indices) * queues_per_pool; 98 + for (i = fcoe->offset; i < adapter->num_tx_queues; i++) { 99 + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask) + fcoe_tc; 100 + adapter->tx_ring[i]->reg_idx = reg_idx; 101 + reg_idx++; 102 + } 103 + } 104 + 105 + #endif /* IXGBE_FCOE */ 106 + return true; 107 + } 108 + 32 109 /* ixgbe_get_first_reg_idx - Return first register index associated with ring */ 33 110 static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, 34 111 unsigned int *tx, unsigned int *rx) ··· 197 120 * no other mapping is used. 
198 121 * 199 122 */ 200 - static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) 123 + static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) 201 124 { 202 - adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2; 203 - adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2; 204 - if (adapter->num_vfs) 205 - return true; 206 - else 125 + #ifdef IXGBE_FCOE 126 + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; 127 + #endif /* IXGBE_FCOE */ 128 + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; 129 + struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; 130 + int i; 131 + u16 reg_idx; 132 + 133 + /* only proceed if VMDq is enabled */ 134 + if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) 207 135 return false; 136 + 137 + /* start at VMDq register offset for SR-IOV enabled setups */ 138 + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 139 + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { 140 + #ifdef IXGBE_FCOE 141 + /* Allow first FCoE queue to be mapped as RSS */ 142 + if (fcoe->offset && (i > fcoe->offset)) 143 + break; 144 + #endif 145 + /* If we are greater than indices move to next pool */ 146 + if ((reg_idx & ~vmdq->mask) >= rss->indices) 147 + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); 148 + adapter->rx_ring[i]->reg_idx = reg_idx; 149 + } 150 + 151 + #ifdef IXGBE_FCOE 152 + /* FCoE uses a linear block of queues so just assigning 1:1 */ 153 + for (; i < adapter->num_rx_queues; i++, reg_idx++) 154 + adapter->rx_ring[i]->reg_idx = reg_idx; 155 + 156 + #endif 157 + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); 158 + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { 159 + #ifdef IXGBE_FCOE 160 + /* Allow first FCoE queue to be mapped as RSS */ 161 + if (fcoe->offset && (i > fcoe->offset)) 162 + break; 163 + #endif 164 + /* If we are greater than indices move to next pool */ 165 + if ((reg_idx & rss->mask) >= rss->indices) 166 + reg_idx = 
__ALIGN_MASK(reg_idx, ~vmdq->mask); 167 + adapter->tx_ring[i]->reg_idx = reg_idx; 168 + } 169 + 170 + #ifdef IXGBE_FCOE 171 + /* FCoE uses a linear block of queues so just assigning 1:1 */ 172 + for (; i < adapter->num_tx_queues; i++, reg_idx++) 173 + adapter->tx_ring[i]->reg_idx = reg_idx; 174 + 175 + #endif 176 + 177 + return true; 208 178 } 209 179 210 180 /** ··· 293 169 adapter->rx_ring[0]->reg_idx = 0; 294 170 adapter->tx_ring[0]->reg_idx = 0; 295 171 172 + #ifdef CONFIG_IXGBE_DCB 173 + if (ixgbe_cache_ring_dcb_sriov(adapter)) 174 + return; 175 + 176 + if (ixgbe_cache_ring_dcb(adapter)) 177 + return; 178 + 179 + #endif 296 180 if (ixgbe_cache_ring_sriov(adapter)) 297 181 return; 298 182 299 - #ifdef CONFIG_IXGBE_DCB 300 - if (ixgbe_cache_ring_dcb(adapter)) 301 - return; 302 - #endif 303 - 304 183 ixgbe_cache_ring_rss(adapter); 305 - } 306 - 307 - /** 308 - * ixgbe_set_sriov_queues - Allocate queues for IOV use 309 - * @adapter: board private structure to initialize 310 - * 311 - * IOV doesn't actually use anything, so just NAK the 312 - * request for now and let the other queue routines 313 - * figure out what to do. 314 - */ 315 - static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) 316 - { 317 - return false; 318 184 } 319 185 320 186 #define IXGBE_RSS_16Q_MASK 0xF ··· 314 200 #define IXGBE_RSS_DISABLED_MASK 0x0 315 201 316 202 #ifdef CONFIG_IXGBE_DCB 203 + /** 204 + * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB 205 + * @adapter: board private structure to initialize 206 + * 207 + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues 208 + * and VM pools where appropriate. Also assign queues based on DCB 209 + * priorities and map accordingly.. 
210 + * 211 + **/ 212 + static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) 213 + { 214 + int i; 215 + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; 216 + u16 vmdq_m = 0; 217 + #ifdef IXGBE_FCOE 218 + u16 fcoe_i = 0; 219 + #endif 220 + u8 tcs = netdev_get_num_tc(adapter->netdev); 221 + 222 + /* verify we have DCB queueing enabled before proceeding */ 223 + if (tcs <= 1) 224 + return false; 225 + 226 + /* verify we have VMDq enabled before proceeding */ 227 + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 228 + return false; 229 + 230 + /* Add starting offset to total pool count */ 231 + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; 232 + 233 + /* 16 pools w/ 8 TC per pool */ 234 + if (tcs > 4) { 235 + vmdq_i = min_t(u16, vmdq_i, 16); 236 + vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; 237 + /* 32 pools w/ 4 TC per pool */ 238 + } else { 239 + vmdq_i = min_t(u16, vmdq_i, 32); 240 + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; 241 + } 242 + 243 + #ifdef IXGBE_FCOE 244 + /* queues in the remaining pools are available for FCoE */ 245 + fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; 246 + 247 + #endif 248 + /* remove the starting offset from the pool count */ 249 + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; 250 + 251 + /* save features for later use */ 252 + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; 253 + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; 254 + 255 + /* 256 + * We do not support DCB, VMDq, and RSS all simultaneously 257 + * so we will disable RSS since it is the lowest priority 258 + */ 259 + adapter->ring_feature[RING_F_RSS].indices = 1; 260 + adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; 261 + 262 + adapter->num_rx_pools = vmdq_i; 263 + adapter->num_rx_queues_per_pool = tcs; 264 + 265 + adapter->num_tx_queues = vmdq_i * tcs; 266 + adapter->num_rx_queues = vmdq_i * tcs; 267 + 268 + #ifdef IXGBE_FCOE 269 + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 270 + struct ixgbe_ring_feature *fcoe; 
271 + 272 + fcoe = &adapter->ring_feature[RING_F_FCOE]; 273 + 274 + /* limit ourselves based on feature limits */ 275 + fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); 276 + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); 277 + 278 + if (fcoe_i) { 279 + /* alloc queues for FCoE separately */ 280 + fcoe->indices = fcoe_i; 281 + fcoe->offset = vmdq_i * tcs; 282 + 283 + /* add queues to adapter */ 284 + adapter->num_tx_queues += fcoe_i; 285 + adapter->num_rx_queues += fcoe_i; 286 + } else if (tcs > 1) { 287 + /* use queue belonging to FcoE TC */ 288 + fcoe->indices = 1; 289 + fcoe->offset = ixgbe_fcoe_get_tc(adapter); 290 + } else { 291 + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; 292 + 293 + fcoe->indices = 0; 294 + fcoe->offset = 0; 295 + } 296 + } 297 + 298 + #endif /* IXGBE_FCOE */ 299 + /* configure TC to queue mapping */ 300 + for (i = 0; i < tcs; i++) 301 + netdev_set_tc_queue(adapter->netdev, i, 1, i); 302 + 303 + return true; 304 + } 305 + 317 306 static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) 318 307 { 319 308 struct net_device *dev = adapter->netdev; ··· 478 261 } 479 262 480 263 #endif 264 + /** 265 + * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices 266 + * @adapter: board private structure to initialize 267 + * 268 + * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues 269 + * and VM pools where appropriate. If RSS is available, then also try and 270 + * enable RSS and map accordingly. 
271 + * 272 + **/ 273 + static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) 274 + { 275 + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; 276 + u16 vmdq_m = 0; 277 + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; 278 + u16 rss_m = IXGBE_RSS_DISABLED_MASK; 279 + #ifdef IXGBE_FCOE 280 + u16 fcoe_i = 0; 281 + #endif 282 + 283 + /* only proceed if SR-IOV is enabled */ 284 + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) 285 + return false; 286 + 287 + /* Add starting offset to total pool count */ 288 + vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; 289 + 290 + /* double check we are limited to maximum pools */ 291 + vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); 292 + 293 + /* 64 pool mode with 2 queues per pool */ 294 + if ((vmdq_i > 32) || (rss_i < 4)) { 295 + vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; 296 + rss_m = IXGBE_RSS_2Q_MASK; 297 + rss_i = min_t(u16, rss_i, 2); 298 + /* 32 pool mode with 4 queues per pool */ 299 + } else { 300 + vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; 301 + rss_m = IXGBE_RSS_4Q_MASK; 302 + rss_i = 4; 303 + } 304 + 305 + #ifdef IXGBE_FCOE 306 + /* queues in the remaining pools are available for FCoE */ 307 + fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); 308 + 309 + #endif 310 + /* remove the starting offset from the pool count */ 311 + vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; 312 + 313 + /* save features for later use */ 314 + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; 315 + adapter->ring_feature[RING_F_VMDQ].mask = vmdq_m; 316 + 317 + /* limit RSS based on user input and save for later use */ 318 + adapter->ring_feature[RING_F_RSS].indices = rss_i; 319 + adapter->ring_feature[RING_F_RSS].mask = rss_m; 320 + 321 + adapter->num_rx_pools = vmdq_i; 322 + adapter->num_rx_queues_per_pool = rss_i; 323 + 324 + adapter->num_rx_queues = vmdq_i * rss_i; 325 + adapter->num_tx_queues = vmdq_i * rss_i; 326 + 327 + /* disable ATR as it is not supported when VMDq is enabled */ 328 + adapter->flags &= 
~IXGBE_FLAG_FDIR_HASH_CAPABLE; 329 + 330 + #ifdef IXGBE_FCOE 331 + /* 332 + * FCoE can use rings from adjacent buffers to allow RSS 333 + * like behavior. To account for this we need to add the 334 + * FCoE indices to the total ring count. 335 + */ 336 + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 337 + struct ixgbe_ring_feature *fcoe; 338 + 339 + fcoe = &adapter->ring_feature[RING_F_FCOE]; 340 + 341 + /* limit ourselves based on feature limits */ 342 + fcoe_i = min_t(u16, fcoe_i, fcoe->limit); 343 + 344 + if (vmdq_i > 1 && fcoe_i) { 345 + /* reserve no more than number of CPUs */ 346 + fcoe_i = min_t(u16, fcoe_i, num_online_cpus()); 347 + 348 + /* alloc queues for FCoE separately */ 349 + fcoe->indices = fcoe_i; 350 + fcoe->offset = vmdq_i * rss_i; 351 + } else { 352 + /* merge FCoE queues with RSS queues */ 353 + fcoe_i = min_t(u16, fcoe_i + rss_i, num_online_cpus()); 354 + 355 + /* limit indices to rss_i if MSI-X is disabled */ 356 + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 357 + fcoe_i = rss_i; 358 + 359 + /* attempt to reserve some queues for just FCoE */ 360 + fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); 361 + fcoe->offset = fcoe_i - fcoe->indices; 362 + 363 + fcoe_i -= rss_i; 364 + } 365 + 366 + /* add queues to adapter */ 367 + adapter->num_tx_queues += fcoe_i; 368 + adapter->num_rx_queues += fcoe_i; 369 + } 370 + 371 + #endif 372 + return true; 373 + } 374 + 481 375 /** 482 376 * ixgbe_set_rss_queues - Allocate queues for RSS 483 377 * @adapter: board private structure to initialize ··· 681 353 adapter->num_rx_pools = adapter->num_rx_queues; 682 354 adapter->num_rx_queues_per_pool = 1; 683 355 684 - if (ixgbe_set_sriov_queues(adapter)) 356 + #ifdef CONFIG_IXGBE_DCB 357 + if (ixgbe_set_dcb_sriov_queues(adapter)) 685 358 return; 686 359 687 - #ifdef CONFIG_IXGBE_DCB 688 360 if (ixgbe_set_dcb_queues(adapter)) 689 361 return; 690 362 691 363 #endif 364 + if (ixgbe_set_sriov_queues(adapter)) 365 + return; 366 + 692 367 
ixgbe_set_rss_queues(adapter); 693 368 } 694 369
+24 -13
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 3161 3161 * Set up VF register offsets for selected VT Mode, 3162 3162 * i.e. 32 or 64 VFs for SR-IOV 3163 3163 */ 3164 - gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 3165 - gcr_ext |= IXGBE_GCR_EXT_MSIX_EN; 3166 - gcr_ext |= IXGBE_GCR_EXT_VT_MODE_64; 3164 + switch (adapter->ring_feature[RING_F_VMDQ].mask) { 3165 + case IXGBE_82599_VMDQ_8Q_MASK: 3166 + gcr_ext = IXGBE_GCR_EXT_VT_MODE_16; 3167 + break; 3168 + case IXGBE_82599_VMDQ_4Q_MASK: 3169 + gcr_ext = IXGBE_GCR_EXT_VT_MODE_32; 3170 + break; 3171 + default: 3172 + gcr_ext = IXGBE_GCR_EXT_VT_MODE_64; 3173 + break; 3174 + } 3175 + 3167 3176 IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); 3168 3177 3169 3178 /* enable Tx loopback for VF/PF communication */ ··· 3956 3947 3957 3948 if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 3958 3949 gpie &= ~IXGBE_GPIE_VTMODE_MASK; 3959 - gpie |= IXGBE_GPIE_VTMODE_64; 3950 + 3951 + switch (adapter->ring_feature[RING_F_VMDQ].mask) { 3952 + case IXGBE_82599_VMDQ_8Q_MASK: 3953 + gpie |= IXGBE_GPIE_VTMODE_16; 3954 + break; 3955 + case IXGBE_82599_VMDQ_4Q_MASK: 3956 + gpie |= IXGBE_GPIE_VTMODE_32; 3957 + break; 3958 + default: 3959 + gpie |= IXGBE_GPIE_VTMODE_64; 3960 + break; 3961 + } 3960 3962 } 3961 3963 3962 3964 /* Enable Thermal over heat sensor interrupt */ ··· 6694 6674 return -EINVAL; 6695 6675 } 6696 6676 6697 - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { 6698 - e_err(drv, "Enable failed, SR-IOV enabled\n"); 6699 - return -EINVAL; 6700 - } 6701 - 6702 6677 /* Hardware supports up to 8 traffic classes */ 6703 6678 if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || 6704 6679 (hw->mac.type == ixgbe_mac_82598EB && ··· 7239 7224 7240 7225 netdev->priv_flags |= IFF_UNICAST_FLT; 7241 7226 netdev->priv_flags |= IFF_SUPP_NOFCS; 7242 - 7243 - if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) 7244 - adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED | 7245 - IXGBE_FLAG_DCB_ENABLED); 7246 7227 7247 7228 #ifdef CONFIG_IXGBE_DCB 7248 7229 netdev->dcbnl_ops = &dcbnl_ops;
+41 -11
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
··· 107 107 "VF drivers to avoid spoofed packet errors\n"); 108 108 } else { 109 109 err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); 110 + if (err) { 111 + e_err(probe, "Failed to enable PCI sriov: %d\n", err); 112 + goto err_novfs; 113 + } 110 114 } 111 - if (err) { 112 - e_err(probe, "Failed to enable PCI sriov: %d\n", err); 113 - goto err_novfs; 114 - } 115 - adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; 116 115 116 + adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; 117 117 e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); 118 + 119 + /* Enable VMDq flag so device will be set in VM mode */ 120 + adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; 121 + if (!adapter->ring_feature[RING_F_VMDQ].limit) 122 + adapter->ring_feature[RING_F_VMDQ].limit = 1; 123 + adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; 118 124 119 125 num_vf_macvlans = hw->mac.num_rar_entries - 120 126 (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); ··· 152 146 * and memory allocated set up the mailbox parameters 153 147 */ 154 148 ixgbe_init_mbx_params_pf(hw); 155 - memcpy(&hw->mbx.ops, ii->mbx_ops, 156 - sizeof(hw->mbx.ops)); 149 + memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); 150 + 151 + /* limit trafffic classes based on VFs enabled */ 152 + if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && 153 + (adapter->num_vfs < 16)) { 154 + adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; 155 + adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; 156 + } else if (adapter->num_vfs < 32) { 157 + adapter->dcb_cfg.num_tcs.pg_tcs = 4; 158 + adapter->dcb_cfg.num_tcs.pfc_tcs = 4; 159 + } else { 160 + adapter->dcb_cfg.num_tcs.pg_tcs = 1; 161 + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; 162 + } 163 + 164 + /* We do not support RSS w/ SR-IOV */ 165 + adapter->ring_feature[RING_F_RSS].limit = 1; 157 166 158 167 /* Disable RSC when in SR-IOV mode */ 159 168 adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | 160 169 IXGBE_FLAG2_RSC_ENABLED); 170 + 171 + #ifdef IXGBE_FCOE 172 + /* 173 + * 
When SR-IOV is enabled 82599 cannot support jumbo frames 174 + * so we must disable FCoE because we cannot support FCoE MTU. 175 + */ 176 + if (adapter->hw.mac.type == ixgbe_mac_82599EB) 177 + adapter->flags &= ~(IXGBE_FLAG_FCOE_ENABLED | 178 + IXGBE_FLAG_FCOE_CAPABLE); 179 + #endif 180 + 181 + /* enable spoof checking for all VFs */ 161 182 for (i = 0; i < adapter->num_vfs; i++) 162 183 adapter->vfinfo[i].spoofchk_enabled = true; 163 184 return; ··· 204 171 void ixgbe_disable_sriov(struct ixgbe_adapter *adapter) 205 172 { 206 173 struct ixgbe_hw *hw = &adapter->hw; 207 - u32 gcr; 208 174 u32 gpie; 209 175 u32 vmdctl; 210 176 int i; ··· 214 182 #endif 215 183 216 184 /* turn off device IOV mode */ 217 - gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); 218 - gcr &= ~(IXGBE_GCR_EXT_SRIOV); 219 - IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr); 185 + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 0); 220 186 gpie = IXGBE_READ_REG(hw, IXGBE_GPIE); 221 187 gpie &= ~IXGBE_GPIE_VTMODE_MASK; 222 188 IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);