Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

igb: Add support for enabling VFs to PF driver.

This patch adds the support to handle requests from the VF to perform
operations such as completing resets, setting/reading mac address, adding
vlans, adding multicast addresses, setting rlpml, and general
communications between the PF and all VFs.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Alexander Duyck and committed by
David S. Miller
4ae196df e1739522

+1077 -26
+1 -1
drivers/net/igb/Makefile
··· 33 33 obj-$(CONFIG_IGB) += igb.o 34 34 35 35 igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ 36 - e1000_mac.o e1000_nvm.o e1000_phy.o 36 + e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o 37 37
+42
drivers/net/igb/e1000_82575.c
··· 213 213 return -E1000_ERR_PHY; 214 214 } 215 215 216 + /* if 82576 then initialize mailbox parameters */ 217 + if (mac->type == e1000_82576) 218 + igb_init_mbx_params_pf(hw); 219 + 216 220 return 0; 217 221 } 218 222 ··· 1415 1411 rd32(E1000_ROC); 1416 1412 rd32(E1000_RNBC); 1417 1413 rd32(E1000_MPC); 1414 + } 1415 + 1416 + /** 1417 + * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback 1418 + * @hw: pointer to the hardware struct 1419 + * @enable: state to enter, either enabled or disabled 1420 + * 1421 + * enables/disables L2 switch loopback functionality. 1422 + **/ 1423 + void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable) 1424 + { 1425 + u32 dtxswc = rd32(E1000_DTXSWC); 1426 + 1427 + if (enable) 1428 + dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1429 + else 1430 + dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN; 1431 + 1432 + wr32(E1000_DTXSWC, dtxswc); 1433 + } 1434 + 1435 + /** 1436 + * igb_vmdq_set_replication_pf - enable or disable vmdq replication 1437 + * @hw: pointer to the hardware struct 1438 + * @enable: state to enter, either enabled or disabled 1439 + * 1440 + * enables/disables replication of packets across multiple pools. 1441 + **/ 1442 + void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable) 1443 + { 1444 + u32 vt_ctl = rd32(E1000_VT_CTL); 1445 + 1446 + if (enable) 1447 + vt_ctl |= E1000_VT_CTL_VM_REPL_EN; 1448 + else 1449 + vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN; 1450 + 1451 + wr32(E1000_VT_CTL, vt_ctl); 1418 1452 } 1419 1453 1420 1454 static struct e1000_mac_operations e1000_mac_ops_82575 = {
+17
drivers/net/igb/e1000_82575.h
··· 162 162 #define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ 163 163 #define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ 164 164 165 + #define MAX_NUM_VFS 8 166 + 167 + #define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ 168 + 165 169 /* Easy defines for setting default pool, would normally be left a zero */ 166 170 #define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7 167 171 #define E1000_VT_CTL_DEFAULT_POOL_MASK (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT) ··· 185 181 #define E1000_VMOLR_BAM 0x08000000 /* Accept Broadcast packets */ 186 182 #define E1000_VMOLR_MPME 0x10000000 /* Multicast promiscuous mode */ 187 183 #define E1000_VMOLR_STRVLAN 0x40000000 /* Vlan stripping enable */ 184 + #define E1000_VMOLR_STRCRC 0x80000000 /* CRC stripping enable */ 185 + 186 + #define E1000_VLVF_ARRAY_SIZE 32 187 + #define E1000_VLVF_VLANID_MASK 0x00000FFF 188 + #define E1000_VLVF_POOLSEL_SHIFT 12 189 + #define E1000_VLVF_POOLSEL_MASK (0xFF << E1000_VLVF_POOLSEL_SHIFT) 190 + #define E1000_VLVF_LVLAN 0x00100000 191 + #define E1000_VLVF_VLANID_ENABLE 0x80000000 192 + 193 + #define E1000_IOVCTL 0x05BBC 194 + #define E1000_IOVCTL_REUSE_VFQ 0x00000001 188 195 189 196 #define ALL_QUEUES 0xFFFF 190 197 198 + void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); 199 + void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); 191 200 192 201 #endif
+9
drivers/net/igb/e1000_defines.h
··· 45 45 46 46 /* Extended Device Control */ 47 47 #define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Defineable Pin 7 */ 48 + /* Physical Func Reset Done Indication */ 49 + #define E1000_CTRL_EXT_PFRSTD 0x00004000 48 50 #define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 49 51 #define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 50 52 #define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 ··· 327 325 #define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */ 328 326 #define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */ 329 327 #define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */ 328 + #define E1000_ICR_VMMB 0x00000100 /* VM MB event */ 330 329 /* If this bit asserted, the driver should claim the interrupt */ 331 330 #define E1000_ICR_INT_ASSERTED 0x80000000 332 331 /* LAN connected device generates an interrupt */ ··· 365 362 /* Interrupt Mask Set */ 366 363 #define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */ 367 364 #define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */ 365 + #define E1000_IMS_VMMB E1000_ICR_VMMB /* Mail box activity */ 368 366 #define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */ 369 367 #define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */ 370 368 #define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */ ··· 417 413 #define E1000_BLK_PHY_RESET 12 418 414 #define E1000_ERR_SWFW_SYNC 13 419 415 #define E1000_NOT_IMPLEMENTED 14 416 + #define E1000_ERR_MBX 15 420 417 421 418 /* Loop limit on how long we wait for auto-negotiation to complete */ 422 419 #define COPPER_LINK_UP_LIMIT 10 ··· 663 658 #define E1000_GEN_CTL_READY 0x80000000 664 659 #define E1000_GEN_CTL_ADDRESS_SHIFT 8 665 660 #define E1000_GEN_POLL_TIMEOUT 640 661 + 662 + #define E1000_VFTA_ENTRY_SHIFT 5 663 + #define E1000_VFTA_ENTRY_MASK 0x7F 664 + #define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F 666 665 667 666 #endif
+30 -1
drivers/net/igb/e1000_hw.h
··· 32 32 #include <linux/delay.h> 33 33 #include <linux/io.h> 34 34 35 - #include "e1000_mac.h" 36 35 #include "e1000_regs.h" 37 36 #include "e1000_defines.h" 38 37 ··· 271 272 #include "e1000_mac.h" 272 273 #include "e1000_phy.h" 273 274 #include "e1000_nvm.h" 275 + #include "e1000_mbx.h" 274 276 275 277 struct e1000_mac_operations { 276 278 s32 (*check_for_link)(struct e1000_hw *); ··· 427 427 enum e1000_fc_type original_type; 428 428 }; 429 429 430 + struct e1000_mbx_operations { 431 + s32 (*init_params)(struct e1000_hw *hw); 432 + s32 (*read)(struct e1000_hw *, u32 *, u16, u16); 433 + s32 (*write)(struct e1000_hw *, u32 *, u16, u16); 434 + s32 (*read_posted)(struct e1000_hw *, u32 *, u16, u16); 435 + s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16); 436 + s32 (*check_for_msg)(struct e1000_hw *, u16); 437 + s32 (*check_for_ack)(struct e1000_hw *, u16); 438 + s32 (*check_for_rst)(struct e1000_hw *, u16); 439 + }; 440 + 441 + struct e1000_mbx_stats { 442 + u32 msgs_tx; 443 + u32 msgs_rx; 444 + 445 + u32 acks; 446 + u32 reqs; 447 + u32 rsts; 448 + }; 449 + 450 + struct e1000_mbx_info { 451 + struct e1000_mbx_operations ops; 452 + struct e1000_mbx_stats stats; 453 + u32 timeout; 454 + u32 usec_delay; 455 + u16 size; 456 + }; 457 + 430 458 struct e1000_dev_spec_82575 { 431 459 bool sgmii_active; 432 460 }; ··· 471 443 struct e1000_phy_info phy; 472 444 struct e1000_nvm_info nvm; 473 445 struct e1000_bus_info bus; 446 + struct e1000_mbx_info mbx; 474 447 struct e1000_host_mng_dhcp_cookie mng_cookie; 475 448 476 449 union {
+24
drivers/net/igb/e1000_mac.c
··· 118 118 } 119 119 120 120 /** 121 + * igb_vfta_set - enable or disable vlan in VLAN filter table 122 + * @hw: pointer to the HW structure 123 + * @vid: VLAN id to add or remove 124 + * @add: if true add filter, if false remove 125 + * 126 + * Sets or clears a bit in the VLAN filter table array based on VLAN id 127 + * and if we are adding or removing the filter 128 + **/ 129 + void igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add) 130 + { 131 + u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK; 132 + u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK); 133 + u32 vfta; 134 + 135 + vfta = array_rd32(E1000_VFTA, index); 136 + if (add) 137 + vfta |= mask; 138 + else 139 + vfta &= ~mask; 140 + 141 + igb_write_vfta(hw, index, vfta); 142 + } 143 + 144 + /** 121 145 * igb_check_alt_mac_addr - Check for alternate MAC addr 122 146 * @hw: pointer to the HW structure 123 147 *
+1
drivers/net/igb/e1000_mac.h
··· 58 58 59 59 void igb_clear_hw_cntrs_base(struct e1000_hw *hw); 60 60 void igb_clear_vfta(struct e1000_hw *hw); 61 + void igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add); 61 62 void igb_config_collision_dist(struct e1000_hw *hw); 62 63 void igb_mta_set(struct e1000_hw *hw, u32 hash_value); 63 64 void igb_put_hw_semaphore(struct e1000_hw *hw);
+447
drivers/net/igb/e1000_mbx.c
··· 1 + /******************************************************************************* 2 + 3 + Intel(R) Gigabit Ethernet Linux driver 4 + Copyright(c) 2007-2009 Intel Corporation. 5 + 6 + This program is free software; you can redistribute it and/or modify it 7 + under the terms and conditions of the GNU General Public License, 8 + version 2, as published by the Free Software Foundation. 9 + 10 + This program is distributed in the hope it will be useful, but WITHOUT 11 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + more details. 14 + 15 + You should have received a copy of the GNU General Public License along with 16 + this program; if not, write to the Free Software Foundation, Inc., 17 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 + 19 + The full GNU General Public License is included in this distribution in 20 + the file called "COPYING". 21 + 22 + Contact Information: 23 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 25 + 26 + *******************************************************************************/ 27 + 28 + #include "e1000_mbx.h" 29 + 30 + /** 31 + * igb_read_mbx - Reads a message from the mailbox 32 + * @hw: pointer to the HW structure 33 + * @msg: The message buffer 34 + * @size: Length of buffer 35 + * @mbx_id: id of mailbox to read 36 + * 37 + * returns SUCCESS if it successfuly read message from buffer 38 + **/ 39 + s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 40 + { 41 + struct e1000_mbx_info *mbx = &hw->mbx; 42 + s32 ret_val = -E1000_ERR_MBX; 43 + 44 + /* limit read to size of mailbox */ 45 + if (size > mbx->size) 46 + size = mbx->size; 47 + 48 + if (mbx->ops.read) 49 + ret_val = mbx->ops.read(hw, msg, size, mbx_id); 50 + 51 + return ret_val; 52 + } 53 + 54 + /** 55 + * igb_write_mbx - Write a message to the mailbox 56 + * @hw: pointer to the HW structure 57 + * @msg: The message buffer 58 + * @size: Length of buffer 59 + * @mbx_id: id of mailbox to write 60 + * 61 + * returns SUCCESS if it successfully copied message into the buffer 62 + **/ 63 + s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 64 + { 65 + struct e1000_mbx_info *mbx = &hw->mbx; 66 + s32 ret_val = 0; 67 + 68 + if (size > mbx->size) 69 + ret_val = -E1000_ERR_MBX; 70 + 71 + else if (mbx->ops.write) 72 + ret_val = mbx->ops.write(hw, msg, size, mbx_id); 73 + 74 + return ret_val; 75 + } 76 + 77 + /** 78 + * igb_check_for_msg - checks to see if someone sent us mail 79 + * @hw: pointer to the HW structure 80 + * @mbx_id: id of mailbox to check 81 + * 82 + * returns SUCCESS if the Status bit was found or else ERR_MBX 83 + **/ 84 + s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id) 85 + { 86 + struct e1000_mbx_info *mbx = &hw->mbx; 87 + s32 ret_val = -E1000_ERR_MBX; 88 + 89 + if (mbx->ops.check_for_msg) 90 + ret_val = mbx->ops.check_for_msg(hw, mbx_id); 91 + 92 + return ret_val; 93 + } 94 + 95 + /** 96 + * 
igb_check_for_ack - checks to see if someone sent us ACK 97 + * @hw: pointer to the HW structure 98 + * @mbx_id: id of mailbox to check 99 + * 100 + * returns SUCCESS if the Status bit was found or else ERR_MBX 101 + **/ 102 + s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id) 103 + { 104 + struct e1000_mbx_info *mbx = &hw->mbx; 105 + s32 ret_val = -E1000_ERR_MBX; 106 + 107 + if (mbx->ops.check_for_ack) 108 + ret_val = mbx->ops.check_for_ack(hw, mbx_id); 109 + 110 + return ret_val; 111 + } 112 + 113 + /** 114 + * igb_check_for_rst - checks to see if other side has reset 115 + * @hw: pointer to the HW structure 116 + * @mbx_id: id of mailbox to check 117 + * 118 + * returns SUCCESS if the Status bit was found or else ERR_MBX 119 + **/ 120 + s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id) 121 + { 122 + struct e1000_mbx_info *mbx = &hw->mbx; 123 + s32 ret_val = -E1000_ERR_MBX; 124 + 125 + if (mbx->ops.check_for_rst) 126 + ret_val = mbx->ops.check_for_rst(hw, mbx_id); 127 + 128 + return ret_val; 129 + } 130 + 131 + /** 132 + * igb_poll_for_msg - Wait for message notification 133 + * @hw: pointer to the HW structure 134 + * @mbx_id: id of mailbox to write 135 + * 136 + * returns SUCCESS if it successfully received a message notification 137 + **/ 138 + static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id) 139 + { 140 + struct e1000_mbx_info *mbx = &hw->mbx; 141 + int countdown = mbx->timeout; 142 + 143 + if (!mbx->ops.check_for_msg) 144 + goto out; 145 + 146 + while (mbx->ops.check_for_msg(hw, mbx_id)) { 147 + if (!countdown) 148 + break; 149 + countdown--; 150 + udelay(mbx->usec_delay); 151 + } 152 + out: 153 + return countdown ? 
0 : -E1000_ERR_MBX; 154 + } 155 + 156 + /** 157 + * igb_poll_for_ack - Wait for message acknowledgement 158 + * @hw: pointer to the HW structure 159 + * @mbx_id: id of mailbox to write 160 + * 161 + * returns SUCCESS if it successfully received a message acknowledgement 162 + **/ 163 + static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id) 164 + { 165 + struct e1000_mbx_info *mbx = &hw->mbx; 166 + int countdown = mbx->timeout; 167 + 168 + if (!mbx->ops.check_for_ack) 169 + goto out; 170 + 171 + while (mbx->ops.check_for_ack(hw, mbx_id)) { 172 + if (!countdown) 173 + break; 174 + countdown--; 175 + udelay(mbx->usec_delay); 176 + } 177 + out: 178 + return countdown ? 0 : -E1000_ERR_MBX; 179 + } 180 + 181 + /** 182 + * igb_read_posted_mbx - Wait for message notification and receive message 183 + * @hw: pointer to the HW structure 184 + * @msg: The message buffer 185 + * @size: Length of buffer 186 + * @mbx_id: id of mailbox to write 187 + * 188 + * returns SUCCESS if it successfully received a message notification and 189 + * copied it into the receive buffer. 
190 + **/ 191 + s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 192 + { 193 + struct e1000_mbx_info *mbx = &hw->mbx; 194 + s32 ret_val = -E1000_ERR_MBX; 195 + 196 + if (!mbx->ops.read) 197 + goto out; 198 + 199 + ret_val = igb_poll_for_msg(hw, mbx_id); 200 + 201 + if (!ret_val) 202 + ret_val = mbx->ops.read(hw, msg, size, mbx_id); 203 + out: 204 + return ret_val; 205 + } 206 + 207 + /** 208 + * igb_write_posted_mbx - Write a message to the mailbox, wait for ack 209 + * @hw: pointer to the HW structure 210 + * @msg: The message buffer 211 + * @size: Length of buffer 212 + * @mbx_id: id of mailbox to write 213 + * 214 + * returns SUCCESS if it successfully copied message into the buffer and 215 + * received an ack to that message within delay * timeout period 216 + **/ 217 + s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 218 + { 219 + struct e1000_mbx_info *mbx = &hw->mbx; 220 + s32 ret_val = 0; 221 + 222 + if (!mbx->ops.write) 223 + goto out; 224 + 225 + /* send msg*/ 226 + ret_val = mbx->ops.write(hw, msg, size, mbx_id); 227 + 228 + /* if msg sent wait until we receive an ack */ 229 + if (!ret_val) 230 + ret_val = igb_poll_for_ack(hw, mbx_id); 231 + out: 232 + return ret_val; 233 + } 234 + 235 + /** 236 + * e1000_init_mbx_ops_generic - Initialize NVM function pointers 237 + * @hw: pointer to the HW structure 238 + * 239 + * Setups up the function pointers to no-op functions 240 + **/ 241 + void e1000_init_mbx_ops_generic(struct e1000_hw *hw) 242 + { 243 + struct e1000_mbx_info *mbx = &hw->mbx; 244 + mbx->ops.read_posted = igb_read_posted_mbx; 245 + mbx->ops.write_posted = igb_write_posted_mbx; 246 + } 247 + 248 + static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask) 249 + { 250 + u32 mbvficr = rd32(E1000_MBVFICR); 251 + s32 ret_val = -E1000_ERR_MBX; 252 + 253 + if (mbvficr & mask) { 254 + ret_val = 0; 255 + wr32(E1000_MBVFICR, mask); 256 + } 257 + 258 + return ret_val; 259 + } 260 + 261 + /** 262 
+ * igb_check_for_msg_pf - checks to see if the VF has sent mail 263 + * @hw: pointer to the HW structure 264 + * @vf_number: the VF index 265 + * 266 + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX 267 + **/ 268 + static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number) 269 + { 270 + s32 ret_val = -E1000_ERR_MBX; 271 + 272 + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) { 273 + ret_val = 0; 274 + hw->mbx.stats.reqs++; 275 + } 276 + 277 + return ret_val; 278 + } 279 + 280 + /** 281 + * igb_check_for_ack_pf - checks to see if the VF has ACKed 282 + * @hw: pointer to the HW structure 283 + * @vf_number: the VF index 284 + * 285 + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX 286 + **/ 287 + static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number) 288 + { 289 + s32 ret_val = -E1000_ERR_MBX; 290 + 291 + if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) { 292 + ret_val = 0; 293 + hw->mbx.stats.acks++; 294 + } 295 + 296 + return ret_val; 297 + } 298 + 299 + /** 300 + * igb_check_for_rst_pf - checks to see if the VF has reset 301 + * @hw: pointer to the HW structure 302 + * @vf_number: the VF index 303 + * 304 + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX 305 + **/ 306 + static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number) 307 + { 308 + u32 vflre = rd32(E1000_VFLRE); 309 + s32 ret_val = -E1000_ERR_MBX; 310 + 311 + if (vflre & (1 << vf_number)) { 312 + ret_val = 0; 313 + wr32(E1000_VFLRE, (1 << vf_number)); 314 + hw->mbx.stats.rsts++; 315 + } 316 + 317 + return ret_val; 318 + } 319 + 320 + /** 321 + * igb_write_mbx_pf - Places a message in the mailbox 322 + * @hw: pointer to the HW structure 323 + * @msg: The message buffer 324 + * @size: Length of buffer 325 + * @vf_number: the VF index 326 + * 327 + * returns SUCCESS if it successfully copied message into the buffer 328 + **/ 329 + static s32 igb_write_mbx_pf(struct e1000_hw *hw, 
u32 *msg, u16 size, 330 + u16 vf_number) 331 + { 332 + u32 p2v_mailbox; 333 + s32 ret_val = 0; 334 + u16 i; 335 + 336 + /* Take ownership of the buffer */ 337 + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); 338 + 339 + /* Make sure we have ownership now... */ 340 + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); 341 + if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) { 342 + /* failed to grab ownership */ 343 + ret_val = -E1000_ERR_MBX; 344 + goto out_no_write; 345 + } 346 + 347 + /* 348 + * flush any ack or msg which may already be in the queue 349 + * as they are likely the result of an error 350 + */ 351 + igb_check_for_ack_pf(hw, vf_number); 352 + igb_check_for_msg_pf(hw, vf_number); 353 + 354 + /* copy the caller specified message to the mailbox memory buffer */ 355 + for (i = 0; i < size; i++) 356 + array_wr32(E1000_VMBMEM(vf_number), i, msg[i]); 357 + 358 + /* Interrupt VF to tell it a message has been sent and release buffer*/ 359 + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS); 360 + 361 + /* update stats */ 362 + hw->mbx.stats.msgs_tx++; 363 + 364 + out_no_write: 365 + return ret_val; 366 + 367 + } 368 + 369 + /** 370 + * igb_read_mbx_pf - Read a message from the mailbox 371 + * @hw: pointer to the HW structure 372 + * @msg: The message buffer 373 + * @size: Length of buffer 374 + * @vf_number: the VF index 375 + * 376 + * This function copies a message from the mailbox buffer to the caller's 377 + * memory buffer. The presumption is that the caller knows that there was 378 + * a message due to a VF request so no polling for message is needed. 379 + **/ 380 + static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, 381 + u16 vf_number) 382 + { 383 + u32 p2v_mailbox; 384 + s32 ret_val = 0; 385 + u16 i; 386 + 387 + /* Take ownership of the buffer */ 388 + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); 389 + 390 + /* Make sure we have ownership now... 
*/ 391 + p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number)); 392 + if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) { 393 + /* failed to grab ownership */ 394 + ret_val = -E1000_ERR_MBX; 395 + goto out_no_read; 396 + } 397 + 398 + /* copy the message to the mailbox memory buffer */ 399 + for (i = 0; i < size; i++) 400 + msg[i] = array_rd32(E1000_VMBMEM(vf_number), i); 401 + 402 + /* Acknowledge the message and release buffer */ 403 + wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK); 404 + 405 + /* update stats */ 406 + hw->mbx.stats.msgs_rx++; 407 + 408 + ret_val = 0; 409 + 410 + out_no_read: 411 + return ret_val; 412 + } 413 + 414 + /** 415 + * e1000_init_mbx_params_pf - set initial values for pf mailbox 416 + * @hw: pointer to the HW structure 417 + * 418 + * Initializes the hw->mbx struct to correct values for pf mailbox 419 + */ 420 + s32 igb_init_mbx_params_pf(struct e1000_hw *hw) 421 + { 422 + struct e1000_mbx_info *mbx = &hw->mbx; 423 + 424 + if (hw->mac.type == e1000_82576) { 425 + mbx->timeout = 0; 426 + mbx->usec_delay = 0; 427 + 428 + mbx->size = E1000_VFMAILBOX_SIZE; 429 + 430 + mbx->ops.read = igb_read_mbx_pf; 431 + mbx->ops.write = igb_write_mbx_pf; 432 + mbx->ops.read_posted = igb_read_posted_mbx; 433 + mbx->ops.write_posted = igb_write_posted_mbx; 434 + mbx->ops.check_for_msg = igb_check_for_msg_pf; 435 + mbx->ops.check_for_ack = igb_check_for_ack_pf; 436 + mbx->ops.check_for_rst = igb_check_for_rst_pf; 437 + 438 + mbx->stats.msgs_tx = 0; 439 + mbx->stats.msgs_rx = 0; 440 + mbx->stats.reqs = 0; 441 + mbx->stats.acks = 0; 442 + mbx->stats.rsts = 0; 443 + } 444 + 445 + return 0; 446 + } 447 +
+77
drivers/net/igb/e1000_mbx.h
··· 1 + /******************************************************************************* 2 + 3 + Intel(R) Gigabit Ethernet Linux driver 4 + Copyright(c) 2007-2009 Intel Corporation. 5 + 6 + This program is free software; you can redistribute it and/or modify it 7 + under the terms and conditions of the GNU General Public License, 8 + version 2, as published by the Free Software Foundation. 9 + 10 + This program is distributed in the hope it will be useful, but WITHOUT 11 + ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + more details. 14 + 15 + You should have received a copy of the GNU General Public License along with 16 + this program; if not, write to the Free Software Foundation, Inc., 17 + 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 + 19 + The full GNU General Public License is included in this distribution in 20 + the file called "COPYING". 21 + 22 + Contact Information: 23 + e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 24 + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 25 + 26 + *******************************************************************************/ 27 + 28 + #ifndef _E1000_MBX_H_ 29 + #define _E1000_MBX_H_ 30 + 31 + #include "e1000_hw.h" 32 + 33 + #define E1000_P2VMAILBOX_STS 0x00000001 /* Initiate message send to VF */ 34 + #define E1000_P2VMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ 35 + #define E1000_P2VMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ 36 + #define E1000_P2VMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ 37 + #define E1000_P2VMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ 38 + 39 + #define E1000_MBVFICR_VFREQ_MASK 0x000000FF /* bits for VF messages */ 40 + #define E1000_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ 41 + #define E1000_MBVFICR_VFACK_MASK 0x00FF0000 /* bits for VF acks */ 42 + #define E1000_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ 43 + 44 + #define E1000_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ 45 + 46 + /* If it's a E1000_VF_* msg then it originates in the VF and is sent to the 47 + * PF. The reverse is true if it is E1000_PF_*. 
48 + * Message ACK's are the value or'd with 0xF0000000 49 + */ 50 + #define E1000_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with 51 + * this are the ACK */ 52 + #define E1000_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with 53 + * this are the NACK */ 54 + #define E1000_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still 55 + clear to send requests */ 56 + #define E1000_VT_MSGINFO_SHIFT 16 57 + /* bits 23:16 are used for exra info for certain messages */ 58 + #define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) 59 + 60 + #define E1000_VF_RESET 0x01 /* VF requests reset */ 61 + #define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ 62 + #define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 63 + #define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 64 + #define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 65 + 66 + #define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ 67 + 68 + s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16); 69 + s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16); 70 + s32 igb_read_posted_mbx(struct e1000_hw *, u32 *, u16, u16); 71 + s32 igb_write_posted_mbx(struct e1000_hw *, u32 *, u16, u16); 72 + s32 igb_check_for_msg(struct e1000_hw *, u16); 73 + s32 igb_check_for_ack(struct e1000_hw *, u16); 74 + s32 igb_check_for_rst(struct e1000_hw *, u16); 75 + s32 igb_init_mbx_params_pf(struct e1000_hw *); 76 + 77 + #endif /* _E1000_MBX_H_ */
+12
drivers/net/igb/e1000_regs.h
··· 321 321 #define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */ 322 322 323 323 /* VT Registers */ 324 + #define E1000_MBVFICR 0x00C80 /* Mailbox VF Cause - RWC */ 325 + #define E1000_MBVFIMR 0x00C84 /* Mailbox VF int Mask - RW */ 326 + #define E1000_VFLRE 0x00C88 /* VF Register Events - RWC */ 327 + #define E1000_VFRE 0x00C8C /* VF Receive Enables */ 328 + #define E1000_VFTE 0x00C90 /* VF Transmit Enables */ 324 329 #define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ 330 + #define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ 331 + #define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ 332 + #define E1000_IOVTCL 0x05BBC /* IOV Control Register */ 325 333 /* These act per VF so an array friendly macro is used */ 334 + #define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) 335 + #define E1000_VMBMEM(_n) (0x00800 + (64 * (_n))) 326 336 #define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n))) 337 + #define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine 338 + * Filter - RW */ 327 339 328 340 #define wr32(reg, value) (writel(value, hw->hw_addr + reg)) 329 341 #define rd32(reg) (readl(hw->hw_addr + reg))
+12
drivers/net/igb/igb.h
··· 62 62 #define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES 63 63 #define IGB_ABS_MAX_TX_QUEUES 4 64 64 65 + #define IGB_MAX_VF_MC_ENTRIES 30 66 + #define IGB_MAX_VF_FUNCTIONS 8 67 + #define IGB_MAX_VFTA_ENTRIES 128 68 + 69 + struct vf_data_storage { 70 + unsigned char vf_mac_addresses[ETH_ALEN]; 71 + u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; 72 + u16 num_vf_mc_hashes; 73 + bool clear_to_send; 74 + }; 75 + 65 76 /* RX descriptor control thresholds. 66 77 * PTHRESH - MAC will consider prefetch if it has fewer than this number of 67 78 * descriptors available in its onboard memory. ··· 283 272 unsigned int tx_ring_count; 284 273 unsigned int rx_ring_count; 285 274 unsigned int vfs_allocated_count; 275 + struct vf_data_storage *vf_data; 286 276 }; 287 277 288 278 #define IGB_FLAG_HAS_MSI (1 << 0)
+405 -24
drivers/net/igb/igb_main.c
··· 122 122 static void igb_vlan_rx_add_vid(struct net_device *, u16); 123 123 static void igb_vlan_rx_kill_vid(struct net_device *, u16); 124 124 static void igb_restore_vlan(struct igb_adapter *); 125 + static void igb_ping_all_vfs(struct igb_adapter *); 126 + static void igb_msg_task(struct igb_adapter *); 127 + static int igb_rcv_msg_from_vf(struct igb_adapter *, u32); 125 128 static inline void igb_set_rah_pool(struct e1000_hw *, int , int); 126 129 static void igb_set_mc_list_pools(struct igb_adapter *, int, u16); 130 + static void igb_vmm_control(struct igb_adapter *); 127 131 static inline void igb_set_vmolr(struct e1000_hw *, int); 128 - static inline void igb_set_vf_rlpml(struct igb_adapter *, int, int); 132 + static inline int igb_set_vf_rlpml(struct igb_adapter *, int, int); 133 + static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); 134 + static void igb_restore_vf_multicasts(struct igb_adapter *adapter); 129 135 130 136 static int igb_suspend(struct pci_dev *, pm_message_t); 131 137 #ifdef CONFIG_PM ··· 774 768 wr32(E1000_EIAC, adapter->eims_enable_mask); 775 769 wr32(E1000_EIAM, adapter->eims_enable_mask); 776 770 wr32(E1000_EIMS, adapter->eims_enable_mask); 777 - wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC); 771 + if (adapter->vfs_allocated_count) 772 + wr32(E1000_MBVFIMR, 0xFF); 773 + wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB | 774 + E1000_IMS_DOUTSYNC)); 778 775 } else { 779 776 wr32(E1000_IMS, IMS_ENABLE_MASK); 780 777 wr32(E1000_IAM, IMS_ENABLE_MASK); ··· 901 892 if (adapter->msix_entries) 902 893 igb_configure_msix(adapter); 903 894 895 + igb_vmm_control(adapter); 904 896 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); 905 897 igb_set_vmolr(hw, adapter->vfs_allocated_count); 906 898 ··· 1056 1046 fc->pause_time = 0xFFFF; 1057 1047 fc->send_xon = 1; 1058 1048 fc->type = fc->original_type; 1049 + 1050 + /* disable receive for all VFs and wait one second */ 1051 + if (adapter->vfs_allocated_count) { 1052 
+ int i; 1053 + for (i = 0 ; i < adapter->vfs_allocated_count; i++) 1054 + adapter->vf_data[i].clear_to_send = false; 1055 + 1056 + /* ping all the active vfs to let them know we are going down */ 1057 + igb_ping_all_vfs(adapter); 1058 + 1059 + /* disable transmits and receives */ 1060 + wr32(E1000_VFRE, 0); 1061 + wr32(E1000_VFTE, 0); 1062 + } 1059 1063 1060 1064 /* Allow time for pending master requests to run */ 1061 1065 adapter->hw.mac.ops.reset_hw(&adapter->hw); ··· 1648 1624 * clean_rx handler before we do so. */ 1649 1625 igb_configure(adapter); 1650 1626 1627 + igb_vmm_control(adapter); 1651 1628 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); 1652 1629 igb_set_vmolr(hw, adapter->vfs_allocated_count); 1653 1630 ··· 2481 2456 mac->rar_entry_count); 2482 2457 2483 2458 igb_set_mc_list_pools(adapter, i, mac->rar_entry_count); 2459 + igb_restore_vf_multicasts(adapter); 2460 + 2484 2461 kfree(mta_list); 2485 2462 } 2486 2463 ··· 2598 2571 netif_carrier_on(netdev); 2599 2572 netif_tx_wake_all_queues(netdev); 2600 2573 2574 + igb_ping_all_vfs(adapter); 2575 + 2601 2576 /* link state has changed, schedule phy info update */ 2602 2577 if (!test_bit(__IGB_DOWN, &adapter->state)) 2603 2578 mod_timer(&adapter->phy_info_timer, ··· 2614 2585 netdev->name); 2615 2586 netif_carrier_off(netdev); 2616 2587 netif_tx_stop_all_queues(netdev); 2588 + 2589 + igb_ping_all_vfs(adapter); 2617 2590 2618 2591 /* link state has changed, schedule phy info update */ 2619 2592 if (!test_bit(__IGB_DOWN, &adapter->state)) ··· 3554 3523 /* HW is reporting DMA is out of sync */ 3555 3524 adapter->stats.doosync++; 3556 3525 } 3557 - if (!(icr & E1000_ICR_LSC)) 3558 - goto no_link_interrupt; 3559 - hw->mac.get_link_status = 1; 3560 - /* guard against interrupt when we're going down */ 3561 - if (!test_bit(__IGB_DOWN, &adapter->state)) 3562 - mod_timer(&adapter->watchdog_timer, jiffies + 1); 3563 3526 3564 - no_link_interrupt: 3565 - wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC); 
3527 + /* Check for a mailbox event */ 3528 + if (icr & E1000_ICR_VMMB) 3529 + igb_msg_task(adapter); 3530 + 3531 + if (icr & E1000_ICR_LSC) { 3532 + hw->mac.get_link_status = 1; 3533 + /* guard against interrupt when we're going down */ 3534 + if (!test_bit(__IGB_DOWN, &adapter->state)) 3535 + mod_timer(&adapter->watchdog_timer, jiffies + 1); 3536 + } 3537 + 3538 + wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB); 3566 3539 wr32(E1000_EIMS, adapter->eims_other); 3567 3540 3568 3541 return IRQ_HANDLED; ··· 3753 3718 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 3754 3719 } 3755 3720 #endif /* CONFIG_IGB_DCA */ 3721 + 3722 + static void igb_ping_all_vfs(struct igb_adapter *adapter) 3723 + { 3724 + struct e1000_hw *hw = &adapter->hw; 3725 + u32 ping; 3726 + int i; 3727 + 3728 + for (i = 0 ; i < adapter->vfs_allocated_count; i++) { 3729 + ping = E1000_PF_CONTROL_MSG; 3730 + if (adapter->vf_data[i].clear_to_send) 3731 + ping |= E1000_VT_MSGTYPE_CTS; 3732 + igb_write_mbx(hw, &ping, 1, i); 3733 + } 3734 + } 3735 + 3736 + static int igb_set_vf_multicasts(struct igb_adapter *adapter, 3737 + u32 *msgbuf, u32 vf) 3738 + { 3739 + int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; 3740 + u16 *hash_list = (u16 *)&msgbuf[1]; 3741 + struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 3742 + int i; 3743 + 3744 + /* only up to 30 hash values supported */ 3745 + if (n > 30) 3746 + n = 30; 3747 + 3748 + /* salt away the number of multi cast addresses assigned 3749 + * to this VF for later use to restore when the PF multi cast 3750 + * list changes 3751 + */ 3752 + vf_data->num_vf_mc_hashes = n; 3753 + 3754 + /* VFs are limited to using the MTA hash table for their multicast 3755 + * addresses */ 3756 + for (i = 0; i < n; i++) 3757 + vf_data->vf_mc_hashes[i] = hash_list[i]; 3758 + 3759 + /* Flush and reset the mta with the new values */ 3760 + igb_set_multi(adapter->netdev); 3761 + 3762 + return 0; 3763 + } 3764 + 3765 + static void 
igb_restore_vf_multicasts(struct igb_adapter *adapter) 3766 + { 3767 + struct e1000_hw *hw = &adapter->hw; 3768 + struct vf_data_storage *vf_data; 3769 + int i, j; 3770 + 3771 + for (i = 0; i < adapter->vfs_allocated_count; i++) { 3772 + vf_data = &adapter->vf_data[i]; 3773 + for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 3774 + igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 3775 + } 3776 + } 3777 + 3778 + static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf) 3779 + { 3780 + struct e1000_hw *hw = &adapter->hw; 3781 + u32 pool_mask, reg, vid; 3782 + int i; 3783 + 3784 + pool_mask = 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); 3785 + 3786 + /* Find the vlan filter for this id */ 3787 + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { 3788 + reg = rd32(E1000_VLVF(i)); 3789 + 3790 + /* remove the vf from the pool */ 3791 + reg &= ~pool_mask; 3792 + 3793 + /* if pool is empty then remove entry from vfta */ 3794 + if (!(reg & E1000_VLVF_POOLSEL_MASK) && 3795 + (reg & E1000_VLVF_VLANID_ENABLE)) { 3796 + vid = reg & E1000_VLVF_VLANID_MASK; 3797 + reg = 0; 3798 + igb_vfta_set(hw, vid, false); 3799 + } 3800 + 3801 + wr32(E1000_VLVF(i), reg); 3802 + } 3803 + } 3804 + 3805 + static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf) 3806 + { 3807 + struct e1000_hw *hw = &adapter->hw; 3808 + u32 reg, i; 3809 + 3810 + /* It is an error to call this function when VFs are not enabled */ 3811 + if (!adapter->vfs_allocated_count) 3812 + return -1; 3813 + 3814 + /* Find the vlan filter for this id */ 3815 + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { 3816 + reg = rd32(E1000_VLVF(i)); 3817 + if ((reg & E1000_VLVF_VLANID_ENABLE) && 3818 + vid == (reg & E1000_VLVF_VLANID_MASK)) 3819 + break; 3820 + } 3821 + 3822 + if (add) { 3823 + if (i == E1000_VLVF_ARRAY_SIZE) { 3824 + /* Did not find a matching VLAN ID entry that was 3825 + * enabled. Search for a free filter entry, i.e. 
3826 + * one without the enable bit set 3827 + */ 3828 + for (i = 0; i < E1000_VLVF_ARRAY_SIZE; i++) { 3829 + reg = rd32(E1000_VLVF(i)); 3830 + if (!(reg & E1000_VLVF_VLANID_ENABLE)) 3831 + break; 3832 + } 3833 + } 3834 + if (i < E1000_VLVF_ARRAY_SIZE) { 3835 + /* Found an enabled/available entry */ 3836 + reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT + vf); 3837 + 3838 + /* if !enabled we need to set this up in vfta */ 3839 + if (!(reg & E1000_VLVF_VLANID_ENABLE)) { 3840 + /* add VID to filter table */ 3841 + igb_vfta_set(hw, vid, true); 3842 + reg |= E1000_VLVF_VLANID_ENABLE; 3843 + } 3844 + 3845 + wr32(E1000_VLVF(i), reg); 3846 + return 0; 3847 + } 3848 + } else { 3849 + if (i < E1000_VLVF_ARRAY_SIZE) { 3850 + /* remove vf from the pool */ 3851 + reg &= ~(1 << (E1000_VLVF_POOLSEL_SHIFT + vf)); 3852 + /* if pool is empty then remove entry from vfta */ 3853 + if (!(reg & E1000_VLVF_POOLSEL_MASK)) { 3854 + reg = 0; 3855 + igb_vfta_set(hw, vid, false); 3856 + } 3857 + wr32(E1000_VLVF(i), reg); 3858 + return 0; 3859 + } 3860 + } 3861 + return -1; 3862 + } 3863 + 3864 + static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf) 3865 + { 3866 + int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT; 3867 + int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK); 3868 + 3869 + return igb_vlvf_set(adapter, vid, add, vf); 3870 + } 3871 + 3872 + static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) 3873 + { 3874 + struct e1000_hw *hw = &adapter->hw; 3875 + 3876 + /* disable mailbox functionality for vf */ 3877 + adapter->vf_data[vf].clear_to_send = false; 3878 + 3879 + /* reset offloads to defaults */ 3880 + igb_set_vmolr(hw, vf); 3881 + 3882 + /* reset vlans for device */ 3883 + igb_clear_vf_vfta(adapter, vf); 3884 + 3885 + /* reset multicast table array for vf */ 3886 + adapter->vf_data[vf].num_vf_mc_hashes = 0; 3887 + 3888 + /* Flush and reset the mta with the new values */ 3889 + igb_set_multi(adapter->netdev); 3890 + } 3891 + 
3892 + static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) 3893 + { 3894 + struct e1000_hw *hw = &adapter->hw; 3895 + unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; 3896 + u32 reg, msgbuf[3]; 3897 + u8 *addr = (u8 *)(&msgbuf[1]); 3898 + 3899 + /* process all the same items cleared in a function level reset */ 3900 + igb_vf_reset_event(adapter, vf); 3901 + 3902 + /* set vf mac address */ 3903 + igb_rar_set(hw, vf_mac, vf + 1); 3904 + igb_set_rah_pool(hw, vf, vf + 1); 3905 + 3906 + /* enable transmit and receive for vf */ 3907 + reg = rd32(E1000_VFTE); 3908 + wr32(E1000_VFTE, reg | (1 << vf)); 3909 + reg = rd32(E1000_VFRE); 3910 + wr32(E1000_VFRE, reg | (1 << vf)); 3911 + 3912 + /* enable mailbox functionality for vf */ 3913 + adapter->vf_data[vf].clear_to_send = true; 3914 + 3915 + /* reply to reset with ack and vf mac address */ 3916 + msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 3917 + memcpy(addr, vf_mac, 6); 3918 + igb_write_mbx(hw, msgbuf, 3, vf); 3919 + } 3920 + 3921 + static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 3922 + { 3923 + unsigned char *addr = (char *)&msg[1]; 3924 + int err = -1; 3925 + 3926 + if (is_valid_ether_addr(addr)) 3927 + err = igb_set_vf_mac(adapter, vf, addr); 3928 + 3929 + return err; 3930 + 3931 + } 3932 + 3933 + static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) 3934 + { 3935 + struct e1000_hw *hw = &adapter->hw; 3936 + u32 msg = E1000_VT_MSGTYPE_NACK; 3937 + 3938 + /* if device isn't clear to send it shouldn't be reading either */ 3939 + if (!adapter->vf_data[vf].clear_to_send) 3940 + igb_write_mbx(hw, &msg, 1, vf); 3941 + } 3942 + 3943 + 3944 + static void igb_msg_task(struct igb_adapter *adapter) 3945 + { 3946 + struct e1000_hw *hw = &adapter->hw; 3947 + u32 vf; 3948 + 3949 + for (vf = 0; vf < adapter->vfs_allocated_count; vf++) { 3950 + /* process any reset requests */ 3951 + if (!igb_check_for_rst(hw, vf)) { 3952 + 
adapter->vf_data[vf].clear_to_send = false; 3953 + igb_vf_reset_event(adapter, vf); 3954 + } 3955 + 3956 + /* process any messages pending */ 3957 + if (!igb_check_for_msg(hw, vf)) 3958 + igb_rcv_msg_from_vf(adapter, vf); 3959 + 3960 + /* process any acks */ 3961 + if (!igb_check_for_ack(hw, vf)) 3962 + igb_rcv_ack_from_vf(adapter, vf); 3963 + 3964 + } 3965 + } 3966 + 3967 + static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) 3968 + { 3969 + u32 mbx_size = E1000_VFMAILBOX_SIZE; 3970 + u32 msgbuf[mbx_size]; 3971 + struct e1000_hw *hw = &adapter->hw; 3972 + s32 retval; 3973 + 3974 + retval = igb_read_mbx(hw, msgbuf, mbx_size, vf); 3975 + 3976 + if (retval) 3977 + dev_err(&adapter->pdev->dev, 3978 + "Error receiving message from VF\n"); 3979 + 3980 + /* this is a message we already processed, do nothing */ 3981 + if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 3982 + return retval; 3983 + 3984 + /* 3985 + * until the vf completes a reset it should not be 3986 + * allowed to start any configuration. 
3987 + */ 3988 + 3989 + if (msgbuf[0] == E1000_VF_RESET) { 3990 + igb_vf_reset_msg(adapter, vf); 3991 + 3992 + return retval; 3993 + } 3994 + 3995 + if (!adapter->vf_data[vf].clear_to_send) { 3996 + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; 3997 + igb_write_mbx(hw, msgbuf, 1, vf); 3998 + return retval; 3999 + } 4000 + 4001 + switch ((msgbuf[0] & 0xFFFF)) { 4002 + case E1000_VF_SET_MAC_ADDR: 4003 + retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 4004 + break; 4005 + case E1000_VF_SET_MULTICAST: 4006 + retval = igb_set_vf_multicasts(adapter, msgbuf, vf); 4007 + break; 4008 + case E1000_VF_SET_LPE: 4009 + retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); 4010 + break; 4011 + case E1000_VF_SET_VLAN: 4012 + retval = igb_set_vf_vlan(adapter, msgbuf, vf); 4013 + break; 4014 + default: 4015 + dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); 4016 + retval = -1; 4017 + break; 4018 + } 4019 + 4020 + /* notify the VF of the results of what it sent us */ 4021 + if (retval) 4022 + msgbuf[0] |= E1000_VT_MSGTYPE_NACK; 4023 + else 4024 + msgbuf[0] |= E1000_VT_MSGTYPE_ACK; 4025 + 4026 + msgbuf[0] |= E1000_VT_MSGTYPE_CTS; 4027 + 4028 + igb_write_mbx(hw, msgbuf, 1, vf); 4029 + 4030 + return retval; 4031 + } 3756 4032 3757 4033 /** 3758 4034 * igb_intr_msi - Interrupt Handler ··· 4928 4582 { 4929 4583 struct igb_adapter *adapter = netdev_priv(netdev); 4930 4584 struct e1000_hw *hw = &adapter->hw; 4931 - u32 vfta, index; 4585 + int pf_id = adapter->vfs_allocated_count; 4932 4586 4933 4587 if ((hw->mng_cookie.status & 4934 4588 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 4935 4589 (vid == adapter->mng_vlan_id)) 4936 4590 return; 4937 - /* add VID to filter table */ 4938 - index = (vid >> 5) & 0x7F; 4939 - vfta = array_rd32(E1000_VFTA, index); 4940 - vfta |= (1 << (vid & 0x1F)); 4941 - igb_write_vfta(&adapter->hw, index, vfta); 4591 + 4592 + /* add vid to vlvf if sr-iov is enabled, 4593 + * if that fails add directly to filter table */ 4594 + if (igb_vlvf_set(adapter, vid, true, 
pf_id)) 4595 + igb_vfta_set(hw, vid, true); 4596 + 4942 4597 } 4943 4598 4944 4599 static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 4945 4600 { 4946 4601 struct igb_adapter *adapter = netdev_priv(netdev); 4947 4602 struct e1000_hw *hw = &adapter->hw; 4948 - u32 vfta, index; 4603 + int pf_id = adapter->vfs_allocated_count; 4949 4604 4950 4605 igb_irq_disable(adapter); 4951 4606 vlan_group_set_device(adapter->vlgrp, vid, NULL); ··· 4962 4615 return; 4963 4616 } 4964 4617 4965 - /* remove VID from filter table */ 4966 - index = (vid >> 5) & 0x7F; 4967 - vfta = array_rd32(E1000_VFTA, index); 4968 - vfta &= ~(1 << (vid & 0x1F)); 4969 - igb_write_vfta(&adapter->hw, index, vfta); 4618 + /* remove vid from vlvf if sr-iov is enabled, 4619 + * if not in vlvf remove from vfta */ 4620 + if (igb_vlvf_set(adapter, vid, false, pf_id)) 4621 + igb_vfta_set(hw, vid, false); 4970 4622 } 4971 4623 4972 4624 static void igb_restore_vlan(struct igb_adapter *adapter) ··· 5296 4950 wr32(E1000_VMOLR(vfn), reg_data); 5297 4951 } 5298 4952 5299 - static inline void igb_set_vf_rlpml(struct igb_adapter *adapter, int size, 5300 - int vfn) 4953 + static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size, 4954 + int vfn) 5301 4955 { 5302 4956 struct e1000_hw *hw = &adapter->hw; 5303 4957 u32 vmolr; ··· 5306 4960 vmolr &= ~E1000_VMOLR_RLPML_MASK; 5307 4961 vmolr |= size | E1000_VMOLR_LPE; 5308 4962 wr32(E1000_VMOLR(vfn), vmolr); 4963 + 4964 + return 0; 5309 4965 } 5310 4966 5311 4967 static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry) ··· 5331 4983 5332 4984 for (; i < total_rar_filters; i++) 5333 4985 igb_set_rah_pool(hw, adapter->vfs_allocated_count, i); 4986 + } 4987 + 4988 + static int igb_set_vf_mac(struct igb_adapter *adapter, 4989 + int vf, unsigned char *mac_addr) 4990 + { 4991 + struct e1000_hw *hw = &adapter->hw; 4992 + int rar_entry = vf + 1; /* VF MAC addresses start at entry 1 */ 4993 + 4994 + igb_rar_set(hw, mac_addr, 
rar_entry); 4995 + 4996 + memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, 6); 4997 + 4998 + igb_set_rah_pool(hw, vf, rar_entry); 4999 + 5000 + return 0; 5001 + } 5002 + 5003 + static void igb_vmm_control(struct igb_adapter *adapter) 5004 + { 5005 + struct e1000_hw *hw = &adapter->hw; 5006 + u32 reg_data; 5007 + 5008 + if (!adapter->vfs_allocated_count) 5009 + return; 5010 + 5011 + /* VF's need PF reset indication before they 5012 + * can send/receive mail */ 5013 + reg_data = rd32(E1000_CTRL_EXT); 5014 + reg_data |= E1000_CTRL_EXT_PFRSTD; 5015 + wr32(E1000_CTRL_EXT, reg_data); 5016 + 5017 + igb_vmdq_set_loopback_pf(hw, true); 5018 + igb_vmdq_set_replication_pf(hw, true); 5334 5019 } 5335 5020 5336 5021 /* igb_main.c */