Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

igb: transition driver to only using advanced descriptors

Currently the driver uses advanced descriptors for its main functionality,
but falls back to legacy descriptors when testing. This patch changes this
so that advanced descriptors are used throughout and all mentions of legacy
descriptors are removed.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Acked-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Alexander Duyck and committed by
David S. Miller
85e8d004 cbd347ad

+32 -166
-138
drivers/net/igb/e1000_hw.h
··· 144 144 e1000_fc_default = 0xFF 145 145 }; 146 146 147 - 148 - /* Receive Descriptor */ 149 - struct e1000_rx_desc { 150 - __le64 buffer_addr; /* Address of the descriptor's data buffer */ 151 - __le16 length; /* Length of data DMAed into data buffer */ 152 - __le16 csum; /* Packet checksum */ 153 - u8 status; /* Descriptor status */ 154 - u8 errors; /* Descriptor Errors */ 155 - __le16 special; 156 - }; 157 - 158 - /* Receive Descriptor - Extended */ 159 - union e1000_rx_desc_extended { 160 - struct { 161 - __le64 buffer_addr; 162 - __le64 reserved; 163 - } read; 164 - struct { 165 - struct { 166 - __le32 mrq; /* Multiple Rx Queues */ 167 - union { 168 - __le32 rss; /* RSS Hash */ 169 - struct { 170 - __le16 ip_id; /* IP id */ 171 - __le16 csum; /* Packet Checksum */ 172 - } csum_ip; 173 - } hi_dword; 174 - } lower; 175 - struct { 176 - __le32 status_error; /* ext status/error */ 177 - __le16 length; 178 - __le16 vlan; /* VLAN tag */ 179 - } upper; 180 - } wb; /* writeback */ 181 - }; 182 - 183 - #define MAX_PS_BUFFERS 4 184 - /* Receive Descriptor - Packet Split */ 185 - union e1000_rx_desc_packet_split { 186 - struct { 187 - /* one buffer for protocol header(s), three data buffers */ 188 - __le64 buffer_addr[MAX_PS_BUFFERS]; 189 - } read; 190 - struct { 191 - struct { 192 - __le32 mrq; /* Multiple Rx Queues */ 193 - union { 194 - __le32 rss; /* RSS Hash */ 195 - struct { 196 - __le16 ip_id; /* IP id */ 197 - __le16 csum; /* Packet Checksum */ 198 - } csum_ip; 199 - } hi_dword; 200 - } lower; 201 - struct { 202 - __le32 status_error; /* ext status/error */ 203 - __le16 length0; /* length of buffer 0 */ 204 - __le16 vlan; /* VLAN tag */ 205 - } middle; 206 - struct { 207 - __le16 header_status; 208 - __le16 length[3]; /* length of buffers 1-3 */ 209 - } upper; 210 - __le64 reserved; 211 - } wb; /* writeback */ 212 - }; 213 - 214 - /* Transmit Descriptor */ 215 - struct e1000_tx_desc { 216 - __le64 buffer_addr; /* Address of the descriptor's data buffer */ 217 
- union { 218 - __le32 data; 219 - struct { 220 - __le16 length; /* Data buffer length */ 221 - u8 cso; /* Checksum offset */ 222 - u8 cmd; /* Descriptor control */ 223 - } flags; 224 - } lower; 225 - union { 226 - __le32 data; 227 - struct { 228 - u8 status; /* Descriptor status */ 229 - u8 css; /* Checksum start */ 230 - __le16 special; 231 - } fields; 232 - } upper; 233 - }; 234 - 235 - /* Offload Context Descriptor */ 236 - struct e1000_context_desc { 237 - union { 238 - __le32 ip_config; 239 - struct { 240 - u8 ipcss; /* IP checksum start */ 241 - u8 ipcso; /* IP checksum offset */ 242 - __le16 ipcse; /* IP checksum end */ 243 - } ip_fields; 244 - } lower_setup; 245 - union { 246 - __le32 tcp_config; 247 - struct { 248 - u8 tucss; /* TCP checksum start */ 249 - u8 tucso; /* TCP checksum offset */ 250 - __le16 tucse; /* TCP checksum end */ 251 - } tcp_fields; 252 - } upper_setup; 253 - __le32 cmd_and_length; 254 - union { 255 - __le32 data; 256 - struct { 257 - u8 status; /* Descriptor status */ 258 - u8 hdr_len; /* Header length */ 259 - __le16 mss; /* Maximum segment size */ 260 - } fields; 261 - } tcp_seg_setup; 262 - }; 263 - 264 - /* Offload data descriptor */ 265 - struct e1000_data_desc { 266 - __le64 buffer_addr; /* Address of the descriptor's buffer address */ 267 - union { 268 - __le32 data; 269 - struct { 270 - __le16 length; /* Data buffer length */ 271 - u8 typ_len_ext; 272 - u8 cmd; 273 - } flags; 274 - } lower; 275 - union { 276 - __le32 data; 277 - struct { 278 - u8 status; /* Descriptor status */ 279 - u8 popts; /* Packet Options */ 280 - __le16 special; 281 - } fields; 282 - } upper; 283 - }; 284 - 285 147 /* Statistics counters collected by the MAC */ 286 148 struct e1000_hw_stats { 287 149 u64 crcerrs;
-3
drivers/net/igb/igb.h
··· 180 180 (&(((union e1000_adv_tx_desc *)((R).desc))[i])) 181 181 #define E1000_TX_CTXTDESC_ADV(R, i) \ 182 182 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) 183 - #define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i])) 184 - #define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc) 185 - #define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc) 186 183 187 184 /* board specific private data structure */ 188 185
+29 -22
drivers/net/igb/igb_ethtool.c
··· 1272 1272 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1273 1273 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1274 1274 struct pci_dev *pdev = adapter->pdev; 1275 + struct igb_buffer *buffer_info; 1275 1276 u32 rctl; 1276 1277 int i, ret_val; 1277 1278 ··· 1289 1288 goto err_nomem; 1290 1289 } 1291 1290 1292 - tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 1291 + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1293 1292 tx_ring->size = ALIGN(tx_ring->size, 4096); 1294 1293 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1295 1294 &tx_ring->dma); ··· 1303 1302 ((u64) tx_ring->dma & 0x00000000FFFFFFFF)); 1304 1303 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32)); 1305 1304 wr32(E1000_TDLEN(0), 1306 - tx_ring->count * sizeof(struct e1000_tx_desc)); 1305 + tx_ring->count * sizeof(union e1000_adv_tx_desc)); 1307 1306 wr32(E1000_TDH(0), 0); 1308 1307 wr32(E1000_TDT(0), 0); 1309 1308 wr32(E1000_TCTL, ··· 1312 1311 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT); 1313 1312 1314 1313 for (i = 0; i < tx_ring->count; i++) { 1315 - struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i); 1314 + union e1000_adv_tx_desc *tx_desc; 1316 1315 struct sk_buff *skb; 1317 1316 unsigned int size = 1024; 1318 1317 1318 + tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 1319 1319 skb = alloc_skb(size, GFP_KERNEL); 1320 1320 if (!skb) { 1321 1321 ret_val = 3; 1322 1322 goto err_nomem; 1323 1323 } 1324 1324 skb_put(skb, size); 1325 - tx_ring->buffer_info[i].skb = skb; 1326 - tx_ring->buffer_info[i].length = skb->len; 1327 - tx_ring->buffer_info[i].dma = 1328 - pci_map_single(pdev, skb->data, skb->len, 1329 - PCI_DMA_TODEVICE); 1330 - tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma); 1331 - tx_desc->lower.data = cpu_to_le32(skb->len); 1332 - tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP | 1333 - E1000_TXD_CMD_IFCS | 1334 - E1000_TXD_CMD_RS); 1335 - tx_desc->upper.data = 0; 1325 + buffer_info = 
&tx_ring->buffer_info[i]; 1326 + buffer_info->skb = skb; 1327 + buffer_info->length = skb->len; 1328 + buffer_info->dma = pci_map_single(pdev, skb->data, skb->len, 1329 + PCI_DMA_TODEVICE); 1330 + tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 1331 + tx_desc->read.olinfo_status = cpu_to_le32(skb->len) << 1332 + E1000_ADVTXD_PAYLEN_SHIFT; 1333 + tx_desc->read.cmd_type_len = cpu_to_le32(skb->len); 1334 + tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP | 1335 + E1000_TXD_CMD_IFCS | 1336 + E1000_TXD_CMD_RS | 1337 + E1000_ADVTXD_DTYP_DATA | 1338 + E1000_ADVTXD_DCMD_DEXT); 1336 1339 } 1337 1340 1338 1341 /* Setup Rx descriptor ring and Rx buffers */ ··· 1352 1347 goto err_nomem; 1353 1348 } 1354 1349 1355 - rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc); 1350 + rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1356 1351 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1357 1352 &rx_ring->dma); 1358 1353 if (!rx_ring->desc) { ··· 1374 1369 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF | 1375 1370 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT); 1376 1371 wr32(E1000_RCTL, rctl); 1377 - wr32(E1000_SRRCTL(0), 0); 1372 + wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF); 1378 1373 1379 1374 for (i = 0; i < rx_ring->count; i++) { 1380 - struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i); 1375 + union e1000_adv_rx_desc *rx_desc; 1381 1376 struct sk_buff *skb; 1382 1377 1378 + buffer_info = &rx_ring->buffer_info[i]; 1379 + rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 1383 1380 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN, 1384 1381 GFP_KERNEL); 1385 1382 if (!skb) { ··· 1389 1382 goto err_nomem; 1390 1383 } 1391 1384 skb_reserve(skb, NET_IP_ALIGN); 1392 - rx_ring->buffer_info[i].skb = skb; 1393 - rx_ring->buffer_info[i].dma = 1394 - pci_map_single(pdev, skb->data, IGB_RXBUFFER_2048, 1395 - PCI_DMA_FROMDEVICE); 1396 - rx_desc->buffer_addr = 
cpu_to_le64(rx_ring->buffer_info[i].dma); 1385 + buffer_info->skb = skb; 1386 + buffer_info->dma = pci_map_single(pdev, skb->data, 1387 + IGB_RXBUFFER_2048, 1388 + PCI_DMA_FROMDEVICE); 1389 + rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma); 1397 1390 memset(skb->data, 0x00, skb->len); 1398 1391 } 1399 1392
+3 -3
drivers/net/igb/igb_main.c
··· 994 994 /* the tx fifo also stores 16 bytes of information about the tx 995 995 * but don't include ethernet FCS because hardware appends it */ 996 996 min_tx_space = (adapter->max_frame_size + 997 - sizeof(struct e1000_tx_desc) - 997 + sizeof(union e1000_adv_tx_desc) - 998 998 ETH_FCS_LEN) * 2; 999 999 min_tx_space = ALIGN(min_tx_space, 1024); 1000 1000 min_tx_space >>= 10; ··· 1704 1704 memset(tx_ring->buffer_info, 0, size); 1705 1705 1706 1706 /* round up to nearest 4K */ 1707 - tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 1707 + tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1708 1708 tx_ring->size = ALIGN(tx_ring->size, 4096); 1709 1709 1710 1710 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, ··· 1773 1773 struct igb_ring *ring = &adapter->tx_ring[i]; 1774 1774 j = ring->reg_idx; 1775 1775 wr32(E1000_TDLEN(j), 1776 - ring->count * sizeof(struct e1000_tx_desc)); 1776 + ring->count * sizeof(union e1000_adv_tx_desc)); 1777 1777 tdba = ring->dma; 1778 1778 wr32(E1000_TDBAL(j), 1779 1779 tdba & 0x00000000ffffffffULL);