Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] S2io: Multi buffer mode support

Hi,
This patch provides dynamic 2 buffer-mode and 3 buffer-mode options.
Previously, 2 buffer-mode was a compile-time option. Now, with this patch applied,
one can load the driver in 2 buffer-mode with a module-load parameter

ie.
#insmod s2io.ko rx_ring_mode=2

This patch also provides 3 buffer-mode, which adds header-separation
functionality. In 3 buffer-mode, skb->data will have the L2/L3/L4 headers and
"skb_shinfo(skb)->frag_list->data" will have the L4 payload.
One can load the driver in 3 buffer-mode with the same module-load parameter as above,

ie.
#insmod s2io.ko rx_ring_mode=3

Please review the patch.

Signed-off-by: Ananda Raju <ananda.raju@neterion.com>
Signed-off-by: Jeff Garzik <jgarzik@pobox.com>

authored by

Ananda Raju and committed by
Jeff Garzik
da6971d8 29b09fcc

+453 -407
-11
drivers/net/Kconfig
··· 2258 2258 2259 2259 If in doubt, say N. 2260 2260 2261 - config 2BUFF_MODE 2262 - bool "Use 2 Buffer Mode on Rx side." 2263 - depends on S2IO 2264 - ---help--- 2265 - On enabling the 2 buffer mode, the received frame will be 2266 - split into 2 parts before being DMA'ed to the hosts memory. 2267 - The parts are the ethernet header and ethernet payload. 2268 - This is useful on systems where DMA'ing to to unaligned 2269 - physical memory loactions comes with a heavy price. 2270 - If not sure please say N. 2271 - 2272 2261 endmenu 2273 2262 2274 2263 if !UML
+406 -352
drivers/net/s2io.c
··· 30 30 * in the driver. 31 31 * rx_ring_sz: This defines the number of descriptors each ring can have. This 32 32 * is also an array of size 8. 33 + * rx_ring_mode: This defines the operation mode of all 8 rings. The valid 34 + * values are 1, 2 and 3. 33 35 * tx_fifo_num: This defines the number of Tx FIFOs thats used int the driver. 34 36 * tx_fifo_len: This too is an array of 8. Each element defines the number of 35 37 * Tx descriptors that can be associated with each corresponding FIFO. ··· 67 65 #include "s2io.h" 68 66 #include "s2io-regs.h" 69 67 70 - #define DRV_VERSION "Version 2.0.9.1" 68 + #define DRV_VERSION "Version 2.0.9.3" 71 69 72 70 /* S2io Driver name & version. */ 73 71 static char s2io_driver_name[] = "Neterion"; 74 72 static char s2io_driver_version[] = DRV_VERSION; 73 + 74 + int rxd_size[4] = {32,48,48,64}; 75 + int rxd_count[4] = {127,85,85,63}; 75 76 76 77 static inline int RXD_IS_UP2DT(RxD_t *rxdp) 77 78 { ··· 109 104 mac_control = &sp->mac_control; 110 105 if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) { 111 106 level = LOW; 112 - if (rxb_size <= MAX_RXDS_PER_BLOCK) { 107 + if (rxb_size <= rxd_count[sp->rxd_mode]) { 113 108 level = PANIC; 114 109 } 115 110 } ··· 301 296 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 302 297 static unsigned int rts_frm_len[MAX_RX_RINGS] = 303 298 {[0 ...(MAX_RX_RINGS - 1)] = 0 }; 299 + static unsigned int rx_ring_mode = 1; 304 300 static unsigned int use_continuous_tx_intrs = 1; 305 301 static unsigned int rmac_pause_time = 65535; 306 302 static unsigned int mc_pause_threshold_q0q3 = 187; ··· 310 304 static unsigned int tmac_util_period = 5; 311 305 static unsigned int rmac_util_period = 5; 312 306 static unsigned int bimodal = 0; 307 + static unsigned int l3l4hdr_size = 128; 313 308 #ifndef CONFIG_S2IO_NAPI 314 309 static unsigned int indicate_max_pkts; 315 310 #endif ··· 364 357 int i, j, blk_cnt, rx_sz, tx_sz; 365 358 int lst_size, lst_per_page; 366 359 struct net_device *dev = nic->dev; 367 - #ifdef 
CONFIG_2BUFF_MODE 368 360 unsigned long tmp; 369 361 buffAdd_t *ba; 370 - #endif 371 362 372 363 mac_info_t *mac_control; 373 364 struct config_param *config; ··· 463 458 /* Allocation and initialization of RXDs in Rings */ 464 459 size = 0; 465 460 for (i = 0; i < config->rx_ring_num; i++) { 466 - if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) { 461 + if (config->rx_cfg[i].num_rxd % 462 + (rxd_count[nic->rxd_mode] + 1)) { 467 463 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name); 468 464 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ", 469 465 i); ··· 473 467 } 474 468 size += config->rx_cfg[i].num_rxd; 475 469 mac_control->rings[i].block_count = 476 - config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 477 - mac_control->rings[i].pkt_cnt = 478 - config->rx_cfg[i].num_rxd - mac_control->rings[i].block_count; 470 + config->rx_cfg[i].num_rxd / 471 + (rxd_count[nic->rxd_mode] + 1 ); 472 + mac_control->rings[i].pkt_cnt = config->rx_cfg[i].num_rxd - 473 + mac_control->rings[i].block_count; 479 474 } 480 - size = (size * (sizeof(RxD_t))); 475 + if (nic->rxd_mode == RXD_MODE_1) 476 + size = (size * (sizeof(RxD1_t))); 477 + else 478 + size = (size * (sizeof(RxD3_t))); 481 479 rx_sz = size; 482 480 483 481 for (i = 0; i < config->rx_ring_num; i++) { ··· 496 486 mac_control->rings[i].nic = nic; 497 487 mac_control->rings[i].ring_no = i; 498 488 499 - blk_cnt = 500 - config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 489 + blk_cnt = config->rx_cfg[i].num_rxd / 490 + (rxd_count[nic->rxd_mode] + 1); 501 491 /* Allocating all the Rx blocks */ 502 492 for (j = 0; j < blk_cnt; j++) { 503 - #ifndef CONFIG_2BUFF_MODE 504 - size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t)); 505 - #else 506 - size = SIZE_OF_BLOCK; 507 - #endif 493 + rx_block_info_t *rx_blocks; 494 + int l; 495 + 496 + rx_blocks = &mac_control->rings[i].rx_blocks[j]; 497 + size = SIZE_OF_BLOCK; //size is always page size 508 498 tmp_v_addr = pci_alloc_consistent(nic->pdev, size, 509 499 
&tmp_p_addr); 510 500 if (tmp_v_addr == NULL) { ··· 514 504 * memory that was alloced till the 515 505 * failure happened. 516 506 */ 517 - mac_control->rings[i].rx_blocks[j].block_virt_addr = 518 - tmp_v_addr; 507 + rx_blocks->block_virt_addr = tmp_v_addr; 519 508 return -ENOMEM; 520 509 } 521 510 memset(tmp_v_addr, 0, size); 511 + rx_blocks->block_virt_addr = tmp_v_addr; 512 + rx_blocks->block_dma_addr = tmp_p_addr; 513 + rx_blocks->rxds = kmalloc(sizeof(rxd_info_t)* 514 + rxd_count[nic->rxd_mode], 515 + GFP_KERNEL); 516 + for (l=0; l<rxd_count[nic->rxd_mode];l++) { 517 + rx_blocks->rxds[l].virt_addr = 518 + rx_blocks->block_virt_addr + 519 + (rxd_size[nic->rxd_mode] * l); 520 + rx_blocks->rxds[l].dma_addr = 521 + rx_blocks->block_dma_addr + 522 + (rxd_size[nic->rxd_mode] * l); 523 + } 524 + 522 525 mac_control->rings[i].rx_blocks[j].block_virt_addr = 523 526 tmp_v_addr; 524 527 mac_control->rings[i].rx_blocks[j].block_dma_addr = ··· 551 528 blk_cnt].block_dma_addr; 552 529 553 530 pre_rxd_blk = (RxD_block_t *) tmp_v_addr; 554 - pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD 555 - * marker. 556 - */ 557 - #ifndef CONFIG_2BUFF_MODE 558 531 pre_rxd_blk->reserved_2_pNext_RxD_block = 559 532 (unsigned long) tmp_v_addr_next; 560 - #endif 561 533 pre_rxd_blk->pNext_RxD_Blk_physical = 562 534 (u64) tmp_p_addr_next; 563 535 } 564 536 } 565 - 566 - #ifdef CONFIG_2BUFF_MODE 567 - /* 568 - * Allocation of Storages for buffer addresses in 2BUFF mode 569 - * and the buffers as well. 570 - */ 571 - for (i = 0; i < config->rx_ring_num; i++) { 572 - blk_cnt = 573 - config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 574 - mac_control->rings[i].ba = kmalloc((sizeof(buffAdd_t *) * blk_cnt), 537 + if (nic->rxd_mode >= RXD_MODE_3A) { 538 + /* 539 + * Allocation of Storages for buffer addresses in 2BUFF mode 540 + * and the buffers as well. 
541 + */ 542 + for (i = 0; i < config->rx_ring_num; i++) { 543 + blk_cnt = config->rx_cfg[i].num_rxd / 544 + (rxd_count[nic->rxd_mode]+ 1); 545 + mac_control->rings[i].ba = 546 + kmalloc((sizeof(buffAdd_t *) * blk_cnt), 575 547 GFP_KERNEL); 576 - if (!mac_control->rings[i].ba) 577 - return -ENOMEM; 578 - for (j = 0; j < blk_cnt; j++) { 579 - int k = 0; 580 - mac_control->rings[i].ba[j] = kmalloc((sizeof(buffAdd_t) * 581 - (MAX_RXDS_PER_BLOCK + 1)), 582 - GFP_KERNEL); 583 - if (!mac_control->rings[i].ba[j]) 548 + if (!mac_control->rings[i].ba) 584 549 return -ENOMEM; 585 - while (k != MAX_RXDS_PER_BLOCK) { 586 - ba = &mac_control->rings[i].ba[j][k]; 587 - 588 - ba->ba_0_org = (void *) kmalloc 589 - (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); 590 - if (!ba->ba_0_org) 550 + for (j = 0; j < blk_cnt; j++) { 551 + int k = 0; 552 + mac_control->rings[i].ba[j] = 553 + kmalloc((sizeof(buffAdd_t) * 554 + (rxd_count[nic->rxd_mode] + 1)), 555 + GFP_KERNEL); 556 + if (!mac_control->rings[i].ba[j]) 591 557 return -ENOMEM; 592 - tmp = (unsigned long) ba->ba_0_org; 593 - tmp += ALIGN_SIZE; 594 - tmp &= ~((unsigned long) ALIGN_SIZE); 595 - ba->ba_0 = (void *) tmp; 558 + while (k != rxd_count[nic->rxd_mode]) { 559 + ba = &mac_control->rings[i].ba[j][k]; 596 560 597 - ba->ba_1_org = (void *) kmalloc 598 - (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL); 599 - if (!ba->ba_1_org) 600 - return -ENOMEM; 601 - tmp = (unsigned long) ba->ba_1_org; 602 - tmp += ALIGN_SIZE; 603 - tmp &= ~((unsigned long) ALIGN_SIZE); 604 - ba->ba_1 = (void *) tmp; 605 - k++; 561 + ba->ba_0_org = (void *) kmalloc 562 + (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL); 563 + if (!ba->ba_0_org) 564 + return -ENOMEM; 565 + tmp = (unsigned long)ba->ba_0_org; 566 + tmp += ALIGN_SIZE; 567 + tmp &= ~((unsigned long) ALIGN_SIZE); 568 + ba->ba_0 = (void *) tmp; 569 + 570 + ba->ba_1_org = (void *) kmalloc 571 + (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL); 572 + if (!ba->ba_1_org) 573 + return -ENOMEM; 574 + tmp = (unsigned long) ba->ba_1_org; 575 + tmp += 
ALIGN_SIZE; 576 + tmp &= ~((unsigned long) ALIGN_SIZE); 577 + ba->ba_1 = (void *) tmp; 578 + k++; 579 + } 606 580 } 607 581 } 608 582 } 609 - #endif 610 583 611 584 /* Allocation and initialization of Statistics block */ 612 585 size = sizeof(StatInfo_t); ··· 688 669 kfree(mac_control->fifos[i].list_info); 689 670 } 690 671 691 - #ifndef CONFIG_2BUFF_MODE 692 - size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t)); 693 - #else 694 672 size = SIZE_OF_BLOCK; 695 - #endif 696 673 for (i = 0; i < config->rx_ring_num; i++) { 697 674 blk_cnt = mac_control->rings[i].block_count; 698 675 for (j = 0; j < blk_cnt; j++) { ··· 700 685 break; 701 686 pci_free_consistent(nic->pdev, size, 702 687 tmp_v_addr, tmp_p_addr); 688 + kfree(mac_control->rings[i].rx_blocks[j].rxds); 703 689 } 704 690 } 705 691 706 - #ifdef CONFIG_2BUFF_MODE 707 - /* Freeing buffer storage addresses in 2BUFF mode. */ 708 - for (i = 0; i < config->rx_ring_num; i++) { 709 - blk_cnt = 710 - config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1); 711 - for (j = 0; j < blk_cnt; j++) { 712 - int k = 0; 713 - if (!mac_control->rings[i].ba[j]) 714 - continue; 715 - while (k != MAX_RXDS_PER_BLOCK) { 716 - buffAdd_t *ba = &mac_control->rings[i].ba[j][k]; 717 - kfree(ba->ba_0_org); 718 - kfree(ba->ba_1_org); 719 - k++; 692 + if (nic->rxd_mode >= RXD_MODE_3A) { 693 + /* Freeing buffer storage addresses in 2BUFF mode. 
*/ 694 + for (i = 0; i < config->rx_ring_num; i++) { 695 + blk_cnt = config->rx_cfg[i].num_rxd / 696 + (rxd_count[nic->rxd_mode] + 1); 697 + for (j = 0; j < blk_cnt; j++) { 698 + int k = 0; 699 + if (!mac_control->rings[i].ba[j]) 700 + continue; 701 + while (k != rxd_count[nic->rxd_mode]) { 702 + buffAdd_t *ba = 703 + &mac_control->rings[i].ba[j][k]; 704 + kfree(ba->ba_0_org); 705 + kfree(ba->ba_1_org); 706 + k++; 707 + } 708 + kfree(mac_control->rings[i].ba[j]); 720 709 } 721 - kfree(mac_control->rings[i].ba[j]); 710 + kfree(mac_control->rings[i].ba); 722 711 } 723 - kfree(mac_control->rings[i].ba); 724 712 } 725 - #endif 726 713 727 714 if (mac_control->stats_mem) { 728 715 pci_free_consistent(nic->pdev, ··· 1911 1894 val64 = readq(&bar0->prc_ctrl_n[i]); 1912 1895 if (nic->config.bimodal) 1913 1896 val64 |= PRC_CTRL_BIMODAL_INTERRUPT; 1914 - #ifndef CONFIG_2BUFF_MODE 1915 - val64 |= PRC_CTRL_RC_ENABLED; 1916 - #else 1917 - val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3; 1918 - #endif 1897 + if (nic->rxd_mode == RXD_MODE_1) 1898 + val64 |= PRC_CTRL_RC_ENABLED; 1899 + else 1900 + val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3; 1919 1901 writeq(val64, &bar0->prc_ctrl_n[i]); 1920 1902 } 1921 1903 1922 - #ifdef CONFIG_2BUFF_MODE 1923 - /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */ 1924 - val64 = readq(&bar0->rx_pa_cfg); 1925 - val64 |= RX_PA_CFG_IGNORE_L2_ERR; 1926 - writeq(val64, &bar0->rx_pa_cfg); 1927 - #endif 1904 + if (nic->rxd_mode == RXD_MODE_3B) { 1905 + /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */ 1906 + val64 = readq(&bar0->rx_pa_cfg); 1907 + val64 |= RX_PA_CFG_IGNORE_L2_ERR; 1908 + writeq(val64, &bar0->rx_pa_cfg); 1909 + } 1928 1910 1929 1911 /* 1930 1912 * Enabling MC-RLDRAM. 
After enabling the device, we timeout ··· 2106 2090 } 2107 2091 } 2108 2092 2093 + int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb) 2094 + { 2095 + struct net_device *dev = nic->dev; 2096 + struct sk_buff *frag_list; 2097 + u64 tmp; 2098 + 2099 + /* Buffer-1 receives L3/L4 headers */ 2100 + ((RxD3_t*)rxdp)->Buffer1_ptr = pci_map_single 2101 + (nic->pdev, skb->data, l3l4hdr_size + 4, 2102 + PCI_DMA_FROMDEVICE); 2103 + 2104 + /* skb_shinfo(skb)->frag_list will have L4 data payload */ 2105 + skb_shinfo(skb)->frag_list = dev_alloc_skb(dev->mtu + ALIGN_SIZE); 2106 + if (skb_shinfo(skb)->frag_list == NULL) { 2107 + DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb failed\n ", dev->name); 2108 + return -ENOMEM ; 2109 + } 2110 + frag_list = skb_shinfo(skb)->frag_list; 2111 + frag_list->next = NULL; 2112 + tmp = (u64) frag_list->data; 2113 + tmp += ALIGN_SIZE; 2114 + tmp &= ~ALIGN_SIZE; 2115 + frag_list->data = (void *) tmp; 2116 + frag_list->tail = (void *) tmp; 2117 + 2118 + /* Buffer-2 receives L4 data payload */ 2119 + ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, 2120 + frag_list->data, dev->mtu, 2121 + PCI_DMA_FROMDEVICE); 2122 + rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4); 2123 + rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu); 2124 + 2125 + return SUCCESS; 2126 + } 2127 + 2109 2128 /** 2110 2129 * fill_rx_buffers - Allocates the Rx side skbs 2111 2130 * @nic: device private variable ··· 2168 2117 struct sk_buff *skb; 2169 2118 RxD_t *rxdp; 2170 2119 int off, off1, size, block_no, block_no1; 2171 - int offset, offset1; 2172 2120 u32 alloc_tab = 0; 2173 2121 u32 alloc_cnt; 2174 2122 mac_info_t *mac_control; 2175 2123 struct config_param *config; 2176 - #ifdef CONFIG_2BUFF_MODE 2177 - RxD_t *rxdpnext; 2178 - int nextblk; 2179 2124 u64 tmp; 2180 2125 buffAdd_t *ba; 2181 - dma_addr_t rxdpphys; 2182 - #endif 2183 2126 #ifndef CONFIG_S2IO_NAPI 2184 2127 unsigned long flags; 2185 2128 #endif ··· 2183 2138 config = &nic->config; 2184 2139 alloc_cnt 
= mac_control->rings[ring_no].pkt_cnt - 2185 2140 atomic_read(&nic->rx_bufs_left[ring_no]); 2186 - size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + 2187 - HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2188 2141 2189 2142 while (alloc_tab < alloc_cnt) { 2190 2143 block_no = mac_control->rings[ring_no].rx_curr_put_info. ··· 2191 2148 block_index; 2192 2149 off = mac_control->rings[ring_no].rx_curr_put_info.offset; 2193 2150 off1 = mac_control->rings[ring_no].rx_curr_get_info.offset; 2194 - #ifndef CONFIG_2BUFF_MODE 2195 - offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off; 2196 - offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1; 2197 - #else 2198 - offset = block_no * (MAX_RXDS_PER_BLOCK) + off; 2199 - offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1; 2200 - #endif 2201 2151 2202 - rxdp = mac_control->rings[ring_no].rx_blocks[block_no]. 2203 - block_virt_addr + off; 2204 - if ((offset == offset1) && (rxdp->Host_Control)) { 2205 - DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name); 2152 + rxdp = mac_control->rings[ring_no]. 2153 + rx_blocks[block_no].rxds[off].virt_addr; 2154 + 2155 + if ((block_no == block_no1) && (off == off1) && 2156 + (rxdp->Host_Control)) { 2157 + DBG_PRINT(INTR_DBG, "%s: Get and Put", 2158 + dev->name); 2206 2159 DBG_PRINT(INTR_DBG, " info equated\n"); 2207 2160 goto end; 2208 2161 } 2209 - #ifndef CONFIG_2BUFF_MODE 2210 - if (rxdp->Control_1 == END_OF_BLOCK) { 2162 + if (off && (off == rxd_count[nic->rxd_mode])) { 2211 2163 mac_control->rings[ring_no].rx_curr_put_info. 2212 2164 block_index++; 2165 + if (mac_control->rings[ring_no].rx_curr_put_info. 2166 + block_index == mac_control->rings[ring_no]. 2167 + block_count) 2168 + mac_control->rings[ring_no].rx_curr_put_info. 2169 + block_index = 0; 2170 + block_no = mac_control->rings[ring_no]. 2171 + rx_curr_put_info.block_index; 2172 + if (off == rxd_count[nic->rxd_mode]) 2173 + off = 0; 2213 2174 mac_control->rings[ring_no].rx_curr_put_info. 
2214 - block_index %= mac_control->rings[ring_no].block_count; 2215 - block_no = mac_control->rings[ring_no].rx_curr_put_info. 2216 - block_index; 2217 - off++; 2218 - off %= (MAX_RXDS_PER_BLOCK + 1); 2219 - mac_control->rings[ring_no].rx_curr_put_info.offset = 2220 - off; 2221 - rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2); 2175 + offset = off; 2176 + rxdp = mac_control->rings[ring_no]. 2177 + rx_blocks[block_no].block_virt_addr; 2222 2178 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2223 2179 dev->name, rxdp); 2224 2180 } 2225 2181 #ifndef CONFIG_S2IO_NAPI 2226 2182 spin_lock_irqsave(&nic->put_lock, flags); 2227 2183 mac_control->rings[ring_no].put_pos = 2228 - (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off; 2184 + (block_no * (rxd_count[nic->rxd_mode] + 1)) + off; 2229 2185 spin_unlock_irqrestore(&nic->put_lock, flags); 2230 2186 #endif 2231 - #else 2232 - if (rxdp->Host_Control == END_OF_BLOCK) { 2187 + if ((rxdp->Control_1 & RXD_OWN_XENA) && 2188 + ((nic->rxd_mode >= RXD_MODE_3A) && 2189 + (rxdp->Control_2 & BIT(0)))) { 2233 2190 mac_control->rings[ring_no].rx_curr_put_info. 2234 - block_index++; 2235 - mac_control->rings[ring_no].rx_curr_put_info.block_index 2236 - %= mac_control->rings[ring_no].block_count; 2237 - block_no = mac_control->rings[ring_no].rx_curr_put_info 2238 - .block_index; 2239 - off = 0; 2240 - DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n", 2241 - dev->name, block_no, 2242 - (unsigned long long) rxdp->Control_1); 2243 - mac_control->rings[ring_no].rx_curr_put_info.offset = 2244 - off; 2245 - rxdp = mac_control->rings[ring_no].rx_blocks[block_no]. 
2246 - block_virt_addr; 2247 - } 2248 - #ifndef CONFIG_S2IO_NAPI 2249 - spin_lock_irqsave(&nic->put_lock, flags); 2250 - mac_control->rings[ring_no].put_pos = (block_no * 2251 - (MAX_RXDS_PER_BLOCK + 1)) + off; 2252 - spin_unlock_irqrestore(&nic->put_lock, flags); 2253 - #endif 2254 - #endif 2255 - 2256 - #ifndef CONFIG_2BUFF_MODE 2257 - if (rxdp->Control_1 & RXD_OWN_XENA) 2258 - #else 2259 - if (rxdp->Control_2 & BIT(0)) 2260 - #endif 2261 - { 2262 - mac_control->rings[ring_no].rx_curr_put_info. 2263 - offset = off; 2191 + offset = off; 2264 2192 goto end; 2265 2193 } 2266 - #ifdef CONFIG_2BUFF_MODE 2267 - /* 2268 - * RxDs Spanning cache lines will be replenished only 2269 - * if the succeeding RxD is also owned by Host. It 2270 - * will always be the ((8*i)+3) and ((8*i)+6) 2271 - * descriptors for the 48 byte descriptor. The offending 2272 - * decsriptor is of-course the 3rd descriptor. 2273 - */ 2274 - rxdpphys = mac_control->rings[ring_no].rx_blocks[block_no]. 2275 - block_dma_addr + (off * sizeof(RxD_t)); 2276 - if (((u64) (rxdpphys)) % 128 > 80) { 2277 - rxdpnext = mac_control->rings[ring_no].rx_blocks[block_no]. 
2278 - block_virt_addr + (off + 1); 2279 - if (rxdpnext->Host_Control == END_OF_BLOCK) { 2280 - nextblk = (block_no + 1) % 2281 - (mac_control->rings[ring_no].block_count); 2282 - rxdpnext = mac_control->rings[ring_no].rx_blocks 2283 - [nextblk].block_virt_addr; 2284 - } 2285 - if (rxdpnext->Control_2 & BIT(0)) 2286 - goto end; 2287 - } 2288 - #endif 2194 + /* calculate size of skb based on ring mode */ 2195 + size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + 2196 + HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2197 + if (nic->rxd_mode == RXD_MODE_1) 2198 + size += NET_IP_ALIGN; 2199 + else if (nic->rxd_mode == RXD_MODE_3B) 2200 + size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; 2201 + else 2202 + size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4; 2289 2203 2290 - #ifndef CONFIG_2BUFF_MODE 2291 - skb = dev_alloc_skb(size + NET_IP_ALIGN); 2292 - #else 2293 - skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4); 2294 - #endif 2295 - if (!skb) { 2204 + /* allocate skb */ 2205 + skb = dev_alloc_skb(size); 2206 + if(!skb) { 2296 2207 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name); 2297 2208 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n"); 2298 2209 if (first_rxdp) { 2299 2210 wmb(); 2300 2211 first_rxdp->Control_1 |= RXD_OWN_XENA; 2301 2212 } 2302 - return -ENOMEM; 2213 + return -ENOMEM ; 2303 2214 } 2304 - #ifndef CONFIG_2BUFF_MODE 2305 - skb_reserve(skb, NET_IP_ALIGN); 2306 - memset(rxdp, 0, sizeof(RxD_t)); 2307 - rxdp->Buffer0_ptr = pci_map_single 2308 - (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE); 2309 - rxdp->Control_2 &= (~MASK_BUFFER0_SIZE); 2310 - rxdp->Control_2 |= SET_BUFFER0_SIZE(size); 2215 + if (nic->rxd_mode == RXD_MODE_1) { 2216 + /* 1 buffer mode - normal operation mode */ 2217 + memset(rxdp, 0, sizeof(RxD1_t)); 2218 + skb_reserve(skb, NET_IP_ALIGN); 2219 + ((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single 2220 + (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE); 2221 + rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1); 2222 + rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size); 
2223 + 2224 + } else if (nic->rxd_mode >= RXD_MODE_3A) { 2225 + /* 2226 + * 2 or 3 buffer mode - 2227 + * Both 2 buffer mode and 3 buffer mode provides 128 2228 + * byte aligned receive buffers. 2229 + * 2230 + * 3 buffer mode provides header separation where in 2231 + * skb->data will have L3/L4 headers where as 2232 + * skb_shinfo(skb)->frag_list will have the L4 data 2233 + * payload 2234 + */ 2235 + 2236 + memset(rxdp, 0, sizeof(RxD3_t)); 2237 + ba = &mac_control->rings[ring_no].ba[block_no][off]; 2238 + skb_reserve(skb, BUF0_LEN); 2239 + tmp = (u64)(unsigned long) skb->data; 2240 + tmp += ALIGN_SIZE; 2241 + tmp &= ~ALIGN_SIZE; 2242 + skb->data = (void *) (unsigned long)tmp; 2243 + skb->tail = (void *) (unsigned long)tmp; 2244 + 2245 + ((RxD3_t*)rxdp)->Buffer0_ptr = 2246 + pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2247 + PCI_DMA_FROMDEVICE); 2248 + rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); 2249 + if (nic->rxd_mode == RXD_MODE_3B) { 2250 + /* Two buffer mode */ 2251 + 2252 + /* 2253 + * Buffer2 will have L3/L4 header plus 2254 + * L4 payload 2255 + */ 2256 + ((RxD3_t*)rxdp)->Buffer2_ptr = pci_map_single 2257 + (nic->pdev, skb->data, dev->mtu + 4, 2258 + PCI_DMA_FROMDEVICE); 2259 + 2260 + /* Buffer-1 will be dummy buffer not used */ 2261 + ((RxD3_t*)rxdp)->Buffer1_ptr = 2262 + pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, 2263 + PCI_DMA_FROMDEVICE); 2264 + rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2265 + rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2266 + (dev->mtu + 4); 2267 + } else { 2268 + /* 3 buffer mode */ 2269 + if (fill_rxd_3buf(nic, rxdp, skb) == -ENOMEM) { 2270 + dev_kfree_skb_irq(skb); 2271 + if (first_rxdp) { 2272 + wmb(); 2273 + first_rxdp->Control_1 |= 2274 + RXD_OWN_XENA; 2275 + } 2276 + return -ENOMEM ; 2277 + } 2278 + } 2279 + rxdp->Control_2 |= BIT(0); 2280 + } 2311 2281 rxdp->Host_Control = (unsigned long) (skb); 2312 2282 if (alloc_tab & ((1 << rxsync_frequency) - 1)) 2313 2283 rxdp->Control_1 |= RXD_OWN_XENA; 2314 2284 off++; 2315 - off 
%= (MAX_RXDS_PER_BLOCK + 1); 2285 + if (off == (rxd_count[nic->rxd_mode] + 1)) 2286 + off = 0; 2316 2287 mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2317 - #else 2318 - ba = &mac_control->rings[ring_no].ba[block_no][off]; 2319 - skb_reserve(skb, BUF0_LEN); 2320 - tmp = ((unsigned long) skb->data & ALIGN_SIZE); 2321 - if (tmp) 2322 - skb_reserve(skb, (ALIGN_SIZE + 1) - tmp); 2323 2288 2324 - memset(rxdp, 0, sizeof(RxD_t)); 2325 - rxdp->Buffer2_ptr = pci_map_single 2326 - (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4, 2327 - PCI_DMA_FROMDEVICE); 2328 - rxdp->Buffer0_ptr = 2329 - pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2330 - PCI_DMA_FROMDEVICE); 2331 - rxdp->Buffer1_ptr = 2332 - pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN, 2333 - PCI_DMA_FROMDEVICE); 2334 - 2335 - rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4); 2336 - rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN); 2337 - rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */ 2338 - rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */ 2339 - rxdp->Host_Control = (u64) ((unsigned long) (skb)); 2340 - if (alloc_tab & ((1 << rxsync_frequency) - 1)) 2341 - rxdp->Control_1 |= RXD_OWN_XENA; 2342 - off++; 2343 - mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2344 - #endif 2345 2289 rxdp->Control_2 |= SET_RXD_MARKER; 2346 - 2347 2290 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { 2348 2291 if (first_rxdp) { 2349 2292 wmb(); ··· 2354 2325 return SUCCESS; 2355 2326 } 2356 2327 2328 + static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) 2329 + { 2330 + struct net_device *dev = sp->dev; 2331 + int j; 2332 + struct sk_buff *skb; 2333 + RxD_t *rxdp; 2334 + mac_info_t *mac_control; 2335 + buffAdd_t *ba; 2336 + 2337 + mac_control = &sp->mac_control; 2338 + for (j = 0 ; j < rxd_count[sp->rxd_mode]; j++) { 2339 + rxdp = mac_control->rings[ring_no]. 
2340 + rx_blocks[blk].rxds[j].virt_addr; 2341 + skb = (struct sk_buff *) 2342 + ((unsigned long) rxdp->Host_Control); 2343 + if (!skb) { 2344 + continue; 2345 + } 2346 + if (sp->rxd_mode == RXD_MODE_1) { 2347 + pci_unmap_single(sp->pdev, (dma_addr_t) 2348 + ((RxD1_t*)rxdp)->Buffer0_ptr, 2349 + dev->mtu + 2350 + HEADER_ETHERNET_II_802_3_SIZE 2351 + + HEADER_802_2_SIZE + 2352 + HEADER_SNAP_SIZE, 2353 + PCI_DMA_FROMDEVICE); 2354 + memset(rxdp, 0, sizeof(RxD1_t)); 2355 + } else if(sp->rxd_mode == RXD_MODE_3B) { 2356 + ba = &mac_control->rings[ring_no]. 2357 + ba[blk][j]; 2358 + pci_unmap_single(sp->pdev, (dma_addr_t) 2359 + ((RxD3_t*)rxdp)->Buffer0_ptr, 2360 + BUF0_LEN, 2361 + PCI_DMA_FROMDEVICE); 2362 + pci_unmap_single(sp->pdev, (dma_addr_t) 2363 + ((RxD3_t*)rxdp)->Buffer1_ptr, 2364 + BUF1_LEN, 2365 + PCI_DMA_FROMDEVICE); 2366 + pci_unmap_single(sp->pdev, (dma_addr_t) 2367 + ((RxD3_t*)rxdp)->Buffer2_ptr, 2368 + dev->mtu + 4, 2369 + PCI_DMA_FROMDEVICE); 2370 + memset(rxdp, 0, sizeof(RxD3_t)); 2371 + } else { 2372 + pci_unmap_single(sp->pdev, (dma_addr_t) 2373 + ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2374 + PCI_DMA_FROMDEVICE); 2375 + pci_unmap_single(sp->pdev, (dma_addr_t) 2376 + ((RxD3_t*)rxdp)->Buffer1_ptr, 2377 + l3l4hdr_size + 4, 2378 + PCI_DMA_FROMDEVICE); 2379 + pci_unmap_single(sp->pdev, (dma_addr_t) 2380 + ((RxD3_t*)rxdp)->Buffer2_ptr, dev->mtu, 2381 + PCI_DMA_FROMDEVICE); 2382 + memset(rxdp, 0, sizeof(RxD3_t)); 2383 + } 2384 + dev_kfree_skb(skb); 2385 + atomic_dec(&sp->rx_bufs_left[ring_no]); 2386 + } 2387 + } 2388 + 2357 2389 /** 2358 2390 * free_rx_buffers - Frees all Rx buffers 2359 2391 * @sp: device private variable. 
··· 2427 2337 static void free_rx_buffers(struct s2io_nic *sp) 2428 2338 { 2429 2339 struct net_device *dev = sp->dev; 2430 - int i, j, blk = 0, off, buf_cnt = 0; 2431 - RxD_t *rxdp; 2432 - struct sk_buff *skb; 2340 + int i, blk = 0, buf_cnt = 0; 2433 2341 mac_info_t *mac_control; 2434 2342 struct config_param *config; 2435 - #ifdef CONFIG_2BUFF_MODE 2436 - buffAdd_t *ba; 2437 - #endif 2438 2343 2439 2344 mac_control = &sp->mac_control; 2440 2345 config = &sp->config; 2441 2346 2442 2347 for (i = 0; i < config->rx_ring_num; i++) { 2443 - for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) { 2444 - off = j % (MAX_RXDS_PER_BLOCK + 1); 2445 - rxdp = mac_control->rings[i].rx_blocks[blk]. 2446 - block_virt_addr + off; 2348 + for (blk = 0; blk < rx_ring_sz[i]; blk++) 2349 + free_rxd_blk(sp,i,blk); 2447 2350 2448 - #ifndef CONFIG_2BUFF_MODE 2449 - if (rxdp->Control_1 == END_OF_BLOCK) { 2450 - rxdp = 2451 - (RxD_t *) ((unsigned long) rxdp-> 2452 - Control_2); 2453 - j++; 2454 - blk++; 2455 - } 2456 - #else 2457 - if (rxdp->Host_Control == END_OF_BLOCK) { 2458 - blk++; 2459 - continue; 2460 - } 2461 - #endif 2462 - 2463 - if (!(rxdp->Control_1 & RXD_OWN_XENA)) { 2464 - memset(rxdp, 0, sizeof(RxD_t)); 2465 - continue; 2466 - } 2467 - 2468 - skb = 2469 - (struct sk_buff *) ((unsigned long) rxdp-> 2470 - Host_Control); 2471 - if (skb) { 2472 - #ifndef CONFIG_2BUFF_MODE 2473 - pci_unmap_single(sp->pdev, (dma_addr_t) 2474 - rxdp->Buffer0_ptr, 2475 - dev->mtu + 2476 - HEADER_ETHERNET_II_802_3_SIZE 2477 - + HEADER_802_2_SIZE + 2478 - HEADER_SNAP_SIZE, 2479 - PCI_DMA_FROMDEVICE); 2480 - #else 2481 - ba = &mac_control->rings[i].ba[blk][off]; 2482 - pci_unmap_single(sp->pdev, (dma_addr_t) 2483 - rxdp->Buffer0_ptr, 2484 - BUF0_LEN, 2485 - PCI_DMA_FROMDEVICE); 2486 - pci_unmap_single(sp->pdev, (dma_addr_t) 2487 - rxdp->Buffer1_ptr, 2488 - BUF1_LEN, 2489 - PCI_DMA_FROMDEVICE); 2490 - pci_unmap_single(sp->pdev, (dma_addr_t) 2491 - rxdp->Buffer2_ptr, 2492 - dev->mtu + BUF0_LEN + 4, 
2493 - PCI_DMA_FROMDEVICE); 2494 - #endif 2495 - dev_kfree_skb(skb); 2496 - atomic_dec(&sp->rx_bufs_left[i]); 2497 - buf_cnt++; 2498 - } 2499 - memset(rxdp, 0, sizeof(RxD_t)); 2500 - } 2501 2351 mac_control->rings[i].rx_curr_put_info.block_index = 0; 2502 2352 mac_control->rings[i].rx_curr_get_info.block_index = 0; 2503 2353 mac_control->rings[i].rx_curr_put_info.offset = 0; ··· 2543 2513 { 2544 2514 nic_t *nic = ring_data->nic; 2545 2515 struct net_device *dev = (struct net_device *) nic->dev; 2546 - int get_block, get_offset, put_block, put_offset, ring_bufs; 2516 + int get_block, put_block, put_offset; 2547 2517 rx_curr_get_info_t get_info, put_info; 2548 2518 RxD_t *rxdp; 2549 2519 struct sk_buff *skb; ··· 2562 2532 get_block = get_info.block_index; 2563 2533 put_info = ring_data->rx_curr_put_info; 2564 2534 put_block = put_info.block_index; 2565 - ring_bufs = get_info.ring_len+1; 2566 - rxdp = ring_data->rx_blocks[get_block].block_virt_addr + 2567 - get_info.offset; 2568 - get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) + 2569 - get_info.offset; 2535 + rxdp = ring_data->rx_blocks[get_block].rxds[get_info.offset].virt_addr; 2570 2536 #ifndef CONFIG_S2IO_NAPI 2571 2537 spin_lock(&nic->put_lock); 2572 2538 put_offset = ring_data->put_pos; 2573 2539 spin_unlock(&nic->put_lock); 2574 2540 #else 2575 - put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) + 2541 + put_offset = (put_block * (rxd_count[nic->rxd_mode] + 1)) + 2576 2542 put_info.offset; 2577 2543 #endif 2578 - while (RXD_IS_UP2DT(rxdp) && 2579 - (((get_offset + 1) % ring_bufs) != put_offset)) { 2544 + while (RXD_IS_UP2DT(rxdp)) { 2545 + /* If your are next to put index then it's FIFO full condition */ 2546 + if ((get_block == put_block) && 2547 + (get_info.offset + 1) == put_info.offset) { 2548 + DBG_PRINT(ERR_DBG, "%s: Ring Full\n",dev->name); 2549 + break; 2550 + } 2580 2551 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2581 2552 if (skb == NULL) { 2582 2553 DBG_PRINT(ERR_DBG, "%s: 
The skb is ", ··· 2586 2555 spin_unlock(&nic->rx_lock); 2587 2556 return; 2588 2557 } 2589 - #ifndef CONFIG_2BUFF_MODE 2590 - pci_unmap_single(nic->pdev, (dma_addr_t) 2591 - rxdp->Buffer0_ptr, 2558 + if (nic->rxd_mode == RXD_MODE_1) { 2559 + pci_unmap_single(nic->pdev, (dma_addr_t) 2560 + ((RxD1_t*)rxdp)->Buffer0_ptr, 2592 2561 dev->mtu + 2593 2562 HEADER_ETHERNET_II_802_3_SIZE + 2594 2563 HEADER_802_2_SIZE + 2595 2564 HEADER_SNAP_SIZE, 2596 2565 PCI_DMA_FROMDEVICE); 2597 - #else 2598 - pci_unmap_single(nic->pdev, (dma_addr_t) 2599 - rxdp->Buffer0_ptr, 2566 + } else if (nic->rxd_mode == RXD_MODE_3B) { 2567 + pci_unmap_single(nic->pdev, (dma_addr_t) 2568 + ((RxD3_t*)rxdp)->Buffer0_ptr, 2600 2569 BUF0_LEN, PCI_DMA_FROMDEVICE); 2601 - pci_unmap_single(nic->pdev, (dma_addr_t) 2602 - rxdp->Buffer1_ptr, 2570 + pci_unmap_single(nic->pdev, (dma_addr_t) 2571 + ((RxD3_t*)rxdp)->Buffer1_ptr, 2603 2572 BUF1_LEN, PCI_DMA_FROMDEVICE); 2604 - pci_unmap_single(nic->pdev, (dma_addr_t) 2605 - rxdp->Buffer2_ptr, 2606 - dev->mtu + BUF0_LEN + 4, 2573 + pci_unmap_single(nic->pdev, (dma_addr_t) 2574 + ((RxD3_t*)rxdp)->Buffer2_ptr, 2575 + dev->mtu + 4, 2607 2576 PCI_DMA_FROMDEVICE); 2608 - #endif 2577 + } else { 2578 + pci_unmap_single(nic->pdev, (dma_addr_t) 2579 + ((RxD3_t*)rxdp)->Buffer0_ptr, BUF0_LEN, 2580 + PCI_DMA_FROMDEVICE); 2581 + pci_unmap_single(nic->pdev, (dma_addr_t) 2582 + ((RxD3_t*)rxdp)->Buffer1_ptr, 2583 + l3l4hdr_size + 4, 2584 + PCI_DMA_FROMDEVICE); 2585 + pci_unmap_single(nic->pdev, (dma_addr_t) 2586 + ((RxD3_t*)rxdp)->Buffer2_ptr, 2587 + dev->mtu, PCI_DMA_FROMDEVICE); 2588 + } 2609 2589 rx_osm_handler(ring_data, rxdp); 2610 2590 get_info.offset++; 2611 - ring_data->rx_curr_get_info.offset = 2612 - get_info.offset; 2613 - rxdp = ring_data->rx_blocks[get_block].block_virt_addr + 2614 - get_info.offset; 2615 - if (get_info.offset && 2616 - (!(get_info.offset % MAX_RXDS_PER_BLOCK))) { 2591 + ring_data->rx_curr_get_info.offset = get_info.offset; 2592 + rxdp = 
ring_data->rx_blocks[get_block]. 2593 + rxds[get_info.offset].virt_addr; 2594 + if (get_info.offset == rxd_count[nic->rxd_mode]) { 2617 2595 get_info.offset = 0; 2618 - ring_data->rx_curr_get_info.offset 2619 - = get_info.offset; 2596 + ring_data->rx_curr_get_info.offset = get_info.offset; 2620 2597 get_block++; 2621 - get_block %= ring_data->block_count; 2622 - ring_data->rx_curr_get_info.block_index 2623 - = get_block; 2598 + if (get_block == ring_data->block_count) 2599 + get_block = 0; 2600 + ring_data->rx_curr_get_info.block_index = get_block; 2624 2601 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 2625 2602 } 2626 2603 2627 - get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) + 2628 - get_info.offset; 2629 2604 #ifdef CONFIG_S2IO_NAPI 2630 2605 nic->pkts_to_process -= 1; 2631 2606 if (!nic->pkts_to_process) ··· 3081 3044 3082 3045 int wait_for_msix_trans(nic_t *nic, int i) 3083 3046 { 3084 - XENA_dev_config_t __iomem *bar0 = nic->bar0; 3047 + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3085 3048 u64 val64; 3086 3049 int ret = 0, cnt = 0; 3087 3050 ··· 3102 3065 3103 3066 void restore_xmsi_data(nic_t *nic) 3104 3067 { 3105 - XENA_dev_config_t __iomem *bar0 = nic->bar0; 3068 + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3106 3069 u64 val64; 3107 3070 int i; 3108 3071 ··· 3120 3083 3121 3084 void store_xmsi_data(nic_t *nic) 3122 3085 { 3123 - XENA_dev_config_t __iomem *bar0 = nic->bar0; 3086 + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3124 3087 u64 val64, addr, data; 3125 3088 int i; 3126 3089 ··· 3143 3106 3144 3107 int s2io_enable_msi(nic_t *nic) 3145 3108 { 3146 - XENA_dev_config_t __iomem *bar0 = nic->bar0; 3109 + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3147 3110 u16 msi_ctrl, msg_val; 3148 3111 struct config_param *config = &nic->config; 3149 3112 struct net_device *dev = nic->dev; ··· 3193 3156 3194 3157 int s2io_enable_msi_x(nic_t *nic) 3195 3158 { 3196 - XENA_dev_config_t 
__iomem *bar0 = nic->bar0; 3159 + XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0; 3197 3160 u64 tx_mat, rx_mat; 3198 3161 u16 msi_control; /* Temp variable */ 3199 3162 int ret, i, j, msix_indx = 1; ··· 5574 5537 ((unsigned long) rxdp->Host_Control); 5575 5538 int ring_no = ring_data->ring_no; 5576 5539 u16 l3_csum, l4_csum; 5577 - #ifdef CONFIG_2BUFF_MODE 5578 - int buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2); 5579 - int buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2); 5580 - int get_block = ring_data->rx_curr_get_info.block_index; 5581 - int get_off = ring_data->rx_curr_get_info.offset; 5582 - buffAdd_t *ba = &ring_data->ba[get_block][get_off]; 5583 - unsigned char *buff; 5584 - #else 5585 - u16 len = (u16) ((RXD_GET_BUFFER0_SIZE(rxdp->Control_2)) >> 48);; 5586 - #endif 5540 + 5587 5541 skb->dev = dev; 5588 5542 if (rxdp->Control_1 & RXD_T_CODE) { 5589 5543 unsigned long long err = rxdp->Control_1 & RXD_T_CODE; ··· 5591 5563 rxdp->Host_Control = 0; 5592 5564 sp->rx_pkt_count++; 5593 5565 sp->stats.rx_packets++; 5594 - #ifndef CONFIG_2BUFF_MODE 5595 - sp->stats.rx_bytes += len; 5596 - #else 5597 - sp->stats.rx_bytes += buf0_len + buf2_len; 5598 - #endif 5566 + if (sp->rxd_mode == RXD_MODE_1) { 5567 + int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); 5599 5568 5600 - #ifndef CONFIG_2BUFF_MODE 5601 - skb_put(skb, len); 5602 - #else 5603 - buff = skb_push(skb, buf0_len); 5604 - memcpy(buff, ba->ba_0, buf0_len); 5605 - skb_put(skb, buf2_len); 5606 - #endif 5569 + sp->stats.rx_bytes += len; 5570 + skb_put(skb, len); 5571 + 5572 + } else if (sp->rxd_mode >= RXD_MODE_3A) { 5573 + int get_block = ring_data->rx_curr_get_info.block_index; 5574 + int get_off = ring_data->rx_curr_get_info.offset; 5575 + int buf0_len = RXD_GET_BUFFER0_SIZE_3(rxdp->Control_2); 5576 + int buf2_len = RXD_GET_BUFFER2_SIZE_3(rxdp->Control_2); 5577 + unsigned char *buff = skb_push(skb, buf0_len); 5578 + 5579 + buffAdd_t *ba = &ring_data->ba[get_block][get_off]; 5580 + 
sp->stats.rx_bytes += buf0_len + buf2_len; 5581 + memcpy(buff, ba->ba_0, buf0_len); 5582 + 5583 + if (sp->rxd_mode == RXD_MODE_3A) { 5584 + int buf1_len = RXD_GET_BUFFER1_SIZE_3(rxdp->Control_2); 5585 + 5586 + skb_put(skb, buf1_len); 5587 + skb->len += buf2_len; 5588 + skb->data_len += buf2_len; 5589 + skb->truesize += buf2_len; 5590 + skb_put(skb_shinfo(skb)->frag_list, buf2_len); 5591 + sp->stats.rx_bytes += buf1_len; 5592 + 5593 + } else 5594 + skb_put(skb, buf2_len); 5595 + } 5607 5596 5608 5597 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && 5609 5598 (sp->rx_csum)) { ··· 5756 5711 5757 5712 module_param(tx_fifo_num, int, 0); 5758 5713 module_param(rx_ring_num, int, 0); 5714 + module_param(rx_ring_mode, int, 0); 5759 5715 module_param_array(tx_fifo_len, uint, NULL, 0); 5760 5716 module_param_array(rx_ring_sz, uint, NULL, 0); 5761 5717 module_param_array(rts_frm_len, uint, NULL, 0); ··· 5768 5722 module_param(tmac_util_period, int, 0); 5769 5723 module_param(rmac_util_period, int, 0); 5770 5724 module_param(bimodal, bool, 0); 5725 + module_param(l3l4hdr_size, int , 0); 5771 5726 #ifndef CONFIG_S2IO_NAPI 5772 5727 module_param(indicate_max_pkts, int, 0); 5773 5728 #endif ··· 5890 5843 sp->pdev = pdev; 5891 5844 sp->high_dma_flag = dma_flag; 5892 5845 sp->device_enabled_once = FALSE; 5846 + if (rx_ring_mode == 1) 5847 + sp->rxd_mode = RXD_MODE_1; 5848 + if (rx_ring_mode == 2) 5849 + sp->rxd_mode = RXD_MODE_3B; 5850 + if (rx_ring_mode == 3) 5851 + sp->rxd_mode = RXD_MODE_3A; 5852 + 5893 5853 sp->intr_type = dev_intr_type; 5894 5854 5895 5855 if ((pdev->device == PCI_DEVICE_ID_HERC_WIN) || ··· 5949 5895 config->rx_ring_num = rx_ring_num; 5950 5896 for (i = 0; i < MAX_RX_RINGS; i++) { 5951 5897 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 5952 - (MAX_RXDS_PER_BLOCK + 1); 5898 + (rxd_count[sp->rxd_mode] + 1); 5953 5899 config->rx_cfg[i].ring_priority = i; 5954 5900 } 5955 5901 ··· 6144 6090 DBG_PRINT(ERR_DBG, "(rev %d), Version %s", 6145 6091 get_xena_rev_id(sp->pdev), 
6146 6092 s2io_driver_version); 6147 - #ifdef CONFIG_2BUFF_MODE 6148 - DBG_PRINT(ERR_DBG, ", Buffer mode %d",2); 6149 - #endif 6150 6093 switch(sp->intr_type) { 6151 6094 case INTA: 6152 6095 DBG_PRINT(ERR_DBG, ", Intr type INTA"); ··· 6176 6125 DBG_PRINT(ERR_DBG, "(rev %d), Version %s", 6177 6126 get_xena_rev_id(sp->pdev), 6178 6127 s2io_driver_version); 6179 - #ifdef CONFIG_2BUFF_MODE 6180 - DBG_PRINT(ERR_DBG, ", Buffer mode %d",2); 6181 - #endif 6182 6128 switch(sp->intr_type) { 6183 6129 case INTA: 6184 6130 DBG_PRINT(ERR_DBG, ", Intr type INTA"); ··· 6196 6148 sp->def_mac_addr[0].mac_addr[4], 6197 6149 sp->def_mac_addr[0].mac_addr[5]); 6198 6150 } 6151 + if (sp->rxd_mode == RXD_MODE_3B) 6152 + DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been " 6153 + "enabled\n",dev->name); 6154 + if (sp->rxd_mode == RXD_MODE_3A) 6155 + DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been " 6156 + "enabled\n",dev->name); 6199 6157 6200 6158 /* Initialize device name */ 6201 6159 strcpy(sp->name, dev->name);
+47 -44
drivers/net/s2io.h
··· 418 418 void *list_virt_addr; 419 419 } list_info_hold_t; 420 420 421 - /* Rx descriptor structure */ 421 + /* Rx descriptor structure for 1 buffer mode */ 422 422 typedef struct _RxD_t { 423 423 u64 Host_Control; /* reserved for host */ 424 424 u64 Control_1; ··· 439 439 #define SET_RXD_MARKER vBIT(THE_RXD_MARK, 0, 2) 440 440 #define GET_RXD_MARKER(ctrl) ((ctrl & SET_RXD_MARKER) >> 62) 441 441 442 - #ifndef CONFIG_2BUFF_MODE 443 - #define MASK_BUFFER0_SIZE vBIT(0x3FFF,2,14) 444 - #define SET_BUFFER0_SIZE(val) vBIT(val,2,14) 445 - #else 446 - #define MASK_BUFFER0_SIZE vBIT(0xFF,2,14) 447 - #define MASK_BUFFER1_SIZE vBIT(0xFFFF,16,16) 448 - #define MASK_BUFFER2_SIZE vBIT(0xFFFF,32,16) 449 - #define SET_BUFFER0_SIZE(val) vBIT(val,8,8) 450 - #define SET_BUFFER1_SIZE(val) vBIT(val,16,16) 451 - #define SET_BUFFER2_SIZE(val) vBIT(val,32,16) 452 - #endif 453 - 454 442 #define MASK_VLAN_TAG vBIT(0xFFFF,48,16) 455 443 #define SET_VLAN_TAG(val) vBIT(val,48,16) 456 444 #define SET_NUM_TAG(val) vBIT(val,16,32) 457 445 458 - #ifndef CONFIG_2BUFF_MODE 459 - #define RXD_GET_BUFFER0_SIZE(Control_2) (u64)((Control_2 & vBIT(0x3FFF,2,14))) 460 - #else 461 - #define RXD_GET_BUFFER0_SIZE(Control_2) (u8)((Control_2 & MASK_BUFFER0_SIZE) \ 462 - >> 48) 463 - #define RXD_GET_BUFFER1_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER1_SIZE) \ 464 - >> 32) 465 - #define RXD_GET_BUFFER2_SIZE(Control_2) (u16)((Control_2 & MASK_BUFFER2_SIZE) \ 466 - >> 16) 446 + 447 + } RxD_t; 448 + /* Rx descriptor structure for 1 buffer mode */ 449 + typedef struct _RxD1_t { 450 + struct _RxD_t h; 451 + 452 + #define MASK_BUFFER0_SIZE_1 vBIT(0x3FFF,2,14) 453 + #define SET_BUFFER0_SIZE_1(val) vBIT(val,2,14) 454 + #define RXD_GET_BUFFER0_SIZE_1(_Control_2) \ 455 + (u16)((_Control_2 & MASK_BUFFER0_SIZE_1) >> 48) 456 + u64 Buffer0_ptr; 457 + } RxD1_t; 458 + /* Rx descriptor structure for 3 or 2 buffer mode */ 459 + 460 + typedef struct _RxD3_t { 461 + struct _RxD_t h; 462 + 463 + #define MASK_BUFFER0_SIZE_3 
vBIT(0xFF,2,14) 464 + #define MASK_BUFFER1_SIZE_3 vBIT(0xFFFF,16,16) 465 + #define MASK_BUFFER2_SIZE_3 vBIT(0xFFFF,32,16) 466 + #define SET_BUFFER0_SIZE_3(val) vBIT(val,8,8) 467 + #define SET_BUFFER1_SIZE_3(val) vBIT(val,16,16) 468 + #define SET_BUFFER2_SIZE_3(val) vBIT(val,32,16) 469 + #define RXD_GET_BUFFER0_SIZE_3(Control_2) \ 470 + (u8)((Control_2 & MASK_BUFFER0_SIZE_3) >> 48) 471 + #define RXD_GET_BUFFER1_SIZE_3(Control_2) \ 472 + (u16)((Control_2 & MASK_BUFFER1_SIZE_3) >> 32) 473 + #define RXD_GET_BUFFER2_SIZE_3(Control_2) \ 474 + (u16)((Control_2 & MASK_BUFFER2_SIZE_3) >> 16) 467 475 #define BUF0_LEN 40 468 476 #define BUF1_LEN 1 469 - #endif 470 477 471 478 u64 Buffer0_ptr; 472 - #ifdef CONFIG_2BUFF_MODE 473 479 u64 Buffer1_ptr; 474 480 u64 Buffer2_ptr; 475 - #endif 476 - } RxD_t; 481 + } RxD3_t; 482 + 477 483 478 484 /* Structure that represents the Rx descriptor block which contains 479 485 * 128 Rx descriptors. 480 486 */ 481 - #ifndef CONFIG_2BUFF_MODE 482 487 typedef struct _RxD_block { 483 - #define MAX_RXDS_PER_BLOCK 127 484 - RxD_t rxd[MAX_RXDS_PER_BLOCK]; 488 + #define MAX_RXDS_PER_BLOCK_1 127 489 + RxD1_t rxd[MAX_RXDS_PER_BLOCK_1]; 485 490 486 491 u64 reserved_0; 487 492 #define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL ··· 497 492 * the upper 32 bits should 498 493 * be 0 */ 499 494 } RxD_block_t; 500 - #else 501 - typedef struct _RxD_block { 502 - #define MAX_RXDS_PER_BLOCK 85 503 - RxD_t rxd[MAX_RXDS_PER_BLOCK]; 504 495 505 - #define END_OF_BLOCK 0xFEFFFFFFFFFFFFFFULL 506 - u64 reserved_1; /* 0xFEFFFFFFFFFFFFFF to mark last Rxd 507 - * in this blk */ 508 - u64 pNext_RxD_Blk_physical; /* Phy ponter to next blk. */ 509 - } RxD_block_t; 510 496 #define SIZE_OF_BLOCK 4096 497 + 498 + #define RXD_MODE_1 0 499 + #define RXD_MODE_3A 1 500 + #define RXD_MODE_3B 2 511 501 512 502 /* Structure to hold virtual addresses of Buf0 and Buf1 in 513 503 * 2buf mode. 
*/ ··· 512 512 void *ba_0; 513 513 void *ba_1; 514 514 } buffAdd_t; 515 - #endif 516 515 517 516 /* Structure which stores all the MAC control parameters */ 518 517 ··· 538 539 539 540 typedef tx_curr_get_info_t tx_curr_put_info_t; 540 541 542 + 543 + typedef struct rxd_info { 544 + void *virt_addr; 545 + dma_addr_t dma_addr; 546 + }rxd_info_t; 547 + 541 548 /* Structure that holds the Phy and virt addresses of the Blocks */ 542 549 typedef struct rx_block_info { 543 - RxD_t *block_virt_addr; 550 + void *block_virt_addr; 544 551 dma_addr_t block_dma_addr; 552 + rxd_info_t *rxds; 545 553 } rx_block_info_t; 546 554 547 555 /* pre declaration of the nic structure */ ··· 584 578 int put_pos; 585 579 #endif 586 580 587 - #ifdef CONFIG_2BUFF_MODE 588 581 /* Buffer Address store. */ 589 582 buffAdd_t **ba; 590 - #endif 591 583 nic_t *nic; 592 584 } ring_info_t; 593 585 ··· 651 647 652 648 /* Default Tunable parameters of the NIC. */ 653 649 #define DEFAULT_FIFO_LEN 4096 654 - #define SMALL_RXD_CNT 30 * (MAX_RXDS_PER_BLOCK+1) 655 - #define LARGE_RXD_CNT 100 * (MAX_RXDS_PER_BLOCK+1) 656 650 #define SMALL_BLK_CNT 30 657 651 #define LARGE_BLK_CNT 100 658 652 ··· 680 678 681 679 /* Structure representing one instance of the NIC */ 682 680 struct s2io_nic { 681 + int rxd_mode; 683 682 #ifdef CONFIG_S2IO_NAPI 684 683 /* 685 684 * Count of packets to be processed in a given iteration, it will be indicated