Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

S2io: Enable multi ring support

- Separate ring specific data
- Initialize all configured rings with equal priority.
- Updated boundary check for number of Rings.
- Updated per ring statistics of rx_bytes and rx_packets.
- Moved lro struct from struct s2io_nic to struct ring_info.
- Access respective rx ring directly in fill_rx_buffers.
- Moved rx_bufs_left from struct s2io_nic to struct ring_info.
- Added per ring variables - rxd_mode, rxd_count, dev, pdev.

Signed-off-by: Surjit Reang <surjit.reang@neterion.com>
Signed-off-by: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
Signed-off-by: Ramkrishna Vepa <ram.vepa@neterion.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>

authored by

Sreenivasa Honnur and committed by
Jeff Garzik
0425b46a dfd44151

+226 -191
+173 -162
drivers/net/s2io.c
··· 809 809 config->rx_cfg[i].num_rxd - 1; 810 810 mac_control->rings[i].nic = nic; 811 811 mac_control->rings[i].ring_no = i; 812 + mac_control->rings[i].lro = lro_enable; 812 813 813 814 blk_cnt = config->rx_cfg[i].num_rxd / 814 815 (rxd_count[nic->rxd_mode] + 1); ··· 1561 1560 writeq(val64, &bar0->tx_fifo_partition_0); 1562 1561 1563 1562 /* Filling the Rx round robin registers as per the 1564 - * number of Rings and steering based on QoS. 1565 - */ 1563 + * number of Rings and steering based on QoS with 1564 + * equal priority. 1565 + */ 1566 1566 switch (config->rx_ring_num) { 1567 1567 case 1: 1568 + val64 = 0x0; 1569 + writeq(val64, &bar0->rx_w_round_robin_0); 1570 + writeq(val64, &bar0->rx_w_round_robin_1); 1571 + writeq(val64, &bar0->rx_w_round_robin_2); 1572 + writeq(val64, &bar0->rx_w_round_robin_3); 1573 + writeq(val64, &bar0->rx_w_round_robin_4); 1574 + 1568 1575 val64 = 0x8080808080808080ULL; 1569 1576 writeq(val64, &bar0->rts_qos_steering); 1570 1577 break; 1571 1578 case 2: 1572 - val64 = 0x0000010000010000ULL; 1579 + val64 = 0x0001000100010001ULL; 1573 1580 writeq(val64, &bar0->rx_w_round_robin_0); 1574 - val64 = 0x0100000100000100ULL; 1575 1581 writeq(val64, &bar0->rx_w_round_robin_1); 1576 - val64 = 0x0001000001000001ULL; 1577 1582 writeq(val64, &bar0->rx_w_round_robin_2); 1578 - val64 = 0x0000010000010000ULL; 1579 1583 writeq(val64, &bar0->rx_w_round_robin_3); 1580 - val64 = 0x0100000000000000ULL; 1584 + val64 = 0x0001000100000000ULL; 1581 1585 writeq(val64, &bar0->rx_w_round_robin_4); 1582 1586 1583 1587 val64 = 0x8080808040404040ULL; 1584 1588 writeq(val64, &bar0->rts_qos_steering); 1585 1589 break; 1586 1590 case 3: 1587 - val64 = 0x0001000102000001ULL; 1591 + val64 = 0x0001020001020001ULL; 1588 1592 writeq(val64, &bar0->rx_w_round_robin_0); 1589 - val64 = 0x0001020000010001ULL; 1593 + val64 = 0x0200010200010200ULL; 1590 1594 writeq(val64, &bar0->rx_w_round_robin_1); 1591 - val64 = 0x0200000100010200ULL; 1595 + val64 = 0x0102000102000102ULL; 
1592 1596 writeq(val64, &bar0->rx_w_round_robin_2); 1593 - val64 = 0x0001000102000001ULL; 1597 + val64 = 0x0001020001020001ULL; 1594 1598 writeq(val64, &bar0->rx_w_round_robin_3); 1595 - val64 = 0x0001020000000000ULL; 1599 + val64 = 0x0200010200000000ULL; 1596 1600 writeq(val64, &bar0->rx_w_round_robin_4); 1597 1601 1598 1602 val64 = 0x8080804040402020ULL; 1599 1603 writeq(val64, &bar0->rts_qos_steering); 1600 1604 break; 1601 1605 case 4: 1602 - val64 = 0x0001020300010200ULL; 1606 + val64 = 0x0001020300010203ULL; 1603 1607 writeq(val64, &bar0->rx_w_round_robin_0); 1604 - val64 = 0x0100000102030001ULL; 1605 1608 writeq(val64, &bar0->rx_w_round_robin_1); 1606 - val64 = 0x0200010000010203ULL; 1607 1609 writeq(val64, &bar0->rx_w_round_robin_2); 1608 - val64 = 0x0001020001000001ULL; 1609 1610 writeq(val64, &bar0->rx_w_round_robin_3); 1610 - val64 = 0x0203000100000000ULL; 1611 + val64 = 0x0001020300000000ULL; 1611 1612 writeq(val64, &bar0->rx_w_round_robin_4); 1612 1613 1613 1614 val64 = 0x8080404020201010ULL; 1614 1615 writeq(val64, &bar0->rts_qos_steering); 1615 1616 break; 1616 1617 case 5: 1617 - val64 = 0x0001000203000102ULL; 1618 + val64 = 0x0001020304000102ULL; 1618 1619 writeq(val64, &bar0->rx_w_round_robin_0); 1619 - val64 = 0x0001020001030004ULL; 1620 + val64 = 0x0304000102030400ULL; 1620 1621 writeq(val64, &bar0->rx_w_round_robin_1); 1621 - val64 = 0x0001000203000102ULL; 1622 + val64 = 0x0102030400010203ULL; 1622 1623 writeq(val64, &bar0->rx_w_round_robin_2); 1623 - val64 = 0x0001020001030004ULL; 1624 + val64 = 0x0400010203040001ULL; 1624 1625 writeq(val64, &bar0->rx_w_round_robin_3); 1625 - val64 = 0x0001000000000000ULL; 1626 + val64 = 0x0203040000000000ULL; 1626 1627 writeq(val64, &bar0->rx_w_round_robin_4); 1627 1628 1628 1629 val64 = 0x8080404020201008ULL; 1629 1630 writeq(val64, &bar0->rts_qos_steering); 1630 1631 break; 1631 1632 case 6: 1632 - val64 = 0x0001020304000102ULL; 1633 + val64 = 0x0001020304050001ULL; 1633 1634 writeq(val64, 
&bar0->rx_w_round_robin_0); 1634 - val64 = 0x0304050001020001ULL; 1635 + val64 = 0x0203040500010203ULL; 1635 1636 writeq(val64, &bar0->rx_w_round_robin_1); 1636 - val64 = 0x0203000100000102ULL; 1637 + val64 = 0x0405000102030405ULL; 1637 1638 writeq(val64, &bar0->rx_w_round_robin_2); 1638 - val64 = 0x0304000102030405ULL; 1639 + val64 = 0x0001020304050001ULL; 1639 1640 writeq(val64, &bar0->rx_w_round_robin_3); 1640 - val64 = 0x0001000200000000ULL; 1641 + val64 = 0x0203040500000000ULL; 1641 1642 writeq(val64, &bar0->rx_w_round_robin_4); 1642 1643 1643 1644 val64 = 0x8080404020100804ULL; 1644 1645 writeq(val64, &bar0->rts_qos_steering); 1645 1646 break; 1646 1647 case 7: 1647 - val64 = 0x0001020001020300ULL; 1648 + val64 = 0x0001020304050600ULL; 1648 1649 writeq(val64, &bar0->rx_w_round_robin_0); 1649 - val64 = 0x0102030400010203ULL; 1650 + val64 = 0x0102030405060001ULL; 1650 1651 writeq(val64, &bar0->rx_w_round_robin_1); 1651 - val64 = 0x0405060001020001ULL; 1652 + val64 = 0x0203040506000102ULL; 1652 1653 writeq(val64, &bar0->rx_w_round_robin_2); 1653 - val64 = 0x0304050000010200ULL; 1654 + val64 = 0x0304050600010203ULL; 1654 1655 writeq(val64, &bar0->rx_w_round_robin_3); 1655 - val64 = 0x0102030000000000ULL; 1656 + val64 = 0x0405060000000000ULL; 1656 1657 writeq(val64, &bar0->rx_w_round_robin_4); 1657 1658 1658 1659 val64 = 0x8080402010080402ULL; 1659 1660 writeq(val64, &bar0->rts_qos_steering); 1660 1661 break; 1661 1662 case 8: 1662 - val64 = 0x0001020300040105ULL; 1663 + val64 = 0x0001020304050607ULL; 1663 1664 writeq(val64, &bar0->rx_w_round_robin_0); 1664 - val64 = 0x0200030106000204ULL; 1665 1665 writeq(val64, &bar0->rx_w_round_robin_1); 1666 - val64 = 0x0103000502010007ULL; 1667 1666 writeq(val64, &bar0->rx_w_round_robin_2); 1668 - val64 = 0x0304010002060500ULL; 1669 1667 writeq(val64, &bar0->rx_w_round_robin_3); 1670 - val64 = 0x0103020400000000ULL; 1668 + val64 = 0x0001020300000000ULL; 1671 1669 writeq(val64, &bar0->rx_w_round_robin_4); 1672 1670 1673 1671 
val64 = 0x8040201008040201ULL; ··· 2499 2499 2500 2500 /** 2501 2501 * fill_rx_buffers - Allocates the Rx side skbs 2502 - * @nic: device private variable 2503 - * @ring_no: ring number 2502 + * @ring_info: per ring structure 2504 2503 * Description: 2505 2504 * The function allocates Rx side skbs and puts the physical 2506 2505 * address of these buffers into the RxD buffer pointers, so that the NIC ··· 2517 2518 * SUCCESS on success or an appropriate -ve value on failure. 2518 2519 */ 2519 2520 2520 - static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) 2521 + static int fill_rx_buffers(struct ring_info *ring) 2521 2522 { 2522 - struct net_device *dev = nic->dev; 2523 2523 struct sk_buff *skb; 2524 2524 struct RxD_t *rxdp; 2525 - int off, off1, size, block_no, block_no1; 2525 + int off, size, block_no, block_no1; 2526 2526 u32 alloc_tab = 0; 2527 2527 u32 alloc_cnt; 2528 - struct mac_info *mac_control; 2529 - struct config_param *config; 2530 2528 u64 tmp; 2531 2529 struct buffAdd *ba; 2532 2530 struct RxD_t *first_rxdp = NULL; 2533 2531 u64 Buffer0_ptr = 0, Buffer1_ptr = 0; 2532 + int rxd_index = 0; 2534 2533 struct RxD1 *rxdp1; 2535 2534 struct RxD3 *rxdp3; 2536 - struct swStat *stats = &nic->mac_control.stats_info->sw_stat; 2535 + struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat; 2537 2536 2538 - mac_control = &nic->mac_control; 2539 - config = &nic->config; 2540 - alloc_cnt = mac_control->rings[ring_no].pkt_cnt - 2541 - atomic_read(&nic->rx_bufs_left[ring_no]); 2537 + alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left; 2542 2538 2543 - block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index; 2544 - off1 = mac_control->rings[ring_no].rx_curr_get_info.offset; 2539 + block_no1 = ring->rx_curr_get_info.block_index; 2545 2540 while (alloc_tab < alloc_cnt) { 2546 - block_no = mac_control->rings[ring_no].rx_curr_put_info. 
2547 - block_index; 2548 - off = mac_control->rings[ring_no].rx_curr_put_info.offset; 2541 + block_no = ring->rx_curr_put_info.block_index; 2549 2542 2550 - rxdp = mac_control->rings[ring_no]. 2551 - rx_blocks[block_no].rxds[off].virt_addr; 2543 + off = ring->rx_curr_put_info.offset; 2552 2544 2553 - if ((block_no == block_no1) && (off == off1) && 2554 - (rxdp->Host_Control)) { 2545 + rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr; 2546 + 2547 + rxd_index = off + 1; 2548 + if (block_no) 2549 + rxd_index += (block_no * ring->rxd_count); 2550 + 2551 + if ((block_no == block_no1) && 2552 + (off == ring->rx_curr_get_info.offset) && 2553 + (rxdp->Host_Control)) { 2555 2554 DBG_PRINT(INTR_DBG, "%s: Get and Put", 2556 - dev->name); 2555 + ring->dev->name); 2557 2556 DBG_PRINT(INTR_DBG, " info equated\n"); 2558 2557 goto end; 2559 2558 } 2560 - if (off && (off == rxd_count[nic->rxd_mode])) { 2561 - mac_control->rings[ring_no].rx_curr_put_info. 2562 - block_index++; 2563 - if (mac_control->rings[ring_no].rx_curr_put_info. 2564 - block_index == mac_control->rings[ring_no]. 2565 - block_count) 2566 - mac_control->rings[ring_no].rx_curr_put_info. 2567 - block_index = 0; 2568 - block_no = mac_control->rings[ring_no]. 2569 - rx_curr_put_info.block_index; 2570 - if (off == rxd_count[nic->rxd_mode]) 2571 - off = 0; 2572 - mac_control->rings[ring_no].rx_curr_put_info. 2573 - offset = off; 2574 - rxdp = mac_control->rings[ring_no]. 
2575 - rx_blocks[block_no].block_virt_addr; 2559 + if (off && (off == ring->rxd_count)) { 2560 + ring->rx_curr_put_info.block_index++; 2561 + if (ring->rx_curr_put_info.block_index == 2562 + ring->block_count) 2563 + ring->rx_curr_put_info.block_index = 0; 2564 + block_no = ring->rx_curr_put_info.block_index; 2565 + off = 0; 2566 + ring->rx_curr_put_info.offset = off; 2567 + rxdp = ring->rx_blocks[block_no].block_virt_addr; 2576 2568 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", 2577 - dev->name, rxdp); 2569 + ring->dev->name, rxdp); 2570 + 2578 2571 } 2579 2572 2580 2573 if ((rxdp->Control_1 & RXD_OWN_XENA) && 2581 - ((nic->rxd_mode == RXD_MODE_3B) && 2574 + ((ring->rxd_mode == RXD_MODE_3B) && 2582 2575 (rxdp->Control_2 & s2BIT(0)))) { 2583 - mac_control->rings[ring_no].rx_curr_put_info. 2584 - offset = off; 2576 + ring->rx_curr_put_info.offset = off; 2585 2577 goto end; 2586 2578 } 2587 2579 /* calculate size of skb based on ring mode */ 2588 - size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + 2580 + size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE + 2589 2581 HEADER_802_2_SIZE + HEADER_SNAP_SIZE; 2590 - if (nic->rxd_mode == RXD_MODE_1) 2582 + if (ring->rxd_mode == RXD_MODE_1) 2591 2583 size += NET_IP_ALIGN; 2592 2584 else 2593 - size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; 2585 + size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4; 2594 2586 2595 2587 /* allocate skb */ 2596 2588 skb = dev_alloc_skb(size); 2597 2589 if(!skb) { 2598 - DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); 2590 + DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name); 2599 2591 DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n"); 2600 2592 if (first_rxdp) { 2601 2593 wmb(); 2602 2594 first_rxdp->Control_1 |= RXD_OWN_XENA; 2603 2595 } 2604 - nic->mac_control.stats_info->sw_stat. 
\ 2605 - mem_alloc_fail_cnt++; 2596 + stats->mem_alloc_fail_cnt++; 2597 + 2606 2598 return -ENOMEM ; 2607 2599 } 2608 - nic->mac_control.stats_info->sw_stat.mem_allocated 2609 - += skb->truesize; 2610 - if (nic->rxd_mode == RXD_MODE_1) { 2600 + stats->mem_allocated += skb->truesize; 2601 + 2602 + if (ring->rxd_mode == RXD_MODE_1) { 2611 2603 /* 1 buffer mode - normal operation mode */ 2612 2604 rxdp1 = (struct RxD1*)rxdp; 2613 2605 memset(rxdp, 0, sizeof(struct RxD1)); 2614 2606 skb_reserve(skb, NET_IP_ALIGN); 2615 2607 rxdp1->Buffer0_ptr = pci_map_single 2616 - (nic->pdev, skb->data, size - NET_IP_ALIGN, 2608 + (ring->pdev, skb->data, size - NET_IP_ALIGN, 2617 2609 PCI_DMA_FROMDEVICE); 2618 2610 if( (rxdp1->Buffer0_ptr == 0) || 2619 2611 (rxdp1->Buffer0_ptr == ··· 2613 2623 2614 2624 rxdp->Control_2 = 2615 2625 SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); 2616 - 2617 - } else if (nic->rxd_mode == RXD_MODE_3B) { 2626 + rxdp->Host_Control = (unsigned long) (skb); 2627 + } else if (ring->rxd_mode == RXD_MODE_3B) { 2618 2628 /* 2619 2629 * 2 buffer mode - 2620 2630 * 2 buffer mode provides 128 ··· 2630 2640 rxdp3->Buffer0_ptr = Buffer0_ptr; 2631 2641 rxdp3->Buffer1_ptr = Buffer1_ptr; 2632 2642 2633 - ba = &mac_control->rings[ring_no].ba[block_no][off]; 2643 + ba = &ring->ba[block_no][off]; 2634 2644 skb_reserve(skb, BUF0_LEN); 2635 2645 tmp = (u64)(unsigned long) skb->data; 2636 2646 tmp += ALIGN_SIZE; ··· 2640 2650 2641 2651 if (!(rxdp3->Buffer0_ptr)) 2642 2652 rxdp3->Buffer0_ptr = 2643 - pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, 2644 - PCI_DMA_FROMDEVICE); 2653 + pci_map_single(ring->pdev, ba->ba_0, 2654 + BUF0_LEN, PCI_DMA_FROMDEVICE); 2645 2655 else 2646 - pci_dma_sync_single_for_device(nic->pdev, 2656 + pci_dma_sync_single_for_device(ring->pdev, 2647 2657 (dma_addr_t) rxdp3->Buffer0_ptr, 2648 2658 BUF0_LEN, PCI_DMA_FROMDEVICE); 2649 2659 if( (rxdp3->Buffer0_ptr == 0) || ··· 2651 2661 goto pci_map_failed; 2652 2662 2653 2663 rxdp->Control_2 = 
SET_BUFFER0_SIZE_3(BUF0_LEN); 2654 - if (nic->rxd_mode == RXD_MODE_3B) { 2664 + if (ring->rxd_mode == RXD_MODE_3B) { 2655 2665 /* Two buffer mode */ 2656 2666 2657 2667 /* ··· 2659 2669 * L4 payload 2660 2670 */ 2661 2671 rxdp3->Buffer2_ptr = pci_map_single 2662 - (nic->pdev, skb->data, dev->mtu + 4, 2672 + (ring->pdev, skb->data, ring->mtu + 4, 2663 2673 PCI_DMA_FROMDEVICE); 2664 2674 2665 2675 if( (rxdp3->Buffer2_ptr == 0) || 2666 2676 (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) 2667 2677 goto pci_map_failed; 2668 2678 2669 - rxdp3->Buffer1_ptr = 2670 - pci_map_single(nic->pdev, 2679 + if (!rxdp3->Buffer1_ptr) 2680 + rxdp3->Buffer1_ptr = 2681 + pci_map_single(ring->pdev, 2671 2682 ba->ba_1, BUF1_LEN, 2672 2683 PCI_DMA_FROMDEVICE); 2684 + 2673 2685 if( (rxdp3->Buffer1_ptr == 0) || 2674 2686 (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { 2675 2687 pci_unmap_single 2676 - (nic->pdev, 2677 - (dma_addr_t)rxdp3->Buffer2_ptr, 2678 - dev->mtu + 4, 2688 + (ring->pdev, 2689 + (dma_addr_t)(unsigned long) 2690 + skb->data, 2691 + ring->mtu + 4, 2679 2692 PCI_DMA_FROMDEVICE); 2680 2693 goto pci_map_failed; 2681 2694 } 2682 2695 rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); 2683 2696 rxdp->Control_2 |= SET_BUFFER2_SIZE_3 2684 - (dev->mtu + 4); 2697 + (ring->mtu + 4); 2685 2698 } 2686 2699 rxdp->Control_2 |= s2BIT(0); 2700 + rxdp->Host_Control = (unsigned long) (skb); 2687 2701 } 2688 - rxdp->Host_Control = (unsigned long) (skb); 2689 2702 if (alloc_tab & ((1 << rxsync_frequency) - 1)) 2690 2703 rxdp->Control_1 |= RXD_OWN_XENA; 2691 2704 off++; 2692 - if (off == (rxd_count[nic->rxd_mode] + 1)) 2705 + if (off == (ring->rxd_count + 1)) 2693 2706 off = 0; 2694 - mac_control->rings[ring_no].rx_curr_put_info.offset = off; 2707 + ring->rx_curr_put_info.offset = off; 2695 2708 2696 2709 rxdp->Control_2 |= SET_RXD_MARKER; 2697 2710 if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { ··· 2704 2711 } 2705 2712 first_rxdp = rxdp; 2706 2713 } 2707 - atomic_inc(&nic->rx_bufs_left[ring_no]); 2714 + 
ring->rx_bufs_left += 1; 2708 2715 alloc_tab++; 2709 2716 } 2710 2717 ··· 2776 2783 } 2777 2784 sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; 2778 2785 dev_kfree_skb(skb); 2779 - atomic_dec(&sp->rx_bufs_left[ring_no]); 2786 + mac_control->rings[ring_no].rx_bufs_left -= 1; 2780 2787 } 2781 2788 } 2782 2789 ··· 2807 2814 mac_control->rings[i].rx_curr_get_info.block_index = 0; 2808 2815 mac_control->rings[i].rx_curr_put_info.offset = 0; 2809 2816 mac_control->rings[i].rx_curr_get_info.offset = 0; 2810 - atomic_set(&sp->rx_bufs_left[i], 0); 2817 + mac_control->rings[i].rx_bufs_left = 0; 2811 2818 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", 2812 2819 dev->name, buf_cnt, i); 2813 2820 } ··· 2857 2864 netif_rx_complete(dev, napi); 2858 2865 2859 2866 for (i = 0; i < config->rx_ring_num; i++) { 2860 - if (fill_rx_buffers(nic, i) == -ENOMEM) { 2867 + if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2861 2868 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2862 2869 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2863 2870 break; ··· 2870 2877 2871 2878 no_rx: 2872 2879 for (i = 0; i < config->rx_ring_num; i++) { 2873 - if (fill_rx_buffers(nic, i) == -ENOMEM) { 2880 + if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2874 2881 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2875 2882 DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2876 2883 break; ··· 2921 2928 rx_intr_handler(&mac_control->rings[i]); 2922 2929 2923 2930 for (i = 0; i < config->rx_ring_num; i++) { 2924 - if (fill_rx_buffers(nic, i) == -ENOMEM) { 2931 + if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2925 2932 DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2926 2933 DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); 2927 2934 break; ··· 2946 2953 */ 2947 2954 static void rx_intr_handler(struct ring_info *ring_data) 2948 2955 { 2949 - struct s2io_nic *nic = ring_data->nic; 2950 - struct net_device *dev = (struct net_device *) nic->dev; 2951 2956 int get_block, 
put_block; 2952 2957 struct rx_curr_get_info get_info, put_info; 2953 2958 struct RxD_t *rxdp; ··· 2968 2977 */ 2969 2978 if ((get_block == put_block) && 2970 2979 (get_info.offset + 1) == put_info.offset) { 2971 - DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); 2980 + DBG_PRINT(INTR_DBG, "%s: Ring Full\n", 2981 + ring_data->dev->name); 2972 2982 break; 2973 2983 } 2974 2984 skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); 2975 2985 if (skb == NULL) { 2976 2986 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2977 - dev->name); 2987 + ring_data->dev->name); 2978 2988 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 2979 2989 return; 2980 2990 } 2981 - if (nic->rxd_mode == RXD_MODE_1) { 2991 + if (ring_data->rxd_mode == RXD_MODE_1) { 2982 2992 rxdp1 = (struct RxD1*)rxdp; 2983 - pci_unmap_single(nic->pdev, (dma_addr_t) 2993 + pci_unmap_single(ring_data->pdev, (dma_addr_t) 2984 2994 rxdp1->Buffer0_ptr, 2985 - dev->mtu + 2995 + ring_data->mtu + 2986 2996 HEADER_ETHERNET_II_802_3_SIZE + 2987 2997 HEADER_802_2_SIZE + 2988 2998 HEADER_SNAP_SIZE, 2989 2999 PCI_DMA_FROMDEVICE); 2990 - } else if (nic->rxd_mode == RXD_MODE_3B) { 3000 + } else if (ring_data->rxd_mode == RXD_MODE_3B) { 2991 3001 rxdp3 = (struct RxD3*)rxdp; 2992 - pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) 3002 + pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t) 2993 3003 rxdp3->Buffer0_ptr, 2994 3004 BUF0_LEN, PCI_DMA_FROMDEVICE); 2995 - pci_unmap_single(nic->pdev, (dma_addr_t) 3005 + pci_unmap_single(ring_data->pdev, (dma_addr_t) 2996 3006 rxdp3->Buffer2_ptr, 2997 - dev->mtu + 4, 3007 + ring_data->mtu + 4, 2998 3008 PCI_DMA_FROMDEVICE); 2999 3009 } 3000 3010 prefetch(skb->data); ··· 3004 3012 ring_data->rx_curr_get_info.offset = get_info.offset; 3005 3013 rxdp = ring_data->rx_blocks[get_block]. 
3006 3014 rxds[get_info.offset].virt_addr; 3007 - if (get_info.offset == rxd_count[nic->rxd_mode]) { 3015 + if (get_info.offset == rxd_count[ring_data->rxd_mode]) { 3008 3016 get_info.offset = 0; 3009 3017 ring_data->rx_curr_get_info.offset = get_info.offset; 3010 3018 get_block++; ··· 3014 3022 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 3015 3023 } 3016 3024 3017 - nic->pkts_to_process -= 1; 3018 - if ((napi) && (!nic->pkts_to_process)) 3019 - break; 3025 + if(ring_data->nic->config.napi){ 3026 + ring_data->nic->pkts_to_process -= 1; 3027 + if (!ring_data->nic->pkts_to_process) 3028 + break; 3029 + } 3020 3030 pkt_cnt++; 3021 3031 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) 3022 3032 break; 3023 3033 } 3024 - if (nic->lro) { 3034 + if (ring_data->lro) { 3025 3035 /* Clear all LRO sessions before exiting */ 3026 3036 for (i=0; i<MAX_LRO_SESSIONS; i++) { 3027 - struct lro *lro = &nic->lro0_n[i]; 3037 + struct lro *lro = &ring_data->lro0_n[i]; 3028 3038 if (lro->in_use) { 3029 - update_L3L4_header(nic, lro); 3039 + update_L3L4_header(ring_data->nic, lro); 3030 3040 queue_rx_frame(lro->parent, lro->vlan_tag); 3031 3041 clear_lro_session(lro); 3032 3042 } ··· 4327 4333 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 4328 4334 } 4329 4335 4330 - static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n) 4336 + static int s2io_chk_rx_buffers(struct ring_info *ring) 4331 4337 { 4332 - if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { 4333 - DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name); 4338 + if (fill_rx_buffers(ring) == -ENOMEM) { 4339 + DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); 4334 4340 DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); 4335 4341 } 4336 4342 return 0; ··· 4345 4351 return IRQ_HANDLED; 4346 4352 4347 4353 rx_intr_handler(ring); 4348 - s2io_chk_rx_buffers(sp, ring->ring_no); 4354 + s2io_chk_rx_buffers(ring); 4349 4355 4350 4356 return IRQ_HANDLED; 4351 4357 } ··· 4803 4809 */ 4804 4810 if (!config->napi) { 4805 
4811 for (i = 0; i < config->rx_ring_num; i++) 4806 - s2io_chk_rx_buffers(sp, i); 4812 + s2io_chk_rx_buffers(&mac_control->rings[i]); 4807 4813 } 4808 4814 writeq(sp->general_int_mask, &bar0->general_int_mask); 4809 4815 readl(&bar0->general_int_status); ··· 4860 4866 struct s2io_nic *sp = dev->priv; 4861 4867 struct mac_info *mac_control; 4862 4868 struct config_param *config; 4869 + int i; 4863 4870 4864 4871 4865 4872 mac_control = &sp->mac_control; ··· 4879 4884 le32_to_cpu(mac_control->stats_info->rmac_vld_mcst_frms); 4880 4885 sp->stats.rx_length_errors = 4881 4886 le64_to_cpu(mac_control->stats_info->rmac_long_frms); 4887 + 4888 + /* collect per-ring rx_packets and rx_bytes */ 4889 + sp->stats.rx_packets = sp->stats.rx_bytes = 0; 4890 + for (i = 0; i < config->rx_ring_num; i++) { 4891 + sp->stats.rx_packets += mac_control->rings[i].rx_packets; 4892 + sp->stats.rx_bytes += mac_control->rings[i].rx_bytes; 4893 + } 4882 4894 4883 4895 return (&sp->stats); 4884 4896 } ··· 7159 7157 config = &sp->config; 7160 7158 7161 7159 for (i = 0; i < config->rx_ring_num; i++) { 7162 - if ((ret = fill_rx_buffers(sp, i))) { 7160 + mac_control->rings[i].mtu = dev->mtu; 7161 + ret = fill_rx_buffers(&mac_control->rings[i]); 7162 + if (ret) { 7163 7163 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", 7164 7164 dev->name); 7165 7165 s2io_reset(sp); ··· 7169 7165 return -ENOMEM; 7170 7166 } 7171 7167 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, 7172 - atomic_read(&sp->rx_bufs_left[i])); 7168 + mac_control->rings[i].rx_bufs_left); 7173 7169 } 7174 7170 7175 7171 /* Initialise napi */ ··· 7304 7300 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) 7305 7301 { 7306 7302 struct s2io_nic *sp = ring_data->nic; 7307 - struct net_device *dev = (struct net_device *) sp->dev; 7303 + struct net_device *dev = (struct net_device *) ring_data->dev; 7308 7304 struct sk_buff *skb = (struct sk_buff *) 7309 7305 ((unsigned long) rxdp->Host_Control); 7310 7306 int 
ring_no = ring_data->ring_no; ··· 7381 7377 sp->mac_control.stats_info->sw_stat.mem_freed 7382 7378 += skb->truesize; 7383 7379 dev_kfree_skb(skb); 7384 - atomic_dec(&sp->rx_bufs_left[ring_no]); 7380 + ring_data->rx_bufs_left -= 1; 7385 7381 rxdp->Host_Control = 0; 7386 7382 return 0; 7387 7383 } 7388 7384 } 7389 7385 7390 7386 /* Updating statistics */ 7391 - sp->stats.rx_packets++; 7387 + ring_data->rx_packets++; 7392 7388 rxdp->Host_Control = 0; 7393 7389 if (sp->rxd_mode == RXD_MODE_1) { 7394 7390 int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); 7395 7391 7396 - sp->stats.rx_bytes += len; 7392 + ring_data->rx_bytes += len; 7397 7393 skb_put(skb, len); 7398 7394 7399 7395 } else if (sp->rxd_mode == RXD_MODE_3B) { ··· 7404 7400 unsigned char *buff = skb_push(skb, buf0_len); 7405 7401 7406 7402 struct buffAdd *ba = &ring_data->ba[get_block][get_off]; 7407 - sp->stats.rx_bytes += buf0_len + buf2_len; 7403 + ring_data->rx_bytes += buf0_len + buf2_len; 7408 7404 memcpy(buff, ba->ba_0, buf0_len); 7409 7405 skb_put(skb, buf2_len); 7410 7406 } 7411 7407 7412 - if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || 7413 - (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && 7408 + if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) || 7409 + (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && 7414 7410 (sp->rx_csum)) { 7415 7411 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); 7416 7412 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); ··· 7421 7417 * a flag in the RxD. 
7422 7418 */ 7423 7419 skb->ip_summed = CHECKSUM_UNNECESSARY; 7424 - if (sp->lro) { 7420 + if (ring_data->lro) { 7425 7421 u32 tcp_len; 7426 7422 u8 *tcp; 7427 7423 int ret = 0; 7428 7424 7429 - ret = s2io_club_tcp_session(skb->data, &tcp, 7430 - &tcp_len, &lro, 7431 - rxdp, sp); 7425 + ret = s2io_club_tcp_session(ring_data, 7426 + skb->data, &tcp, &tcp_len, &lro, 7427 + rxdp, sp); 7432 7428 switch (ret) { 7433 7429 case 3: /* Begin anew */ 7434 7430 lro->parent = skb; ··· 7490 7486 queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); 7491 7487 dev->last_rx = jiffies; 7492 7488 aggregate: 7493 - atomic_dec(&sp->rx_bufs_left[ring_no]); 7489 + sp->mac_control.rings[ring_no].rx_bufs_left -= 1; 7494 7490 return SUCCESS; 7495 7491 } 7496 7492 ··· 7607 7603 tx_steering_type = NO_STEERING; 7608 7604 } 7609 7605 7610 - if ( rx_ring_num > 8) { 7611 - DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not " 7606 + if (rx_ring_num > MAX_RX_RINGS) { 7607 + DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not " 7612 7608 "supported\n"); 7613 - DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n"); 7614 - rx_ring_num = 8; 7609 + DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n", 7610 + MAX_RX_RINGS); 7611 + rx_ring_num = MAX_RX_RINGS; 7615 7612 } 7613 + 7616 7614 if (*dev_intr_type != INTA) 7617 7615 napi = 0; 7618 7616 ··· 7842 7836 7843 7837 /* Rx side parameters. 
*/ 7844 7838 config->rx_ring_num = rx_ring_num; 7845 - for (i = 0; i < MAX_RX_RINGS; i++) { 7839 + for (i = 0; i < config->rx_ring_num; i++) { 7846 7840 config->rx_cfg[i].num_rxd = rx_ring_sz[i] * 7847 7841 (rxd_count[sp->rxd_mode] + 1); 7848 7842 config->rx_cfg[i].ring_priority = i; 7843 + mac_control->rings[i].rx_bufs_left = 0; 7844 + mac_control->rings[i].rxd_mode = sp->rxd_mode; 7845 + mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode]; 7846 + mac_control->rings[i].pdev = sp->pdev; 7847 + mac_control->rings[i].dev = sp->dev; 7849 7848 } 7850 7849 7851 7850 for (i = 0; i < rx_ring_num; i++) { ··· 7864 7853 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3; 7865 7854 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; 7866 7855 7867 - 7868 - /* Initialize Ring buffer parameters. */ 7869 - for (i = 0; i < config->rx_ring_num; i++) 7870 - atomic_set(&sp->rx_bufs_left[i], 0); 7871 7856 7872 7857 /* initialize the shared memory used by the NIC and the host */ 7873 7858 if (init_shared_mem(sp)) { ··· 8083 8076 8084 8077 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, 8085 8078 sp->config.tx_fifo_num); 8079 + 8080 + DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name, 8081 + sp->config.rx_ring_num); 8086 8082 8087 8083 switch(sp->config.intr_type) { 8088 8084 case INTA: ··· 8401 8391 } 8402 8392 8403 8393 static int 8404 - s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, 8405 - struct RxD_t *rxdp, struct s2io_nic *sp) 8394 + s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp, 8395 + u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp, 8396 + struct s2io_nic *sp) 8406 8397 { 8407 8398 struct iphdr *ip; 8408 8399 struct tcphdr *tcph; ··· 8421 8410 tcph = (struct tcphdr *)*tcp; 8422 8411 *tcp_len = get_l4_pyld_length(ip, tcph); 8423 8412 for (i=0; i<MAX_LRO_SESSIONS; i++) { 8424 - struct lro *l_lro = &sp->lro0_n[i]; 8413 + struct lro *l_lro = &ring_data->lro0_n[i]; 8425 8414 if 
(l_lro->in_use) { 8426 8415 if (check_for_socket_match(l_lro, ip, tcph)) 8427 8416 continue; ··· 8459 8448 } 8460 8449 8461 8450 for (i=0; i<MAX_LRO_SESSIONS; i++) { 8462 - struct lro *l_lro = &sp->lro0_n[i]; 8451 + struct lro *l_lro = &ring_data->lro0_n[i]; 8463 8452 if (!(l_lro->in_use)) { 8464 8453 *lro = l_lro; 8465 8454 ret = 3; /* Begin anew */
+53 -29
drivers/net/s2io.h
··· 678 678 struct rxd_info *rxds; 679 679 }; 680 680 681 + /* Data structure to represent a LRO session */ 682 + struct lro { 683 + struct sk_buff *parent; 684 + struct sk_buff *last_frag; 685 + u8 *l2h; 686 + struct iphdr *iph; 687 + struct tcphdr *tcph; 688 + u32 tcp_next_seq; 689 + __be32 tcp_ack; 690 + int total_len; 691 + int frags_len; 692 + int sg_num; 693 + int in_use; 694 + __be16 window; 695 + u16 vlan_tag; 696 + u32 cur_tsval; 697 + __be32 cur_tsecr; 698 + u8 saw_ts; 699 + } ____cacheline_aligned; 700 + 681 701 /* Ring specific structure */ 682 702 struct ring_info { 683 703 /* The ring number */ 684 704 int ring_no; 705 + 706 + /* per-ring buffer counter */ 707 + u32 rx_bufs_left; 708 + 709 + #define MAX_LRO_SESSIONS 32 710 + struct lro lro0_n[MAX_LRO_SESSIONS]; 711 + u8 lro; 712 + 713 + /* copy of sp->rxd_mode flag */ 714 + int rxd_mode; 715 + 716 + /* Number of rxds per block for the rxd_mode */ 717 + int rxd_count; 718 + 719 + /* copy of sp pointer */ 720 + struct s2io_nic *nic; 721 + 722 + /* copy of sp->dev pointer */ 723 + struct net_device *dev; 724 + 725 + /* copy of sp->pdev pointer */ 726 + struct pci_dev *pdev; 685 727 686 728 /* 687 729 * Place holders for the virtual and physical addresses of ··· 745 703 */ 746 704 struct rx_curr_get_info rx_curr_get_info; 747 705 706 + /* interface MTU value */ 707 + unsigned mtu; 708 + 748 709 /* Buffer Address store. 
*/ 749 710 struct buffAdd **ba; 750 - struct s2io_nic *nic; 751 - }; 711 + 712 + /* per-Ring statistics */ 713 + unsigned long rx_packets; 714 + unsigned long rx_bytes; 715 + } ____cacheline_aligned; 752 716 753 717 /* Fifo specific structure */ 754 718 struct fifo_info { ··· 861 813 u64 data; 862 814 }; 863 815 864 - /* Data structure to represent a LRO session */ 865 - struct lro { 866 - struct sk_buff *parent; 867 - struct sk_buff *last_frag; 868 - u8 *l2h; 869 - struct iphdr *iph; 870 - struct tcphdr *tcph; 871 - u32 tcp_next_seq; 872 - __be32 tcp_ack; 873 - int total_len; 874 - int frags_len; 875 - int sg_num; 876 - int in_use; 877 - __be16 window; 878 - u16 vlan_tag; 879 - u32 cur_tsval; 880 - __be32 cur_tsecr; 881 - u8 saw_ts; 882 - } ____cacheline_aligned; 883 - 884 816 /* These flags represent the devices temporary state */ 885 817 enum s2io_device_state_t 886 818 { ··· 899 871 900 872 /* Space to back up the PCI config space */ 901 873 u32 config_space[256 / sizeof(u32)]; 902 - 903 - atomic_t rx_bufs_left[MAX_RX_RINGS]; 904 874 905 875 #define PROMISC 1 906 876 #define ALL_MULTI 2 ··· 976 950 #define XFRAME_II_DEVICE 2 977 951 u8 device_type; 978 952 979 - #define MAX_LRO_SESSIONS 32 980 - struct lro lro0_n[MAX_LRO_SESSIONS]; 981 953 unsigned long clubbed_frms_cnt; 982 954 unsigned long sending_both; 983 955 u8 lro; ··· 1142 1118 static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset); 1143 1119 static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr); 1144 1120 1145 - static int 1146 - s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, 1147 - struct RxD_t *rxdp, struct s2io_nic *sp); 1121 + static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, 1122 + u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp, 1123 + struct s2io_nic *sp); 1148 1124 static void clear_lro_session(struct lro *lro); 1149 1125 static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag); 1150 1126 static 
void update_L3L4_header(struct s2io_nic *sp, struct lro *lro);