Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: velocity: Rename vptr->dev to vptr->netdev

Improve the clarity of the code in preparation for converting the
dma functions to generic versions, which require a struct device *.

This makes it possible to store a 'struct device *dev' in the
velocity_info structure.

Signed-off-by: Tony Prisk <linux@prisktech.co.nz>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Tony Prisk and committed by David S. Miller.
a9683c94 4fc1ad6f

+35 -35
+33 -33
drivers/net/ethernet/via/via-velocity.c
··· 998 998 { 999 999 1000 1000 if (vptr->mii_status & VELOCITY_LINK_FAIL) { 1001 - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->dev->name); 1001 + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: failed to detect cable link\n", vptr->netdev->name); 1002 1002 } else if (vptr->options.spd_dpx == SPD_DPX_AUTO) { 1003 - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->dev->name); 1003 + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link auto-negotiation", vptr->netdev->name); 1004 1004 1005 1005 if (vptr->mii_status & VELOCITY_SPEED_1000) 1006 1006 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps"); ··· 1014 1014 else 1015 1015 VELOCITY_PRT(MSG_LEVEL_INFO, " half duplex\n"); 1016 1016 } else { 1017 - VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->dev->name); 1017 + VELOCITY_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: Link forced", vptr->netdev->name); 1018 1018 switch (vptr->options.spd_dpx) { 1019 1019 case SPD_DPX_1000_FULL: 1020 1020 VELOCITY_PRT(MSG_LEVEL_INFO, " speed 1000M bps full duplex\n"); ··· 1319 1319 case VELOCITY_INIT_RESET: 1320 1320 case VELOCITY_INIT_WOL: 1321 1321 1322 - netif_stop_queue(vptr->dev); 1322 + netif_stop_queue(vptr->netdev); 1323 1323 1324 1324 /* 1325 1325 * Reset RX to prevent RX pointer not on the 4X location ··· 1332 1332 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { 1333 1333 velocity_print_link_status(vptr); 1334 1334 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) 1335 - netif_wake_queue(vptr->dev); 1335 + netif_wake_queue(vptr->netdev); 1336 1336 } 1337 1337 1338 1338 enable_flow_control_ability(vptr); ··· 1354 1354 1355 1355 mac_eeprom_reload(regs); 1356 1356 for (i = 0; i < 6; i++) 1357 - writeb(vptr->dev->dev_addr[i], &(regs->PAR[i])); 1357 + writeb(vptr->netdev->dev_addr[i], &(regs->PAR[i])); 1358 1358 1359 1359 /* 1360 1360 * clear Pre_ACPI bit. 
··· 1377 1377 /* 1378 1378 * Set packet filter: Receive directed and broadcast address 1379 1379 */ 1380 - velocity_set_multi(vptr->dev); 1380 + velocity_set_multi(vptr->netdev); 1381 1381 1382 1382 /* 1383 1383 * Enable MII auto-polling ··· 1404 1404 writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT), &regs->CR0Set); 1405 1405 1406 1406 mii_status = velocity_get_opt_media_mode(vptr); 1407 - netif_stop_queue(vptr->dev); 1407 + netif_stop_queue(vptr->netdev); 1408 1408 1409 1409 mii_init(vptr, mii_status); 1410 1410 1411 1411 if (velocity_set_media_mode(vptr, mii_status) != VELOCITY_LINK_CHANGE) { 1412 1412 velocity_print_link_status(vptr); 1413 1413 if (!(vptr->mii_status & VELOCITY_LINK_FAIL)) 1414 - netif_wake_queue(vptr->dev); 1414 + netif_wake_queue(vptr->netdev); 1415 1415 } 1416 1416 1417 1417 enable_flow_control_ability(vptr); ··· 1474 1474 rx_ring_size, &pool_dma); 1475 1475 if (!pool) { 1476 1476 dev_err(&pdev->dev, "%s : DMA memory allocation failed.\n", 1477 - vptr->dev->name); 1477 + vptr->netdev->name); 1478 1478 return -ENOMEM; 1479 1479 } 1480 1480 ··· 1514 1514 struct rx_desc *rd = &(vptr->rx.ring[idx]); 1515 1515 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 1516 1516 1517 - rd_info->skb = netdev_alloc_skb(vptr->dev, vptr->rx.buf_sz + 64); 1517 + rd_info->skb = netdev_alloc_skb(vptr->netdev, vptr->rx.buf_sz + 64); 1518 1518 if (rd_info->skb == NULL) 1519 1519 return -ENOMEM; 1520 1520 ··· 1620 1620 1621 1621 if (velocity_rx_refill(vptr) != vptr->options.numrx) { 1622 1622 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_ERR 1623 - "%s: failed to allocate RX buffer.\n", vptr->dev->name); 1623 + "%s: failed to allocate RX buffer.\n", vptr->netdev->name); 1624 1624 velocity_free_rd_ring(vptr); 1625 1625 goto out; 1626 1626 } ··· 1809 1809 printk(KERN_ERR "TD structure error TDindex=%hx\n", readw(&regs->TDIdx[0])); 1810 1810 BYTE_REG_BITS_ON(TXESR_TDSTR, &regs->TXESR); 1811 1811 writew(TRDCSR_RUN, &regs->TDCSRClr); 1812 - netif_stop_queue(vptr->dev); 1812 + netif_stop_queue(vptr->netdev); 1813 1813 1814 1814 /* FIXME: port over the pci_device_failed code and use it 1815 1815 here */ ··· 1850 1850 1851 1851 if (linked) { 1852 1852 vptr->mii_status &= ~VELOCITY_LINK_FAIL; 1853 - netif_carrier_on(vptr->dev); 1853 + netif_carrier_on(vptr->netdev); 1854 1854 } else { 1855 1855 vptr->mii_status |= VELOCITY_LINK_FAIL; 1856 - netif_carrier_off(vptr->dev); 1856 + netif_carrier_off(vptr->netdev); 1857 1857 } 1858 1858 1859 1859 velocity_print_link_status(vptr); ··· 1867 1867 enable_mii_autopoll(regs); 1868 1868 1869 1869 if (vptr->mii_status & VELOCITY_LINK_FAIL) 1870 - netif_stop_queue(vptr->dev); 1870 + netif_stop_queue(vptr->netdev); 1871 1871 else 1872 - netif_wake_queue(vptr->dev); 1872 + netif_wake_queue(vptr->netdev); 1873 1873 1874 1874 } 1875 1875 if (status & ISR_MIBFI) ··· 1894 1894 int idx; 1895 1895 int works = 0; 1896 1896 struct velocity_td_info *tdinfo; 1897 - struct net_device_stats *stats = &vptr->dev->stats; 1897 + struct net_device_stats *stats = &vptr->netdev->stats; 1898 1898 1899 1899 for (qnum = 0; qnum < vptr->tx.numq; qnum++) { 1900 1900 for (idx = vptr->tx.tail[qnum]; vptr->tx.used[qnum] > 0; ··· 1939 1939 * Look to see if we should kick the transmit network 1940 1940 * layer for more work. 
1941 1941 */ 1942 - if (netif_queue_stopped(vptr->dev) && (full == 0) && 1942 + if (netif_queue_stopped(vptr->netdev) && (full == 0) && 1943 1943 (!(vptr->mii_status & VELOCITY_LINK_FAIL))) { 1944 - netif_wake_queue(vptr->dev); 1944 + netif_wake_queue(vptr->netdev); 1945 1945 } 1946 1946 return works; 1947 1947 } ··· 1989 1989 if (pkt_size < rx_copybreak) { 1990 1990 struct sk_buff *new_skb; 1991 1991 1992 - new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size); 1992 + new_skb = netdev_alloc_skb_ip_align(vptr->netdev, pkt_size); 1993 1993 if (new_skb) { 1994 1994 new_skb->ip_summed = rx_skb[0]->ip_summed; 1995 1995 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); ··· 2030 2030 static int velocity_receive_frame(struct velocity_info *vptr, int idx) 2031 2031 { 2032 2032 void (*pci_action)(struct pci_dev *, dma_addr_t, size_t, int); 2033 - struct net_device_stats *stats = &vptr->dev->stats; 2033 + struct net_device_stats *stats = &vptr->netdev->stats; 2034 2034 struct velocity_rd_info *rd_info = &(vptr->rx.info[idx]); 2035 2035 struct rx_desc *rd = &(vptr->rx.ring[idx]); 2036 2036 int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff; 2037 2037 struct sk_buff *skb; 2038 2038 2039 2039 if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) { 2040 - VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->dev->name); 2040 + VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame span multple RDs.\n", vptr->netdev->name); 2041 2041 stats->rx_length_errors++; 2042 2042 return -EINVAL; 2043 2043 } ··· 2075 2075 PCI_DMA_FROMDEVICE); 2076 2076 2077 2077 skb_put(skb, pkt_len - 4); 2078 - skb->protocol = eth_type_trans(skb, vptr->dev); 2078 + skb->protocol = eth_type_trans(skb, vptr->netdev); 2079 2079 2080 2080 if (rd->rdesc0.RSR & RSR_DETAG) { 2081 2081 u16 vid = swab16(le16_to_cpu(rd->rdesc1.PQTAG)); ··· 2100 2100 */ 2101 2101 static int velocity_rx_srv(struct velocity_info *vptr, int budget_left) 2102 2102 { 2103 - struct net_device_stats *stats = &vptr->dev->stats; 2103 + struct net_device_stats *stats = &vptr->netdev->stats; 2104 2104 int rd_curr = vptr->rx.curr; 2105 2105 int works = 0; 2106 2106 ··· 2292 2292 2293 2293 if ((new_mtu < VELOCITY_MIN_MTU) || new_mtu > (VELOCITY_MAX_MTU)) { 2294 2294 VELOCITY_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Invalid MTU.\n", 2295 - vptr->dev->name); 2295 + vptr->netdev->name); 2296 2296 ret = -EINVAL; 2297 2297 goto out_0; 2298 2298 } ··· 2314 2314 goto out_0; 2315 2315 } 2316 2316 2317 - tmp_vptr->dev = dev; 2317 + tmp_vptr->netdev = dev; 2318 2318 tmp_vptr->pdev = vptr->pdev; 2319 2319 tmp_vptr->options = vptr->options; 2320 2320 tmp_vptr->tx.numq = vptr->tx.numq; ··· 2692 2692 */ 2693 2693 static void velocity_print_info(struct velocity_info *vptr) 2694 2694 { 2695 - struct net_device *dev = vptr->dev; 2695 + struct net_device *dev = vptr->netdev; 2696 2696 2697 2697 printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id)); 2698 2698 printk(KERN_INFO "%s: Ethernet Address: %pM\n", ··· 2755 2755 2756 2756 velocity_init_info(pdev, vptr, info); 2757 2757 2758 - vptr->dev = dev; 2758 + vptr->netdev = dev; 2759 2759 2760 2760 ret = pci_enable_device(pdev); 2761 2761 if (ret < 0) ··· 3010 3010 struct velocity_info *vptr = netdev_priv(dev); 3011 3011 unsigned long flags; 3012 3012 3013 - if (!netif_running(vptr->dev)) 3013 + if (!netif_running(vptr->netdev)) 3014 3014 return 0; 3015 3015 3016 - netif_device_detach(vptr->dev); 3016 + netif_device_detach(vptr->netdev); 3017 3017 3018 3018 spin_lock_irqsave(&vptr->lock, flags); 3019 3019 pci_save_state(pdev); ··· 3078 3078 unsigned long flags; 3079 3079 int i; 3080 3080 3081 - if (!netif_running(vptr->dev)) 3081 + if (!netif_running(vptr->netdev)) 3082 3082 return 0; 3083 3083 3084 3084 pci_set_power_state(pdev, PCI_D0); ··· 3101 3101 3102 3102 mac_enable_int(vptr->mac_regs); 3103 3103 spin_unlock_irqrestore(&vptr->lock, flags); 3104 - netif_device_attach(vptr->dev); 3104 + netif_device_attach(vptr->netdev); 3105 3105 3106 3106 return 0; 3107 3107 }
+2 -2
drivers/net/ethernet/via/via-velocity.h
··· 1435 1435 1436 1436 struct velocity_info { 1437 1437 struct pci_dev *pdev; 1438 - struct net_device *dev; 1438 + struct net_device *netdev; 1439 1439 1440 1440 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; 1441 1441 u8 ip_addr[4]; ··· 1514 1514 int res = -ENOENT; 1515 1515 1516 1516 rcu_read_lock(); 1517 - in_dev = __in_dev_get_rcu(vptr->dev); 1517 + in_dev = __in_dev_get_rcu(vptr->netdev); 1518 1518 if (in_dev != NULL) { 1519 1519 ifa = (struct in_ifaddr *) in_dev->ifa_list; 1520 1520 if (ifa != NULL) {