Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tlan: wrap source lines

Make driver more readable on standard 80 col windows.

Signed-off-by: Stephen Hemminger <shemminger@vyatta.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>

Authored by Stephen Hemminger; committed by Jeff Garzik.
dfc2c0a6 a3ccc789

+202 -114
+187 -105
drivers/net/tlan.c
··· 13 13 * This software may be used and distributed according to the terms 14 14 * of the GNU General Public License, incorporated herein by reference. 15 15 * 16 - ** This file is best viewed/edited with columns>=132. 17 - * 18 16 ** Useful (if not required) reading: 19 17 * 20 18 * Texas Instruments, ThunderLAN Programmer's Guide, ··· 234 236 { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 235 237 { "Compaq Netelligent 10/100 TX PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 236 238 { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 237 - { "Compaq NetFlex-3/P", TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 239 + { "Compaq NetFlex-3/P", 240 + TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 238 241 { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 }, 239 - { "Compaq Netelligent Integrated 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 242 + { "Compaq Netelligent Integrated 10/100 TX UTP", 243 + TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 240 244 { "Compaq Netelligent Dual 10/100 TX PCI UTP", TLAN_ADAPTER_NONE, 0x83 }, 241 245 { "Compaq Netelligent 10/100 TX Embedded UTP", TLAN_ADAPTER_NONE, 0x83 }, 242 246 { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 }, ··· 246 246 { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xF8 }, 247 247 { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, 248 248 { "Compaq Netelligent 10 T/2 PCI UTP/Coax", TLAN_ADAPTER_NONE, 0x83 }, 249 - { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ 250 - TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 249 + { "Compaq NetFlex-3/E", 250 + TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */ 251 + TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 }, 251 252 { "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */ 252 253 }; 253 254 ··· 293 292 static struct net_device_stats *TLan_GetStats( struct net_device *); 294 293 static void TLan_SetMulticastList( 
struct net_device *); 295 294 static int TLan_ioctl( struct net_device *dev, struct ifreq *rq, int cmd); 296 - static int TLan_probe1( struct pci_dev *pdev, long ioaddr, int irq, int rev, const struct pci_device_id *ent); 295 + static int TLan_probe1( struct pci_dev *pdev, long ioaddr, 296 + int irq, int rev, const struct pci_device_id *ent); 297 297 static void TLan_tx_timeout( struct net_device *dev); 298 298 static void TLan_tx_timeout_work(struct work_struct *work); 299 299 static int tlan_init_one( struct pci_dev *pdev, const struct pci_device_id *ent); ··· 441 439 unregister_netdev( dev ); 442 440 443 441 if ( priv->dmaStorage ) { 444 - pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); 442 + pci_free_consistent(priv->pciDev, 443 + priv->dmaSize, priv->dmaStorage, 444 + priv->dmaStorageDMA ); 445 445 } 446 446 447 447 #ifdef CONFIG_PCI ··· 526 522 **************************************************************/ 527 523 528 524 static int __devinit TLan_probe1(struct pci_dev *pdev, 529 - long ioaddr, int irq, int rev, const struct pci_device_id *ent ) 525 + long ioaddr, int irq, int rev, 526 + const struct pci_device_id *ent ) 530 527 { 531 528 532 529 struct net_device *dev; ··· 613 608 /* Kernel parameters */ 614 609 if (dev->mem_start) { 615 610 priv->aui = dev->mem_start & 0x01; 616 - priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0 : (dev->mem_start & 0x06) >> 1; 617 - priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0 : (dev->mem_start & 0x18) >> 3; 611 + priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0 612 + : (dev->mem_start & 0x06) >> 1; 613 + priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 
0 614 + : (dev->mem_start & 0x18) >> 3; 618 615 619 616 if (priv->speed == 0x1) { 620 617 priv->speed = TLAN_SPEED_10; ··· 696 689 dev = TLan_Eisa_Devices; 697 690 priv = netdev_priv(dev); 698 691 if (priv->dmaStorage) { 699 - pci_free_consistent(priv->pciDev, priv->dmaSize, priv->dmaStorage, priv->dmaStorageDMA ); 692 + pci_free_consistent(priv->pciDev, priv->dmaSize, 693 + priv->dmaStorage, priv->dmaStorageDMA ); 700 694 } 701 695 release_region( dev->base_addr, 0x10); 702 696 unregister_netdev( dev ); ··· 752 744 /* Loop through all slots of the EISA bus */ 753 745 for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { 754 746 755 - TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); 756 - TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); 747 + TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 748 + (int) ioaddr + 0xC80, inw(ioaddr + EISA_ID)); 749 + TLAN_DBG(TLAN_DEBUG_PROBE,"EISA_ID 0x%4x: 0x%4x\n", 750 + (int) ioaddr + 0xC82, inw(ioaddr + EISA_ID2)); 757 751 758 752 759 753 TLAN_DBG(TLAN_DEBUG_PROBE, "Probing for EISA adapter at IO: 0x%4x : ", ··· 882 872 883 873 if ( bbuf ) { 884 874 priv->rxBuffer = (u8 *) ( priv->txList + TLAN_NUM_TX_LISTS ); 885 - priv->rxBufferDMA =priv->txListDMA + sizeof(TLanList) * TLAN_NUM_TX_LISTS; 886 - priv->txBuffer = priv->rxBuffer + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 887 - priv->txBufferDMA = priv->rxBufferDMA + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 875 + priv->rxBufferDMA =priv->txListDMA 876 + + sizeof(TLanList) * TLAN_NUM_TX_LISTS; 877 + priv->txBuffer = priv->rxBuffer 878 + + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 879 + priv->txBufferDMA = priv->rxBufferDMA 880 + + ( TLAN_NUM_RX_LISTS * TLAN_MAX_FRAME_SIZE ); 888 881 } 889 882 890 883 err = 0; ··· 951 938 dev->name, dev ); 952 939 953 940 if ( err ) { 954 - printk(KERN_ERR "TLAN: Cannot open %s because IRQ %d is already in use.\n", dev->name, dev->irq 
); 941 + pr_err("TLAN: Cannot open %s because IRQ %d is already in use.\n", 942 + dev->name, dev->irq ); 955 943 return err; 956 944 } 957 945 ··· 966 952 TLan_ReadAndClearStats( dev, TLAN_IGNORE ); 967 953 TLan_ResetAdapter( dev ); 968 954 969 - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", dev->name, priv->tlanRev ); 955 + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n", 956 + dev->name, priv->tlanRev ); 970 957 971 958 return 0; 972 959 ··· 1005 990 1006 991 1007 992 case SIOCGMIIREG: /* Read MII PHY register. */ 1008 - TLan_MiiReadReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, &data->val_out); 993 + TLan_MiiReadReg(dev, data->phy_id & 0x1f, 994 + data->reg_num & 0x1f, &data->val_out); 1009 995 return 0; 1010 996 1011 997 1012 998 case SIOCSMIIREG: /* Write MII PHY register. */ 1013 999 if (!capable(CAP_NET_ADMIN)) 1014 1000 return -EPERM; 1015 - TLan_MiiWriteReg(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); 1001 + TLan_MiiWriteReg(dev, data->phy_id & 0x1f, 1002 + data->reg_num & 0x1f, data->val_in); 1016 1003 return 0; 1017 1004 default: 1018 1005 return -EOPNOTSUPP; ··· 1099 1082 unsigned long flags; 1100 1083 1101 1084 if ( ! 
priv->phyOnline ) { 1102 - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", dev->name ); 1085 + TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n", 1086 + dev->name ); 1103 1087 dev_kfree_skb_any(skb); 1104 1088 return 0; 1105 1089 } ··· 1112 1094 tail_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txTail; 1113 1095 1114 1096 if ( tail_list->cStat != TLAN_CSTAT_UNUSED ) { 1115 - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", dev->name, priv->txHead, priv->txTail ); 1097 + TLAN_DBG( TLAN_DEBUG_TX, 1098 + "TRANSMIT: %s is busy (Head=%d Tail=%d)\n", 1099 + dev->name, priv->txHead, priv->txTail ); 1116 1100 netif_stop_queue(dev); 1117 1101 priv->txBusyCount++; 1118 1102 return 1; ··· 1126 1106 tail_buffer = priv->txBuffer + ( priv->txTail * TLAN_MAX_FRAME_SIZE ); 1127 1107 skb_copy_from_linear_data(skb, tail_buffer, skb->len); 1128 1108 } else { 1129 - tail_list->buffer[0].address = pci_map_single(priv->pciDev, skb->data, skb->len, PCI_DMA_TODEVICE); 1109 + tail_list->buffer[0].address = pci_map_single(priv->pciDev, 1110 + skb->data, skb->len, 1111 + PCI_DMA_TODEVICE); 1130 1112 TLan_StoreSKB(tail_list, skb); 1131 1113 } 1132 1114 ··· 1141 1119 tail_list->cStat = TLAN_CSTAT_READY; 1142 1120 if ( ! 
priv->txInProgress ) { 1143 1121 priv->txInProgress = 1; 1144 - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); 1122 + TLAN_DBG( TLAN_DEBUG_TX, 1123 + "TRANSMIT: Starting TX on buffer %d\n", priv->txTail ); 1145 1124 outl( tail_list_phys, dev->base_addr + TLAN_CH_PARM ); 1146 1125 outl( TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD ); 1147 1126 } else { 1148 - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", priv->txTail ); 1127 + TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Adding buffer %d to TX channel\n", 1128 + priv->txTail ); 1149 1129 if ( priv->txTail == 0 ) { 1150 - ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward = tail_list_phys; 1130 + ( priv->txList + ( TLAN_NUM_TX_LISTS - 1 ) )->forward 1131 + = tail_list_phys; 1151 1132 } else { 1152 - ( priv->txList + ( priv->txTail - 1 ) )->forward = tail_list_phys; 1133 + ( priv->txList + ( priv->txTail - 1 ) )->forward 1134 + = tail_list_phys; 1153 1135 } 1154 1136 } 1155 1137 spin_unlock_irqrestore(&priv->lock, flags); ··· 1286 1260 /* Should only read stats if open ? 
*/ 1287 1261 TLan_ReadAndClearStats( dev, TLAN_RECORD ); 1288 1262 1289 - TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, priv->rxEocCount ); 1290 - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, priv->txBusyCount ); 1263 + TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name, 1264 + priv->rxEocCount ); 1265 + TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name, 1266 + priv->txBusyCount ); 1291 1267 if ( debug & TLAN_DEBUG_GNRL ) { 1292 1268 TLan_PrintDio( dev->base_addr ); 1293 1269 TLan_PhyPrint( dev ); ··· 1339 1311 1340 1312 if ( dev->flags & IFF_PROMISC ) { 1341 1313 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1342 - TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); 1314 + TLan_DioWrite8( dev->base_addr, 1315 + TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF ); 1343 1316 } else { 1344 1317 tmp = TLan_DioRead8( dev->base_addr, TLAN_NET_CMD ); 1345 - TLan_DioWrite8( dev->base_addr, TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); 1318 + TLan_DioWrite8( dev->base_addr, 1319 + TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF ); 1346 1320 if ( dev->flags & IFF_ALLMULTI ) { 1347 1321 for ( i = 0; i < 3; i++ ) 1348 1322 TLan_SetMac( dev, i + 1, NULL ); ··· 1353 1323 } else { 1354 1324 for ( i = 0; i < dev->mc_count; i++ ) { 1355 1325 if ( i < 3 ) { 1356 - TLan_SetMac( dev, i + 1, (char *) &dmi->dmi_addr ); 1326 + TLan_SetMac( dev, i + 1, 1327 + (char *) &dmi->dmi_addr ); 1357 1328 } else { 1358 1329 offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr ); 1359 1330 if ( offset < 32 ) ··· 1421 1390 u32 ack = 0; 1422 1391 u16 tmpCStat; 1423 1392 1424 - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", priv->txHead, priv->txTail ); 1393 + TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n", 1394 + priv->txHead, priv->txTail ); 1425 1395 head_list = priv->txList + priv->txHead; 1426 1396 1427 1397 while (((tmpCStat = head_list->cStat ) & TLAN_CSTAT_FRM_CMP) && 
(ack < 255)) { 1428 1398 ack++; 1429 1399 if ( ! bbuf ) { 1430 1400 struct sk_buff *skb = TLan_GetSKB(head_list); 1431 - pci_unmap_single(priv->pciDev, head_list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); 1401 + pci_unmap_single(priv->pciDev, head_list->buffer[0].address, 1402 + skb->len, PCI_DMA_TODEVICE); 1432 1403 dev_kfree_skb_any(skb); 1433 1404 head_list->buffer[8].address = 0; 1434 1405 head_list->buffer[9].address = 0; ··· 1451 1418 printk(KERN_INFO "TLAN: Received interrupt for uncompleted TX frame.\n"); 1452 1419 1453 1420 if ( eoc ) { 1454 - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", priv->txHead, priv->txTail ); 1421 + TLAN_DBG( TLAN_DEBUG_TX, 1422 + "TRANSMIT: Handling TX EOC (Head=%d Tail=%d)\n", 1423 + priv->txHead, priv->txTail ); 1455 1424 head_list = priv->txList + priv->txHead; 1456 1425 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1457 1426 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { ··· 1465 1430 } 1466 1431 1467 1432 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1468 - TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1433 + TLan_DioWrite8( dev->base_addr, 1434 + TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1469 1435 if ( priv->timer.function == NULL ) { 1470 1436 priv->timer.function = &TLan_Timer; 1471 1437 priv->timer.data = (unsigned long) dev; ··· 1551 1515 u16 tmpCStat; 1552 1516 dma_addr_t head_list_phys; 1553 1517 1554 - TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); 1518 + TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOF (Head=%d Tail=%d)\n", 1519 + priv->rxHead, priv->rxTail ); 1555 1520 head_list = priv->rxList + priv->rxHead; 1556 1521 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1557 1522 ··· 1565 1528 1566 1529 if (bbuf) { 1567 1530 skb = netdev_alloc_skb(dev, frameSize + 7); 1568 - if ( skb ) { 1569 - head_buffer = 
priv->rxBuffer + (priv->rxHead * TLAN_MAX_FRAME_SIZE); 1570 - skb_reserve(skb, 2); 1571 - pci_dma_sync_single_for_cpu(priv->pciDev, 1572 - frameDma, frameSize, 1573 - PCI_DMA_FROMDEVICE); 1574 - skb_copy_from_linear_data(skb, head_buffer, frameSize); 1575 - skb_put(skb, frameSize); 1576 - dev->stats.rx_bytes += frameSize; 1531 + if ( !skb ) 1532 + goto drop_and_reuse; 1577 1533 1578 - skb->protocol = eth_type_trans( skb, dev ); 1579 - netif_rx( skb ); 1580 - } 1534 + head_buffer = priv->rxBuffer 1535 + + (priv->rxHead * TLAN_MAX_FRAME_SIZE); 1536 + skb_reserve(skb, 2); 1537 + pci_dma_sync_single_for_cpu(priv->pciDev, 1538 + frameDma, frameSize, 1539 + PCI_DMA_FROMDEVICE); 1540 + skb_copy_from_linear_data(skb, head_buffer, frameSize); 1541 + skb_put(skb, frameSize); 1542 + dev->stats.rx_bytes += frameSize; 1543 + 1544 + skb->protocol = eth_type_trans( skb, dev ); 1545 + netif_rx( skb ); 1581 1546 } else { 1582 1547 struct sk_buff *new_skb; 1583 1548 1584 1549 new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 ); 1585 - if ( new_skb ) { 1586 - skb = TLan_GetSKB(head_list); 1587 - pci_unmap_single(priv->pciDev, frameDma, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 1588 - skb_put( skb, frameSize ); 1550 + if ( !new_skb ) 1551 + goto drop_and_reuse; 1589 1552 1590 - dev->stats.rx_bytes += frameSize; 1553 + skb = TLan_GetSKB(head_list); 1554 + pci_unmap_single(priv->pciDev, frameDma, 1555 + TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 1556 + skb_put( skb, frameSize ); 1591 1557 1592 - skb->protocol = eth_type_trans( skb, dev ); 1593 - netif_rx( skb ); 1558 + dev->stats.rx_bytes += frameSize; 1594 1559 1595 - skb_reserve( new_skb, NET_IP_ALIGN ); 1596 - head_list->buffer[0].address = pci_map_single(priv->pciDev, new_skb->data, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 1560 + skb->protocol = eth_type_trans( skb, dev ); 1561 + netif_rx( skb ); 1597 1562 1598 - TLan_StoreSKB(head_list, new_skb); 1599 - } 1563 + skb_reserve( new_skb, NET_IP_ALIGN ); 1564 + 
head_list->buffer[0].address = pci_map_single(priv->pciDev, 1565 + new_skb->data, 1566 + TLAN_MAX_FRAME_SIZE, 1567 + PCI_DMA_FROMDEVICE); 1568 + 1569 + TLan_StoreSKB(head_list, new_skb); 1600 1570 1601 1571 } 1602 - 1572 + drop_and_reuse: 1603 1573 head_list->forward = 0; 1604 1574 head_list->cStat = 0; 1605 1575 tail_list = priv->rxList + priv->rxTail; ··· 1622 1578 printk(KERN_INFO "TLAN: Received interrupt for uncompleted RX frame.\n"); 1623 1579 1624 1580 1625 - 1626 - 1627 1581 if ( eoc ) { 1628 - TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", priv->rxHead, priv->rxTail ); 1582 + TLAN_DBG( TLAN_DEBUG_RX, 1583 + "RECEIVE: Handling RX EOC (Head=%d Tail=%d)\n", 1584 + priv->rxHead, priv->rxTail ); 1629 1585 head_list = priv->rxList + priv->rxHead; 1630 1586 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1631 1587 outl(head_list_phys, dev->base_addr + TLAN_CH_PARM ); ··· 1634 1590 } 1635 1591 1636 1592 if ( priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED ) { 1637 - TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1593 + TLan_DioWrite8( dev->base_addr, 1594 + TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT ); 1638 1595 if ( priv->timer.function == NULL ) { 1639 1596 priv->timer.function = &TLan_Timer; 1640 1597 priv->timer.data = (unsigned long) dev; ··· 1713 1668 1714 1669 host_int = 0; 1715 1670 if ( priv->tlanRev < 0x30 ) { 1716 - TLAN_DBG( TLAN_DEBUG_TX, "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", priv->txHead, priv->txTail ); 1671 + TLAN_DBG( TLAN_DEBUG_TX, 1672 + "TRANSMIT: Handling TX EOC (Head=%d Tail=%d) -- IRQ\n", 1673 + priv->txHead, priv->txTail ); 1717 1674 head_list = priv->txList + priv->txHead; 1718 1675 head_list_phys = priv->txListDMA + sizeof(TLanList) * priv->txHead; 1719 1676 if ( ( head_list->cStat & TLAN_CSTAT_READY ) == TLAN_CSTAT_READY ) { ··· 1783 1736 net_sts = TLan_DioRead8( dev->base_addr, TLAN_NET_STS ); 1784 1737 if ( net_sts ) { 1785 1738 
TLan_DioWrite8( dev->base_addr, TLAN_NET_STS, net_sts ); 1786 - TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", dev->name, (unsigned) net_sts ); 1739 + TLAN_DBG( TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n", 1740 + dev->name, (unsigned) net_sts ); 1787 1741 } 1788 1742 if ( ( net_sts & TLAN_NET_STS_MIRQ ) && ( priv->phyNum == 0 ) ) { 1789 1743 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_STS, &tlphy_sts ); 1790 1744 TLan_MiiReadReg( dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl ); 1791 - if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1745 + if ( ! ( tlphy_sts & TLAN_TS_POLOK ) && 1746 + ! ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1792 1747 tlphy_ctl |= TLAN_TC_SWAPOL; 1793 1748 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1794 - } else if ( ( tlphy_sts & TLAN_TS_POLOK ) && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1749 + } else if ( ( tlphy_sts & TLAN_TS_POLOK ) 1750 + && ( tlphy_ctl & TLAN_TC_SWAPOL ) ) { 1795 1751 tlphy_ctl &= ~TLAN_TC_SWAPOL; 1796 1752 TLan_MiiWriteReg( dev, phy, TLAN_TLPHY_CTL, tlphy_ctl); 1797 1753 } ··· 1839 1789 u32 ack = 1; 1840 1790 1841 1791 if ( priv->tlanRev < 0x30 ) { 1842 - TLAN_DBG( TLAN_DEBUG_RX, "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", priv->rxHead, priv->rxTail ); 1792 + TLAN_DBG( TLAN_DEBUG_RX, 1793 + "RECEIVE: Handling RX EOC (Head=%d Tail=%d) -- IRQ\n", 1794 + priv->rxHead, priv->rxTail ); 1843 1795 head_list_phys = priv->rxListDMA + sizeof(TLanList) * priv->rxHead; 1844 1796 outl( head_list_phys, dev->base_addr + TLAN_CH_PARM ); 1845 1797 ack |= TLAN_HC_GO | TLAN_HC_RT; ··· 1932 1880 if ( priv->timer.function == NULL ) { 1933 1881 elapsed = jiffies - priv->timerSetAt; 1934 1882 if ( elapsed >= TLAN_TIMER_ACT_DELAY ) { 1935 - TLan_DioWrite8( dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK ); 1883 + TLan_DioWrite8( dev->base_addr, 1884 + TLAN_LED_REG, TLAN_LED_LINK ); 1936 1885 } else { 1937 1886 priv->timer.function = &TLan_Timer; 1938 - priv->timer.expires = priv->timerSetAt + TLAN_TIMER_ACT_DELAY; 1887 + 
priv->timer.expires = priv->timerSetAt 1888 + + TLAN_TIMER_ACT_DELAY; 1939 1889 spin_unlock_irqrestore(&priv->lock, flags); 1940 1890 add_timer( &priv->timer ); 1941 1891 break; ··· 1992 1938 list = priv->txList + i; 1993 1939 list->cStat = TLAN_CSTAT_UNUSED; 1994 1940 if ( bbuf ) { 1995 - list->buffer[0].address = priv->txBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); 1941 + list->buffer[0].address = priv->txBufferDMA 1942 + + ( i * TLAN_MAX_FRAME_SIZE ); 1996 1943 } else { 1997 1944 list->buffer[0].address = 0; 1998 1945 } ··· 2012 1957 list->frameSize = TLAN_MAX_FRAME_SIZE; 2013 1958 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 2014 1959 if ( bbuf ) { 2015 - list->buffer[0].address = priv->rxBufferDMA + ( i * TLAN_MAX_FRAME_SIZE ); 1960 + list->buffer[0].address = priv->rxBufferDMA 1961 + + ( i * TLAN_MAX_FRAME_SIZE ); 2016 1962 } else { 2017 1963 skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 ); 2018 1964 if ( !skb ) { 2019 - printk( "TLAN: Couldn't allocate memory for received data.\n" ); 1965 + pr_err("TLAN: out of memory for received data.\n" ); 2020 1966 break; 2021 1967 } 2022 1968 2023 1969 skb_reserve( skb, NET_IP_ALIGN ); 2024 - list->buffer[0].address = pci_map_single(priv->pciDev, t, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 1970 + list->buffer[0].address = pci_map_single(priv->pciDev, t, 1971 + TLAN_MAX_FRAME_SIZE, 1972 + PCI_DMA_FROMDEVICE); 2025 1973 TLan_StoreSKB(list, skb); 2026 1974 } 2027 1975 list->buffer[1].count = 0; ··· 2054 1996 list = priv->txList + i; 2055 1997 skb = TLan_GetSKB(list); 2056 1998 if ( skb ) { 2057 - pci_unmap_single(priv->pciDev, list->buffer[0].address, skb->len, PCI_DMA_TODEVICE); 1999 + pci_unmap_single(priv->pciDev, 2000 + list->buffer[0].address, skb->len, 2001 + PCI_DMA_TODEVICE); 2058 2002 dev_kfree_skb_any( skb ); 2059 2003 list->buffer[8].address = 0; 2060 2004 list->buffer[9].address = 0; ··· 2067 2007 list = priv->rxList + i; 2068 2008 skb = TLan_GetSKB(list); 2069 2009 if ( skb ) { 2070 - 
pci_unmap_single(priv->pciDev, list->buffer[0].address, TLAN_MAX_FRAME_SIZE, PCI_DMA_FROMDEVICE); 2010 + pci_unmap_single(priv->pciDev, 2011 + list->buffer[0].address, 2012 + TLAN_MAX_FRAME_SIZE, 2013 + PCI_DMA_FROMDEVICE); 2071 2014 dev_kfree_skb_any( skb ); 2072 2015 list->buffer[8].address = 0; 2073 2016 list->buffer[9].address = 0; ··· 2101 2038 u32 data0, data1; 2102 2039 int i; 2103 2040 2104 - printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", io_base ); 2041 + printk( "TLAN: Contents of internal registers for io base 0x%04hx.\n", 2042 + io_base ); 2105 2043 printk( "TLAN: Off. +0 +4\n" ); 2106 2044 for ( i = 0; i < 0x4C; i+= 8 ) { 2107 2045 data0 = TLan_DioRead32( io_base, i ); ··· 2142 2078 printk( "TLAN: Frame Size = 0x%04hx\n", list->frameSize ); 2143 2079 /* for ( i = 0; i < 10; i++ ) { */ 2144 2080 for ( i = 0; i < 2; i++ ) { 2145 - printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", i, list->buffer[i].count, list->buffer[i].address ); 2081 + printk( "TLAN: Buffer[%d].count, addr = 0x%08x, 0x%08x\n", 2082 + i, list->buffer[i].count, list->buffer[i].address ); 2146 2083 } 2147 2084 2148 2085 } /* TLan_PrintList */ ··· 2359 2294 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &tlphy_id1 ); 2360 2295 TLan_MiiReadReg( dev, phy, MII_GEN_ID_LO, &tlphy_id2 ); 2361 2296 2362 - if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || ( priv->aui ) ) { 2297 + if ( ( priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY ) || 2298 + ( priv->aui ) ) { 2363 2299 status = MII_GS_LINK; 2364 2300 printk( "TLAN: %s: Link forced.\n", dev->name ); 2365 2301 } else { 2366 2302 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2367 2303 udelay( 1000 ); 2368 2304 TLan_MiiReadReg( dev, phy, MII_GEN_STS, &status ); 2369 - if ( (status & MII_GS_LINK) && /* We only support link info on Nat.Sem. PHY's */ 2305 + if ( (status & MII_GS_LINK) && 2306 + /* We only support link info on Nat.Sem. 
PHY's */ 2370 2307 (tlphy_id1 == NAT_SEM_ID1) && 2371 2308 (tlphy_id2 == NAT_SEM_ID2) ) { 2372 2309 TLan_MiiReadReg( dev, phy, MII_AN_LPA, &partner ); ··· 2377 2310 printk( "TLAN: %s: Link active with ", dev->name ); 2378 2311 if (!(tlphy_par & TLAN_PHY_AN_EN_STAT)) { 2379 2312 printk( "forced 10%sMbps %s-Duplex\n", 2380 - tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2381 - tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2313 + tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2314 + tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2382 2315 } else { 2383 2316 printk( "AutoNegotiation enabled, at 10%sMbps %s-Duplex\n", 2384 - tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2385 - tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2317 + tlphy_par & TLAN_PHY_SPEED_100 ? "" : "0", 2318 + tlphy_par & TLAN_PHY_DUPLEX_FULL ? "Full" : "Half"); 2386 2319 printk("TLAN: Partner capability: "); 2387 2320 for (i = 5; i <= 10; i++) 2388 2321 if (partner & (1<<i)) ··· 2423 2356 outl( TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD ); 2424 2357 netif_carrier_on(dev); 2425 2358 } else { 2426 - printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", dev->name ); 2359 + printk( "TLAN: %s: Link inactive, will retry in 10 secs...\n", 2360 + dev->name ); 2427 2361 TLan_SetTimer( dev, (10*HZ), TLAN_TIMER_FINISH_RESET ); 2428 2362 return; 2429 2363 } ··· 2464 2396 2465 2397 if ( mac != NULL ) { 2466 2398 for ( i = 0; i < 6; i++ ) 2467 - TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, mac[i] ); 2399 + TLan_DioWrite8( dev->base_addr, 2400 + TLAN_AREG_0 + areg + i, mac[i] ); 2468 2401 } else { 2469 2402 for ( i = 0; i < 6; i++ ) 2470 - TLan_DioWrite8( dev->base_addr, TLAN_AREG_0 + areg + i, 0 ); 2403 + TLan_DioWrite8( dev->base_addr, 2404 + TLAN_AREG_0 + areg + i, 0 ); 2471 2405 } 2472 2406 2473 2407 } /* TLan_SetMac */ ··· 2575 2505 TLan_MiiReadReg( dev, phy, MII_GEN_CTL, &control ); 2576 2506 TLan_MiiReadReg( dev, phy, MII_GEN_ID_HI, &hi ); 2577 2507 TLan_MiiReadReg( 
dev, phy, MII_GEN_ID_LO, &lo ); 2578 - if ( ( control != 0xFFFF ) || ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) { 2579 - TLAN_DBG( TLAN_DEBUG_GNRL, "PHY found at %02x %04x %04x %04x\n", phy, control, hi, lo ); 2580 - if ( ( priv->phy[1] == TLAN_PHY_NONE ) && ( phy != TLAN_PHY_MAX_ADDR ) ) { 2508 + if ( ( control != 0xFFFF ) || 2509 + ( hi != 0xFFFF ) || ( lo != 0xFFFF ) ) { 2510 + TLAN_DBG( TLAN_DEBUG_GNRL, 2511 + "PHY found at %02x %04x %04x %04x\n", 2512 + phy, control, hi, lo ); 2513 + if ( ( priv->phy[1] == TLAN_PHY_NONE ) && 2514 + ( phy != TLAN_PHY_MAX_ADDR ) ) { 2581 2515 priv->phy[1] = phy; 2582 2516 } 2583 2517 } ··· 2609 2535 value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE; 2610 2536 TLan_MiiSync( dev->base_addr ); 2611 2537 TLan_MiiWriteReg( dev, priv->phy[priv->phyNum], MII_GEN_CTL, value ); 2612 - if ( ( priv->phyNum == 0 ) && ( priv->phy[1] != TLAN_PHY_NONE ) && ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) { 2538 + if ( ( priv->phyNum == 0 ) && 2539 + ( priv->phy[1] != TLAN_PHY_NONE ) && 2540 + ( ! ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) ) ) { 2613 2541 TLan_MiiSync( dev->base_addr ); 2614 2542 TLan_MiiWriteReg( dev, priv->phy[1], MII_GEN_CTL, value ); 2615 2543 } ··· 2784 2708 * more time. Perhaps we should fail after a while. 
2785 2709 */ 2786 2710 if (!priv->neg_be_verbose++) { 2787 - printk(KERN_INFO "TLAN: Giving autonegotiation more time.\n"); 2788 - printk(KERN_INFO "TLAN: Please check that your adapter has\n"); 2789 - printk(KERN_INFO "TLAN: been properly connected to a HUB or Switch.\n"); 2790 - printk(KERN_INFO "TLAN: Trying to establish link in the background...\n"); 2711 + pr_info("TLAN: Giving autonegotiation more time.\n"); 2712 + pr_info("TLAN: Please check that your adapter has\n"); 2713 + pr_info("TLAN: been properly connected to a HUB or Switch.\n"); 2714 + pr_info("TLAN: Trying to establish link in the background...\n"); 2791 2715 } 2792 2716 TLan_SetTimer( dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN ); 2793 2717 return; ··· 2803 2727 priv->tlanFullDuplex = TRUE; 2804 2728 } 2805 2729 2806 - if ( ( ! ( mode & 0x0180 ) ) && ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && ( priv->phyNum != 0 ) ) { 2730 + if ( ( ! ( mode & 0x0180 ) ) && 2731 + ( priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10 ) && 2732 + ( priv->phyNum != 0 ) ) { 2807 2733 priv->phyNum = 0; 2808 2734 data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN; 2809 2735 TLan_DioWrite16( dev->base_addr, TLAN_NET_CONFIG, data ); ··· 2814 2736 } 2815 2737 2816 2738 if ( priv->phyNum == 0 ) { 2817 - if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || ( an_adv & an_lpa & 0x0040 ) ) { 2818 - TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB | MII_GC_DUPLEX ); 2819 - printk( "TLAN: Starting internal PHY with FULL-DUPLEX\n" ); 2739 + if ( ( priv->duplex == TLAN_DUPLEX_FULL ) || 2740 + ( an_adv & an_lpa & 0x0040 ) ) { 2741 + TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 2742 + MII_GC_AUTOENB | MII_GC_DUPLEX ); 2743 + pr_info("TLAN: Starting internal PHY with FULL-DUPLEX\n" ); 2820 2744 } else { 2821 2745 TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, MII_GC_AUTOENB ); 2822 - printk( "TLAN: Starting internal PHY with HALF-DUPLEX\n" ); 2746 + pr_info( "TLAN: Starting internal PHY with HALF-DUPLEX\n" ); 2823 2747 } 
2824 2748 } 2825 2749 ··· 3229 3149 TLan_SetBit( TLAN_NET_SIO_ETXEN, sio ); 3230 3150 3231 3151 if ( ( ! err ) && stop ) { 3232 - TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */ 3152 + /* STOP, raise data while clock is high */ 3153 + TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3233 3154 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3234 3155 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3235 3156 } ··· 3293 3212 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); /* No ack = 1 (?) */ 3294 3213 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3295 3214 TLan_ClearBit( TLAN_NET_SIO_ECLOK, sio ); 3296 - TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); /* STOP, raise data while clock is high */ 3215 + /* STOP, raise data while clock is high */ 3216 + TLan_ClearBit( TLAN_NET_SIO_EDATA, sio ); 3297 3217 TLan_SetBit( TLAN_NET_SIO_ECLOK, sio ); 3298 3218 TLan_SetBit( TLAN_NET_SIO_EDATA, sio ); 3299 3219 }
+15 -9
drivers/net/tlan.h
··· 13 13 * This software may be used and distributed according to the terms 14 14 * of the GNU General Public License, incorporated herein by reference. 15 15 * 16 - ** This file is best viewed/edited with tabstop=4, colums>=132 17 - * 18 16 * 19 17 * Dec 10, 1999 Torben Mathiasen <torben.mathiasen@compaq.com> 20 18 * New Maintainer ··· 43 45 #define TLAN_IGNORE 0 44 46 #define TLAN_RECORD 1 45 47 46 - #define TLAN_DBG(lvl, format, args...) if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); 48 + #define TLAN_DBG(lvl, format, args...) \ 49 + do { if (debug&lvl) printk(KERN_DEBUG "TLAN: " format, ##args ); } while(0) 50 + 47 51 #define TLAN_DEBUG_GNRL 0x0001 48 52 #define TLAN_DEBUG_TX 0x0002 49 53 #define TLAN_DEBUG_RX 0x0004 ··· 515 515 * xor( a, xor( b, xor( c, xor( d, xor( e, xor( f, xor( g, h ) ) ) ) ) ) ) 516 516 * #define DA( a, bit ) ( ( (u8) a[bit/8] ) & ( (u8) ( 1 << bit%8 ) ) ) 517 517 * 518 - * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), DA(a,30), DA(a,36), DA(a,42) ); 519 - * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), DA(a,31), DA(a,37), DA(a,43) ) << 1; 520 - * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), DA(a,32), DA(a,38), DA(a,44) ) << 2; 521 - * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), DA(a,33), DA(a,39), DA(a,45) ) << 3; 522 - * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), DA(a,34), DA(a,40), DA(a,46) ) << 4; 523 - * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), DA(a,35), DA(a,41), DA(a,47) ) << 5; 518 + * hash = XOR8( DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24), 519 + * DA(a,30), DA(a,36), DA(a,42) ); 520 + * hash |= XOR8( DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), 521 + * DA(a,31), DA(a,37), DA(a,43) ) << 1; 522 + * hash |= XOR8( DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), 523 + * DA(a,32), DA(a,38), DA(a,44) ) << 2; 524 + * hash |= XOR8( DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), 525 + * DA(a,33), 
DA(a,39), DA(a,45) ) << 3; 526 + * hash |= XOR8( DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), 527 + * DA(a,34), DA(a,40), DA(a,46) ) << 4; 528 + * hash |= XOR8( DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), 529 + * DA(a,35), DA(a,41), DA(a,47) ) << 5; 524 530 * 525 531 */ 526 532 static inline u32 TLan_HashFunc( const u8 *a )