[NET]: Remove gratuitous use of skb->tail in network drivers.

Many drivers use skb->tail unnecessarily.

In these situations, the code roughly looks like:

skb = dev_alloc_skb(...);

[optional] skb_reserve(skb, ...);

... skb->tail ...

But even if the skb_reserve() happens, skb->data equals
skb->tail. So it doesn't make any sense to use anything
other than skb->data in these cases.
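
To make the invariant concrete, here is a minimal userspace
sketch of the 2.6-era sk_buff pointer layout. The skb_model
struct and model_*() helpers are illustrative stand-ins, not
the kernel implementation, but the pointer arithmetic matches
what dev_alloc_skb() and skb_reserve() do:

#include <assert.h>
#include <stdlib.h>

/* Illustrative stand-in for the 2.6-era struct sk_buff pointers. */
struct skb_model {
	unsigned char *head;	/* start of the allocated buffer */
	unsigned char *data;	/* start of packet data */
	unsigned char *tail;	/* end of packet data */
	unsigned char *end;	/* end of the allocated buffer */
};

/* dev_alloc_skb() leaves data and tail pointing at the same byte. */
static struct skb_model *model_alloc_skb(unsigned int size)
{
	struct skb_model *skb = malloc(sizeof(*skb));

	skb->head = malloc(size);
	skb->data = skb->head;
	skb->tail = skb->head;
	skb->end  = skb->head + size;
	return skb;
}

/* skb_reserve() advances data AND tail, so they remain equal. */
static void model_skb_reserve(struct skb_model *skb, int len)
{
	skb->data += len;
	skb->tail += len;
}

int main(void)
{
	struct skb_model *skb = model_alloc_skb(1536);

	model_skb_reserve(skb, 2);	/* align the IP header */
	assert(skb->data == skb->tail);	/* holds until skb_put() */

	free(skb->head);
	free(skb);
	return 0;
}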

Another case was the s2io.c driver directly mucking with
the skb->data and skb->tail pointers. It really just wanted
to do an skb_reserve(), so that's what the code was changed
to do instead.
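
For illustration, the s2io.c replacement boils down to the
helper below; align_rx_skb() is a hypothetical name, and
ALIGN_SIZE is assumed to be a power-of-two-minus-one mask as
in s2io.h. Consuming the misalignment via skb_reserve() keeps
skb->data and skb->tail in step instead of rewriting them by
hand:

#include <linux/skbuff.h>

/* Hypothetical helper mirroring the new s2io.c code: round
 * skb->data up to the next (ALIGN_SIZE + 1)-byte boundary with
 * skb_reserve() instead of assigning skb->data and skb->tail
 * directly. */
static inline void align_rx_skb(struct sk_buff *skb)
{
	unsigned long off = (unsigned long)skb->data & ALIGN_SIZE;

	if (off)
		skb_reserve(skb, (ALIGN_SIZE + 1) - off);
}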

Another reason I'm making this change is that it makes some
SKB cleanups I have planned simpler to merge. In those cleanups,
the skb->head, skb->tail, and skb->end pointers are removed and
replaced with skb->head_room and skb->tail_room integers.
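
For context, head room and tail room are already derived
quantities under the pointer layout. A sketch of the arithmetic,
reusing the skb_model struct from the sketch above (compare the
kernel's skb_headroom()/skb_tailroom() accessors); the planned
cleanup would store these integers directly:

/* Sketch only: how the rooms fall out of today's pointers. */
static inline unsigned int model_headroom(const struct skb_model *skb)
{
	return skb->data - skb->head;	/* free bytes before the data */
}

static inline unsigned int model_tailroom(const struct skb_model *skb)
{
	return skb->end - skb->tail;	/* free bytes after the data */
}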

Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Jeff Garzik <jgarzik@pobox.com>

+95 -97
+2 -2
drivers/net/3c515.c
···
822 822  break; /* Bad news! */
823 823  skb->dev = dev; /* Mark as being used by this device. */
824 824  skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
825 -    vp->rx_ring[i].addr = isa_virt_to_bus(skb->tail);
825 +    vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
826 826  }
827 827  vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
828 828  outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
···
1406 1406  break; /* Bad news! */
1407 1407  skb->dev = dev; /* Mark as being used by this device. */
1408 1408  skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1409 -     vp->rx_ring[entry].addr = isa_virt_to_bus(skb->tail);
1409 +     vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
1410 1410  vp->rx_skbuff[entry] = skb;
1411 1411  }
1412 1412  vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+3 -3
drivers/net/3c59x.c
···
1802 1802  break; /* Bad news! */
1803 1803  skb->dev = dev; /* Mark as being used by this device. */
1804 1804  skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1805 -     vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1805 +     vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1806 1806  }
1807 1807  if (i != RX_RING_SIZE) {
1808 1808  int j;
···
2632 2632  pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2633 2633  /* 'skb_put()' points to the start of sk_buff data area. */
2634 2634  memcpy(skb_put(skb, pkt_len),
2635 -     vp->rx_skbuff[entry]->tail,
2635 +     vp->rx_skbuff[entry]->data,
2636 2636  pkt_len);
2637 2637  pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2638 2638  vp->rx_copy++;
···
2678 2678  }
2679 2679  skb->dev = dev; /* Mark as being used by this device. */
2680 2680  skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2681 -     vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2681 +     vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2682 2682  vp->rx_skbuff[entry] = skb;
2683 2683  }
2684 2684  vp->rx_ring[entry].status = 0; /* Clear complete bit. */
+2 -2
drivers/net/8139cp.c
···
596 596
597 597  mapping =
598 598  cp->rx_skb[rx_tail].mapping =
599 -    pci_map_single(cp->pdev, new_skb->tail,
599 +    pci_map_single(cp->pdev, new_skb->data,
600 600  buflen, PCI_DMA_FROMDEVICE);
601 601  cp->rx_skb[rx_tail].skb = new_skb;
602 602
···
1101 1101  skb_reserve(skb, RX_OFFSET);
1102 1102
1103 1103  cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
1104 -     skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1104 +     skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1105 1105  cp->rx_skb[i].skb = skb;
1106 1106
1107 1107  cp->rx_ring[i].opts2 = 0;
+7 -7
drivers/net/82596.c
···
546 546  rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
547 547  rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
548 548  rbd->skb = skb;
549 -    rbd->v_data = skb->tail;
550 -    rbd->b_data = WSWAPchar(virt_to_bus(skb->tail));
549 +    rbd->v_data = skb->data;
550 +    rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
551 551  rbd->size = PKT_BUF_SZ;
552 552  #ifdef __mc68000__
553 -    cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ);
553 +    cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
554 554  #endif
555 555  }
556 556  lp->rbd_head = lp->rbds;
···
816 816  rx_in_place = 1;
817 817  rbd->skb = newskb;
818 818  newskb->dev = dev;
819 -    rbd->v_data = newskb->tail;
820 -    rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail));
819 +    rbd->v_data = newskb->data;
820 +    rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
821 821  #ifdef __mc68000__
822 -    cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ);
822 +    cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
823 823  #endif
824 824  }
825 825  else
···
840 840  skb->protocol=eth_type_trans(skb,dev);
841 841  skb->len = pkt_len;
842 842  #ifdef __mc68000__
843 -    cache_clear(virt_to_phys(rbd->skb->tail),
843 +    cache_clear(virt_to_phys(rbd->skb->data),
844 844  pkt_len);
845 845  #endif
846 846  netif_rx(skb);
+4 -4
drivers/net/dl2k.c
···
547 547  skb_reserve (skb, 2);
548 548  np->rx_ring[entry].fraginfo =
549 549  cpu_to_le64 (pci_map_single
550 -    (np->pdev, skb->tail, np->rx_buf_sz,
550 +    (np->pdev, skb->data, np->rx_buf_sz,
551 551  PCI_DMA_FROMDEVICE));
552 552  }
553 553  np->rx_ring[entry].fraginfo |=
···
618 618  /* Rubicon now supports 40 bits of addressing space. */
619 619  np->rx_ring[i].fraginfo =
620 620  cpu_to_le64 ( pci_map_single (
621 -    np->pdev, skb->tail, np->rx_buf_sz,
621 +    np->pdev, skb->data, np->rx_buf_sz,
622 622  PCI_DMA_FROMDEVICE));
623 623  np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
624 624  }
···
906 906  /* 16 byte align the IP header */
907 907  skb_reserve (skb, 2);
908 908  eth_copy_and_sum (skb,
909 -    np->rx_skbuff[entry]->tail,
909 +    np->rx_skbuff[entry]->data,
910 910  pkt_len, 0);
911 911  skb_put (skb, pkt_len);
912 912  pci_dma_sync_single_for_device(np->pdev,
···
950 950  skb_reserve (skb, 2);
951 951  np->rx_ring[entry].fraginfo =
952 952  cpu_to_le64 (pci_map_single
953 -    (np->pdev, skb->tail, np->rx_buf_sz,
953 +    (np->pdev, skb->data, np->rx_buf_sz,
954 954  PCI_DMA_FROMDEVICE));
955 955  }
956 956  np->rx_ring[entry].fraginfo |=
+4 -4
drivers/net/eepro100.c
···
1269 1269  if (skb == NULL)
1270 1270  break; /* OK. Just initially short of Rx bufs. */
1271 1271  skb->dev = dev; /* Mark as being used by this device. */
1272 -     rxf = (struct RxFD *)skb->tail;
1272 +     rxf = (struct RxFD *)skb->data;
1273 1273  sp->rx_ringp[i] = rxf;
1274 1274  sp->rx_ring_dma[i] =
1275 1275  pci_map_single(sp->pdev, rxf,
···
1661 1661  sp->rx_ringp[entry] = NULL;
1662 1662  return NULL;
1663 1663  }
1664 -     rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
1664 +     rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
1665 1665  sp->rx_ring_dma[entry] =
1666 1666  pci_map_single(sp->pdev, rxf,
1667 1667  PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
···
1808 1808
1809 1809  #if 1 || USE_IP_CSUM
1810 1810  /* Packet is in one chunk -- we can copy + cksum. */
1811 -     eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
1811 +     eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
1812 1812  skb_put(skb, pkt_len);
1813 1813  #else
1814 -     memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
1814 +     memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
1815 1815  pkt_len);
1816 1816  #endif
1817 1817  pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
+3 -3
drivers/net/epic100.c
···
1003 1003  skb->dev = dev; /* Mark as being used by this device. */
1004 1004  skb_reserve(skb, 2); /* 16 byte align the IP header. */
1005 1005  ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
1006 -     skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1006 +     skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1007 1007  ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
1008 1008  }
1009 1009  ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
···
1274 1274  ep->rx_ring[entry].bufaddr,
1275 1275  ep->rx_buf_sz,
1276 1276  PCI_DMA_FROMDEVICE);
1277 -     eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
1277 +     eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
1278 1278  skb_put(skb, pkt_len);
1279 1279  pci_dma_sync_single_for_device(ep->pci_dev,
1280 1280  ep->rx_ring[entry].bufaddr,
···
1308 1308  skb->dev = dev; /* Mark as being used by this device. */
1309 1309  skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1310 1310  ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
1311 -     skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1311 +     skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
1312 1312  work_done++;
1313 1313  }
1314 1314  ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
+4 -4
drivers/net/fealnx.c
···
1107 1107
1108 1108  skb->dev = dev; /* Mark as being used by this device. */
1109 1109  np->lack_rxbuf->skbuff = skb;
1110 -     np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->tail,
1110 +     np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
1111 1111  np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1112 1112  np->lack_rxbuf->status = RXOWN;
1113 1113  ++np->really_rx_count;
···
1300 1300  ++np->really_rx_count;
1301 1301  np->rx_ring[i].skbuff = skb;
1302 1302  skb->dev = dev; /* Mark as being used by this device. */
1303 -     np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->tail,
1303 +     np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
1304 1304  np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1305 1305  np->rx_ring[i].status = RXOWN;
1306 1306  np->rx_ring[i].control |= RXIC;
···
1737 1737
1738 1738  #if ! defined(__alpha__)
1739 1739  eth_copy_and_sum(skb,
1740 -     np->cur_rx->skbuff->tail, pkt_len, 0);
1740 +     np->cur_rx->skbuff->data, pkt_len, 0);
1741 1741  skb_put(skb, pkt_len);
1742 1742  #else
1743 1743  memcpy(skb_put(skb, pkt_len),
1744 -     np->cur_rx->skbuff->tail, pkt_len);
1744 +     np->cur_rx->skbuff->data, pkt_len);
1745 1745  #endif
1746 1746  pci_dma_sync_single_for_device(np->pci_dev,
1747 1747  np->cur_rx->buffer,
+6 -6
drivers/net/hamachi.c
···
1149 1149  skb->dev = dev; /* Mark as being used by this device. */
1150 1150  skb_reserve(skb, 2); /* 16 byte align the IP header. */
1151 1151  hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1152 -     skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1152 +     skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1153 1153  hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
1154 1154  DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
1155 1155  }
···
1210 1210  skb->dev = dev; /* Mark as being used by this device. */
1211 1211  skb_reserve(skb, 2); /* 16 byte align the IP header. */
1212 1212  hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1213 -     skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1213 +     skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1214 1214  /* -2 because it doesn't REALLY have that first 2 bytes -KDU */
1215 1215  hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
1216 1216  DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
···
1509 1509  desc->addr,
1510 1510  hmp->rx_buf_sz,
1511 1511  PCI_DMA_FROMDEVICE);
1512 -     buf_addr = (u8 *) hmp->rx_skbuff[entry]->tail;
1512 +     buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
1513 1513  frame_status = le32_to_cpu(get_unaligned((s32*)&(buf_addr[data_size - 12])));
1514 1514  if (hamachi_debug > 4)
1515 1515  printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n",
···
1678 1678  skb->dev = dev; /* Mark as being used by this device. */
1679 1679  skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1680 1680  desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1681 -     skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1681 +     skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1682 1682  }
1683 1683  desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
1684 1684  if (entry >= RX_RING_SIZE-1)
···
1772 1772  readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
1773 1773  i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
1774 1774  if (hamachi_debug > 6) {
1775 -     if (*(u8*)hmp->rx_skbuff[i]->tail != 0x69) {
1775 +     if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) {
1776 1776  u16 *addr = (u16 *)
1777 -     hmp->rx_skbuff[i]->tail;
1777 +     hmp->rx_skbuff[i]->data;
1778 1778  int j;
1779 1779
1780 1780  for (j = 0; j < 0x50; j++)
+1 -1
drivers/net/lance.c
···
862 862  lp->rx_skbuff[i] = skb;
863 863  if (skb) {
864 864  skb->dev = dev;
865 -    rx_buff = skb->tail;
865 +    rx_buff = skb->data;
866 866  } else
867 867  rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
868 868  if (rx_buff == NULL)
+4 -4
drivers/net/lasi_82596.c
···
553 553  if (skb == NULL)
554 554  panic("%s: alloc_skb() failed", __FILE__);
555 555  skb_reserve(skb, 2);
556 -    dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
556 +    dma_addr = dma_map_single(lp->dev, skb->data,PKT_BUF_SZ,
557 557  DMA_FROM_DEVICE);
558 558  skb->dev = dev;
559 559  rbd->v_next = rbd+1;
560 560  rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
561 561  rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
562 562  rbd->skb = skb;
563 -    rbd->v_data = skb->tail;
563 +    rbd->v_data = skb->data;
564 564  rbd->b_data = WSWAPchar(dma_addr);
565 565  rbd->size = PKT_BUF_SZ;
566 566  }
···
783 783  rx_in_place = 1;
784 784  rbd->skb = newskb;
785 785  newskb->dev = dev;
786 -    dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
787 -    rbd->v_data = newskb->tail;
786 +    dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
787 +    rbd->v_data = newskb->data;
788 788  rbd->b_data = WSWAPchar(dma_addr);
789 789  CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
790 790  }
+2 -2
drivers/net/natsemi.c
···
1926 1926  break; /* Better luck next round. */
1927 1927  skb->dev = dev; /* Mark as being used by this device. */
1928 1928  np->rx_dma[entry] = pci_map_single(np->pci_dev,
1929 -     skb->tail, buflen, PCI_DMA_FROMDEVICE);
1929 +     skb->data, buflen, PCI_DMA_FROMDEVICE);
1930 1930  np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
1931 1931  }
1932 1932  np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
···
2280 2280  buflen,
2281 2281  PCI_DMA_FROMDEVICE);
2282 2282  eth_copy_and_sum(skb,
2283 -     np->rx_skbuff[entry]->tail, pkt_len, 0);
2283 +     np->rx_skbuff[entry]->data, pkt_len, 0);
2284 2284  skb_put(skb, pkt_len);
2285 2285  pci_dma_sync_single_for_device(np->pci_dev,
2286 2286  np->rx_dma[entry],
+2 -2
drivers/net/ns83820.c
···
574 574
575 575  dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
576 576  cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
577 -    buf = pci_map_single(dev->pci_dev, skb->tail,
577 +    buf = pci_map_single(dev->pci_dev, skb->data,
578 578  REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
579 579  build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
580 580  /* update link of previous rx */
···
604 604  if (unlikely(!skb))
605 605  break;
606 606
607 -    res = (long)skb->tail & 0xf;
607 +    res = (long)skb->data & 0xf;
608 608  res = 0x10 - res;
609 609  res &= 0xf;
610 610  skb_reserve(skb, res);
+3 -3
drivers/net/pcnet32.c
···
1602 1602
1603 1603  rmb();
1604 1604  if (lp->rx_dma_addr[i] == 0)
1605 -     lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail,
1605 +     lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data,
1606 1606  PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
1607 1607  lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
1608 1608  lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
···
1983 1983  lp->rx_skbuff[entry] = newskb;
1984 1984  newskb->dev = dev;
1985 1985  lp->rx_dma_addr[entry] =
1986 -     pci_map_single(lp->pci_dev, newskb->tail,
1986 +     pci_map_single(lp->pci_dev, newskb->data,
1987 1987  PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
1988 1988  lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
1989 1989  rx_in_place = 1;
···
2020 2020  PKT_BUF_SZ-2,
2021 2021  PCI_DMA_FROMDEVICE);
2022 2022  eth_copy_and_sum(skb,
2023 -     (unsigned char *)(lp->rx_skbuff[entry]->tail),
2023 +     (unsigned char *)(lp->rx_skbuff[entry]->data),
2024 2024  pkt_len,0);
2025 2025  pci_dma_sync_single_for_device(lp->pci_dev,
2026 2026  lp->rx_dma_addr[entry],
+2 -2
drivers/net/r8169.c
···
1876 1876  skb_reserve(skb, NET_IP_ALIGN);
1877 1877  *sk_buff = skb;
1878 1878
1879 -     mapping = pci_map_single(pdev, skb->tail, rx_buf_sz,
1879 +     mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
1880 1880  PCI_DMA_FROMDEVICE);
1881 1881
1882 1882  rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
···
2336 2336  skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
2337 2337  if (skb) {
2338 2338  skb_reserve(skb, NET_IP_ALIGN);
2339 -     eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
2339 +     eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
2340 2340  *sk_buff = skb;
2341 2341  rtl8169_mark_to_asic(desc, rx_buf_sz);
2342 2342  ret = 0;
+3 -5
drivers/net/s2io.c
···
1699 1699  #else
1700 1700  ba = &nic->ba[ring_no][block_no][off];
1701 1701  skb_reserve(skb, BUF0_LEN);
1702 -     tmp = (unsigned long) skb->data;
1703 -     tmp += ALIGN_SIZE;
1704 -     tmp &= ~ALIGN_SIZE;
1705 -     skb->data = (void *) tmp;
1706 -     skb->tail = (void *) tmp;
1702 +     tmp = ((unsigned long) skb->data & ALIGN_SIZE);
1703 +     if (tmp)
1704 +     skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
1707 1705
1708 1706  memset(rxdp, 0, sizeof(RxD_t));
1709 1707  rxdp->Buffer2_ptr = pci_map_single
+2 -2
drivers/net/sb1250-mac.c
···
963 963  /*
964 964  * Do not interrupt per DMA transfer.
965 965  */
966 -    dsc->dscr_a = virt_to_phys(sb_new->tail) |
966 +    dsc->dscr_a = virt_to_phys(sb_new->data) |
967 967  V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
968 968  0;
969 969  #else
970 -    dsc->dscr_a = virt_to_phys(sb_new->tail) |
970 +    dsc->dscr_a = virt_to_phys(sb_new->data) |
971 971  V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
972 972  M_DMA_DSCRA_INTERRUPT;
973 973  #endif
+3 -3
drivers/net/sis900.c
···
1154 1154  sis_priv->rx_skbuff[i] = skb;
1155 1155  sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
1156 1156  sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
1157 -     skb->tail, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1157 +     skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1158 1158  }
1159 1159  sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
1160 1160
···
1776 1776  sis_priv->rx_skbuff[entry] = skb;
1777 1777  sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
1778 1778  sis_priv->rx_ring[entry].bufptr =
1779 -     pci_map_single(sis_priv->pci_dev, skb->tail,
1779 +     pci_map_single(sis_priv->pci_dev, skb->data,
1780 1780  RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1781 1781  sis_priv->dirty_rx++;
1782 1782  }
···
1809 1809  sis_priv->rx_skbuff[entry] = skb;
1810 1810  sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
1811 1811  sis_priv->rx_ring[entry].bufptr =
1812 -     pci_map_single(sis_priv->pci_dev, skb->tail,
1812 +     pci_map_single(sis_priv->pci_dev, skb->data,
1813 1813  RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
1814 1814  }
1815 1815  }
+3 -3
drivers/net/starfire.c
···
1286 1286  np->rx_info[i].skb = skb;
1287 1287  if (skb == NULL)
1288 1288  break;
1289 -     np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1289 +     np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1290 1290  skb->dev = dev; /* Mark as being used by this device. */
1291 1291  /* Grrr, we cannot offset to correctly align the IP header. */
1292 1292  np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
···
1572 1572  pci_dma_sync_single_for_cpu(np->pci_dev,
1573 1573  np->rx_info[entry].mapping,
1574 1574  pkt_len, PCI_DMA_FROMDEVICE);
1575 -     eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0);
1575 +     eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
1576 1576  pci_dma_sync_single_for_device(np->pci_dev,
1577 1577  np->rx_info[entry].mapping,
1578 1578  pkt_len, PCI_DMA_FROMDEVICE);
···
1696 1696  if (skb == NULL)
1697 1697  break; /* Better luck next round. */
1698 1698  np->rx_info[entry].mapping =
1699 -     pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1699 +     pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1700 1700  skb->dev = dev; /* Mark as being used by this device. */
1701 1701  np->rx_ring[entry].rxaddr =
1702 1702  cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
+3 -3
drivers/net/sundance.c
···
1028 1028  skb->dev = dev; /* Mark as being used by this device. */
1029 1029  skb_reserve(skb, 2); /* 16 byte align the IP header. */
1030 1030  np->rx_ring[i].frag[0].addr = cpu_to_le32(
1031 -     pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz,
1031 +     pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1032 1032  PCI_DMA_FROMDEVICE));
1033 1033  np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1034 1034  }
···
1341 1341  np->rx_buf_sz,
1342 1342  PCI_DMA_FROMDEVICE);
1343 1343
1344 -     eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1344 +     eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1345 1345  pci_dma_sync_single_for_device(np->pci_dev,
1346 1346  desc->frag[0].addr,
1347 1347  np->rx_buf_sz,
···
1400 1400  skb->dev = dev; /* Mark as being used by this device. */
1401 1401  skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1402 1402  np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1403 -     pci_map_single(np->pci_dev, skb->tail,
1403 +     pci_map_single(np->pci_dev, skb->data,
1404 1404  np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1405 1405  }
1406 1406  /* Perhaps we need not reset this field. */
+3 -3
drivers/net/tulip/de2104x.c
···
446 446
447 447  mapping =
448 448  de->rx_skb[rx_tail].mapping =
449 -    pci_map_single(de->pdev, copy_skb->tail,
449 +    pci_map_single(de->pdev, copy_skb->data,
450 450  buflen, PCI_DMA_FROMDEVICE);
451 451  de->rx_skb[rx_tail].skb = copy_skb;
452 452  } else {
453 453  pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
454 454  skb_reserve(copy_skb, RX_OFFSET);
455 -    memcpy(skb_put(copy_skb, len), skb->tail, len);
455 +    memcpy(skb_put(copy_skb, len), skb->data, len);
456 456
457 457  pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
458 458
···
1269 1269  skb->dev = de->dev;
1270 1270
1271 1271  de->rx_skb[i].mapping = pci_map_single(de->pdev,
1272 -     skb->tail, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1272 +     skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1273 1273  de->rx_skb[i].skb = skb;
1274 1274
1275 1275  de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
+5 -5
drivers/net/tulip/dmfe.c
···
945 945
946 946  /* Received Packet CRC check need or not */
947 947  if ( (db->dm910x_chk_mode & 1) &&
948 -    (cal_CRC(skb->tail, rxlen, 1) !=
949 -    (*(u32 *) (skb->tail+rxlen) ))) { /* FIXME (?) */
948 +    (cal_CRC(skb->data, rxlen, 1) !=
949 +    (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
950 950  /* Found a error received packet */
951 951  dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
952 952  db->dm910x_chk_mode = 3;
···
959 959  /* size less than COPY_SIZE, allocate a rxlen SKB */
960 960  skb->dev = dev;
961 961  skb_reserve(skb, 2); /* 16byte align */
962 -    memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
962 +    memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
963 963  dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
964 964  } else {
965 965  skb->dev = dev;
···
1252 1252
1253 1253  if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1254 1254  rxptr->rx_skb_ptr = skb;
1255 -     rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1255 +     rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1256 1256  wmb();
1257 1257  rxptr->rdes0 = cpu_to_le32(0x80000000);
1258 1258  db->rx_avail_cnt++;
···
1463 1463  if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1464 1464  break;
1465 1465  rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1466 -     rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1466 +     rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1467 1467  wmb();
1468 1468  rxptr->rdes0 = cpu_to_le32(0x80000000);
1469 1469  rxptr = rxptr->next_rx_desc;
+5 -5
drivers/net/tulip/interrupt.c
···
78 78  if (skb == NULL)
79 79  break;
80 80
81 -   mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
81 +   mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
82 82  PCI_DMA_FROMDEVICE);
83 83  tp->rx_buffers[entry].mapping = mapping;
84 84
···
199 199  tp->rx_buffers[entry].mapping,
200 200  pkt_len, PCI_DMA_FROMDEVICE);
201 201  #if ! defined(__alpha__)
202 -    eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
202 +    eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
203 203  pkt_len, 0);
204 204  skb_put(skb, pkt_len);
205 205  #else
206 206  memcpy(skb_put(skb, pkt_len),
207 -    tp->rx_buffers[entry].skb->tail,
207 +    tp->rx_buffers[entry].skb->data,
208 208  pkt_len);
209 209  #endif
210 210  pci_dma_sync_single_for_device(tp->pdev,
···
423 423  tp->rx_buffers[entry].mapping,
424 424  pkt_len, PCI_DMA_FROMDEVICE);
425 425  #if ! defined(__alpha__)
426 -    eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
426 +    eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
427 427  pkt_len, 0);
428 428  skb_put(skb, pkt_len);
429 429  #else
430 430  memcpy(skb_put(skb, pkt_len),
431 -    tp->rx_buffers[entry].skb->tail,
431 +    tp->rx_buffers[entry].skb->data,
432 432  pkt_len);
433 433  #endif
434 434  pci_dma_sync_single_for_device(tp->pdev,
+1 -1
drivers/net/tulip/tulip_core.c
···
625 625  tp->rx_buffers[i].skb = skb;
626 626  if (skb == NULL)
627 627  break;
628 -    mapping = pci_map_single(tp->pdev, skb->tail,
628 +    mapping = pci_map_single(tp->pdev, skb->data,
629 629  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
630 630  tp->rx_buffers[i].mapping = mapping;
631 631  skb->dev = dev; /* Mark as being used by this device. */
+3 -3
drivers/net/tulip/winbond-840.c
···
849 849  if (skb == NULL)
850 850  break;
851 851  skb->dev = dev; /* Mark as being used by this device. */
852 -    np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail,
852 +    np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
853 853  skb->len,PCI_DMA_FROMDEVICE);
854 854
855 855  np->rx_ring[i].buffer1 = np->rx_addr[i];
···
1269 1269  pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1270 1270  np->rx_skbuff[entry]->len,
1271 1271  PCI_DMA_FROMDEVICE);
1272 -     eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
1272 +     eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1273 1273  skb_put(skb, pkt_len);
1274 1274  pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1275 1275  np->rx_skbuff[entry]->len,
···
1315 1315  break; /* Better luck next round. */
1316 1316  skb->dev = dev; /* Mark as being used by this device. */
1317 1317  np->rx_addr[entry] = pci_map_single(np->pci_dev,
1318 -     skb->tail,
1318 +     skb->data,
1319 1319  skb->len, PCI_DMA_FROMDEVICE);
1320 1320  np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1321 1321  }
+2 -2
drivers/net/tulip/xircom_tulip_cb.c
···
899 899  break;
900 900  skb->dev = dev; /* Mark as being used by this device. */
901 901  tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */
902 -    tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail);
902 +    tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
903 903  }
904 904  tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
905 905
···
1291 1291  if (skb == NULL)
1292 1292  break;
1293 1293  skb->dev = dev; /* Mark as being used by this device. */
1294 -     tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail);
1294 +     tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
1295 1295  work_done++;
1296 1296  }
1297 1297  tp->rx_ring[entry].status = Rx0DescOwned;
+2 -2
drivers/net/typhoon.c
···
1661 1661  #endif
1662 1662
1663 1663  skb->dev = tp->dev;
1664 -     dma_addr = pci_map_single(tp->pdev, skb->tail,
1664 +     dma_addr = pci_map_single(tp->pdev, skb->data,
1665 1665  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1666 1666
1667 1667  /* Since no card does 64 bit DAC, the high bits will never
···
1721 1721  pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1722 1722  PKT_BUF_SZ,
1723 1723  PCI_DMA_FROMDEVICE);
1724 -     eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
1724 +     eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
1725 1725  pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1726 1726  PKT_BUF_SZ,
1727 1727  PCI_DMA_FROMDEVICE);
+3 -3
drivers/net/via-rhine.c
···
990 990  skb->dev = dev; /* Mark as being used by this device. */
991 991
992 992  rp->rx_skbuff_dma[i] =
993 -    pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
993 +    pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
994 994  PCI_DMA_FROMDEVICE);
995 995
996 996  rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
···
1518 1518  PCI_DMA_FROMDEVICE);
1519 1519
1520 1520  eth_copy_and_sum(skb,
1521 -     rp->rx_skbuff[entry]->tail,
1521 +     rp->rx_skbuff[entry]->data,
1522 1522  pkt_len, 0);
1523 1523  skb_put(skb, pkt_len);
1524 1524  pci_dma_sync_single_for_device(rp->pdev,
···
1561 1561  break; /* Better luck next round. */
1562 1562  skb->dev = dev; /* Mark as being used by this device. */
1563 1563  rp->rx_skbuff_dma[entry] =
1564 -     pci_map_single(rp->pdev, skb->tail,
1564 +     pci_map_single(rp->pdev, skb->data,
1565 1565  rp->rx_buf_sz,
1566 1566  PCI_DMA_FROMDEVICE);
1567 1567  rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
+3 -3
drivers/net/via-velocity.c
···
1335 1335  if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
1336 1336  skb_reserve(new_skb, 2);
1337 1337
1338 -     memcpy(new_skb->data, rx_skb[0]->tail, pkt_size);
1338 +     memcpy(new_skb->data, rx_skb[0]->data, pkt_size);
1339 1339  *rx_skb = new_skb;
1340 1340  ret = 0;
1341 1341  }
···
1456 1456  * Do the gymnastics to get the buffer head for data at
1457 1457  * 64byte alignment.
1458 1458  */
1459 -     skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->tail & 63);
1459 +     skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
1460 1460  rd_info->skb->dev = vptr->dev;
1461 -     rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
1461 +     rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
1462 1462
1463 1463  /*
1464 1464  * Fill in the descriptor to match
+1 -1
drivers/net/wan/hdlc_cisco.c
···
72 72  }
73 73  skb_reserve(skb, 4);
74 74  cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
75 -   data = (cisco_packet*)skb->tail;
75 +   data = (cisco_packet*)skb->data;
76 76
77 77  data->type = htonl(type);
78 78  data->par1 = htonl(par1);
+4 -4
drivers/net/yellowfin.c
···
786 786  skb->dev = dev; /* Mark as being used by this device. */
787 787  skb_reserve(skb, 2); /* 16 byte align the IP header. */
788 788  yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
789 -    skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
789 +    skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
790 790  }
791 791  yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
792 792  yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
···
1111 1111  pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
1112 1112  yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1113 1113  desc_status = le32_to_cpu(desc->result_status) >> 16;
1114 -     buf_addr = rx_skb->tail;
1114 +     buf_addr = rx_skb->data;
1115 1115  data_size = (le32_to_cpu(desc->dbdma_cmd) -
1116 1116  le32_to_cpu(desc->result_status)) & 0xffff;
1117 1117  frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
···
1185 1185  break;
1186 1186  skb->dev = dev;
1187 1187  skb_reserve(skb, 2); /* 16 byte align the IP header */
1188 -     eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0);
1188 +     eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
1189 1189  skb_put(skb, pkt_len);
1190 1190  pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
1191 1191  yp->rx_buf_sz,
···
1211 1211  skb->dev = dev; /* Mark as being used by this device. */
1212 1212  skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1213 1213  yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1214 -     skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1214 +     skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1215 1215  }
1216 1216  yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1217 1217  yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */