Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[NET]: Kill eth_copy_and_sum().

It hasn't "summed" anything in over 7 years, and it's
just a straight memcpy a la skb_copy_to_linear_data()
so just get rid of it.

Signed-off-by: David S. Miller <davem@davemloft.net>

+66 -73
+2 -2
arch/ppc/8260_io/enet.c
··· 477 477 } 478 478 else { 479 479 skb_put(skb,pkt_len-4); /* Make room */ 480 - eth_copy_and_sum(skb, 480 + skb_copy_to_linear_data(skb, 481 481 (unsigned char *)__va(bdp->cbd_bufaddr), 482 - pkt_len-4, 0); 482 + pkt_len-4); 483 483 skb->protocol=eth_type_trans(skb,dev); 484 484 netif_rx(skb); 485 485 }
+2 -2
arch/ppc/8260_io/fcc_enet.c
··· 734 734 } 735 735 else { 736 736 skb_put(skb,pkt_len); /* Make room */ 737 - eth_copy_and_sum(skb, 737 + skb_copy_to_linear_data(skb, 738 738 (unsigned char *)__va(bdp->cbd_bufaddr), 739 - pkt_len, 0); 739 + pkt_len); 740 740 skb->protocol=eth_type_trans(skb,dev); 741 741 netif_rx(skb); 742 742 }
+2 -2
arch/ppc/8xx_io/enet.c
··· 506 506 } 507 507 else { 508 508 skb_put(skb,pkt_len-4); /* Make room */ 509 - eth_copy_and_sum(skb, 509 + skb_copy_to_linear_data(skb, 510 510 cep->rx_vaddr[bdp - cep->rx_bd_base], 511 - pkt_len-4, 0); 511 + pkt_len-4); 512 512 skb->protocol=eth_type_trans(skb,dev); 513 513 netif_rx(skb); 514 514 }
+1 -1
arch/ppc/8xx_io/fec.c
··· 725 725 fep->stats.rx_dropped++; 726 726 } else { 727 727 skb_put(skb,pkt_len-4); /* Make room */ 728 - eth_copy_and_sum(skb, data, pkt_len-4, 0); 728 + skb_copy_to_linear_data(skb, data, pkt_len-4); 729 729 skb->protocol=eth_type_trans(skb,dev); 730 730 netif_rx(skb); 731 731 }
+1 -1
drivers/net/3c523.c
··· 990 990 if (skb != NULL) { 991 991 skb_reserve(skb, 2); /* 16 byte alignment */ 992 992 skb_put(skb,totlen); 993 - eth_copy_and_sum(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen,0); 993 + skb_copy_to_linear_data(skb, (char *) p->base+(unsigned long) rbd->buffer,totlen); 994 994 skb->protocol = eth_type_trans(skb, dev); 995 995 netif_rx(skb); 996 996 dev->last_rx = jiffies;
+2 -2
drivers/net/7990.c
··· 333 333 334 334 skb_reserve (skb, 2); /* 16 byte align */ 335 335 skb_put (skb, len); /* make room */ 336 - eth_copy_and_sum(skb, 336 + skb_copy_to_linear_data(skb, 337 337 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), 338 - len, 0); 338 + len); 339 339 skb->protocol = eth_type_trans (skb, dev); 340 340 netif_rx (skb); 341 341 dev->last_rx = jiffies;
+1 -1
drivers/net/8139too.c
··· 2017 2017 #if RX_BUF_IDX == 3 2018 2018 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); 2019 2019 #else 2020 - eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); 2020 + skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size); 2021 2021 #endif 2022 2022 skb_put (skb, pkt_size); 2023 2023
+2 -2
drivers/net/a2065.c
··· 322 322 323 323 skb_reserve (skb, 2); /* 16 byte align */ 324 324 skb_put (skb, len); /* make room */ 325 - eth_copy_and_sum(skb, 325 + skb_copy_to_linear_data(skb, 326 326 (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), 327 - len, 0); 327 + len); 328 328 skb->protocol = eth_type_trans (skb, dev); 329 329 netif_rx (skb); 330 330 dev->last_rx = jiffies;
+1 -1
drivers/net/ariadne.c
··· 746 746 747 747 skb_reserve(skb,2); /* 16 byte align */ 748 748 skb_put(skb,pkt_len); /* Make room */ 749 - eth_copy_and_sum(skb, (char *)priv->rx_buff[entry], pkt_len,0); 749 + skb_copy_to_linear_data(skb, (char *)priv->rx_buff[entry], pkt_len); 750 750 skb->protocol=eth_type_trans(skb,dev); 751 751 #if 0 752 752 printk(KERN_DEBUG "RX pkt type 0x%04x from ",
+1 -1
drivers/net/arm/ep93xx_eth.c
··· 258 258 skb_reserve(skb, 2); 259 259 dma_sync_single(NULL, ep->descs->rdesc[entry].buf_addr, 260 260 length, DMA_FROM_DEVICE); 261 - eth_copy_and_sum(skb, ep->rx_buf[entry], length, 0); 261 + skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); 262 262 skb_put(skb, length); 263 263 skb->protocol = eth_type_trans(skb, dev); 264 264
+2 -2
drivers/net/au1000_eth.c
··· 1205 1205 continue; 1206 1206 } 1207 1207 skb_reserve(skb, 2); /* 16 byte IP header align */ 1208 - eth_copy_and_sum(skb, 1209 - (unsigned char *)pDB->vaddr, frmlen, 0); 1208 + skb_copy_to_linear_data(skb, 1209 + (unsigned char *)pDB->vaddr, frmlen); 1210 1210 skb_put(skb, frmlen); 1211 1211 skb->protocol = eth_type_trans(skb, dev); 1212 1212 netif_rx(skb); /* pass the packet to upper layers */
+2 -2
drivers/net/dl2k.c
··· 866 866 PCI_DMA_FROMDEVICE); 867 867 /* 16 byte align the IP header */ 868 868 skb_reserve (skb, 2); 869 - eth_copy_and_sum (skb, 869 + skb_copy_to_linear_data (skb, 870 870 np->rx_skbuff[entry]->data, 871 - pkt_len, 0); 871 + pkt_len); 872 872 skb_put (skb, pkt_len); 873 873 pci_dma_sync_single_for_device(np->pdev, 874 874 desc->fraginfo &
+1 -1
drivers/net/eepro100.c
··· 1801 1801 1802 1802 #if 1 || USE_IP_CSUM 1803 1803 /* Packet is in one chunk -- we can copy + cksum. */ 1804 - eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0); 1804 + skb_copy_to_linear_data(skb, sp->rx_skbuff[entry]->data, pkt_len); 1805 1805 skb_put(skb, pkt_len); 1806 1806 #else 1807 1807 skb_copy_from_linear_data(sp->rx_skbuff[entry],
+1 -1
drivers/net/epic100.c
··· 1201 1201 ep->rx_ring[entry].bufaddr, 1202 1202 ep->rx_buf_sz, 1203 1203 PCI_DMA_FROMDEVICE); 1204 - eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0); 1204 + skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len); 1205 1205 skb_put(skb, pkt_len); 1206 1206 pci_dma_sync_single_for_device(ep->pci_dev, 1207 1207 ep->rx_ring[entry].bufaddr,
+2 -2
drivers/net/fealnx.c
··· 1727 1727 /* Call copy + cksum if available. */ 1728 1728 1729 1729 #if ! defined(__alpha__) 1730 - eth_copy_and_sum(skb, 1731 - np->cur_rx->skbuff->data, pkt_len, 0); 1730 + skb_copy_to_linear_data(skb, 1731 + np->cur_rx->skbuff->data, pkt_len); 1732 1732 skb_put(skb, pkt_len); 1733 1733 #else 1734 1734 memcpy(skb_put(skb, pkt_len),
+1 -1
drivers/net/fec.c
··· 648 648 fep->stats.rx_dropped++; 649 649 } else { 650 650 skb_put(skb,pkt_len-4); /* Make room */ 651 - eth_copy_and_sum(skb, data, pkt_len-4, 0); 651 + skb_copy_to_linear_data(skb, data, pkt_len-4); 652 652 skb->protocol=eth_type_trans(skb,dev); 653 653 netif_rx(skb); 654 654 }
+2 -2
drivers/net/hamachi.c
··· 1575 1575 PCI_DMA_FROMDEVICE); 1576 1576 /* Call copy + cksum if available. */ 1577 1577 #if 1 || USE_IP_COPYSUM 1578 - eth_copy_and_sum(skb, 1579 - hmp->rx_skbuff[entry]->data, pkt_len, 0); 1578 + skb_copy_to_linear_data(skb, 1579 + hmp->rx_skbuff[entry]->data, pkt_len); 1580 1580 skb_put(skb, pkt_len); 1581 1581 #else 1582 1582 memcpy(skb_put(skb, pkt_len), hmp->rx_ring_dma
+1 -1
drivers/net/ixp2000/ixpdev.c
··· 111 111 skb = dev_alloc_skb(desc->pkt_length + 2); 112 112 if (likely(skb != NULL)) { 113 113 skb_reserve(skb, 2); 114 - eth_copy_and_sum(skb, buf, desc->pkt_length, 0); 114 + skb_copy_to_linear_data(skb, buf, desc->pkt_length); 115 115 skb_put(skb, desc->pkt_length); 116 116 skb->protocol = eth_type_trans(skb, nds[desc->channel]); 117 117
+2 -2
drivers/net/lance.c
··· 1186 1186 } 1187 1187 skb_reserve(skb,2); /* 16 byte align */ 1188 1188 skb_put(skb,pkt_len); /* Make room */ 1189 - eth_copy_and_sum(skb, 1189 + skb_copy_to_linear_data(skb, 1190 1190 (unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)), 1191 - pkt_len,0); 1191 + pkt_len); 1192 1192 skb->protocol=eth_type_trans(skb,dev); 1193 1193 netif_rx(skb); 1194 1194 dev->last_rx = jiffies;
+2 -2
drivers/net/natsemi.c
··· 2357 2357 np->rx_dma[entry], 2358 2358 buflen, 2359 2359 PCI_DMA_FROMDEVICE); 2360 - eth_copy_and_sum(skb, 2361 - np->rx_skbuff[entry]->data, pkt_len, 0); 2360 + skb_copy_to_linear_data(skb, 2361 + np->rx_skbuff[entry]->data, pkt_len); 2362 2362 skb_put(skb, pkt_len); 2363 2363 pci_dma_sync_single_for_device(np->pci_dev, 2364 2364 np->rx_dma[entry],
+1 -1
drivers/net/ni52.c
··· 936 936 { 937 937 skb_reserve(skb,2); 938 938 skb_put(skb,totlen); 939 - eth_copy_and_sum(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen,0); 939 + skb_copy_to_linear_data(skb,(char *) p->base+(unsigned long) rbd->buffer,totlen); 940 940 skb->protocol=eth_type_trans(skb,dev); 941 941 netif_rx(skb); 942 942 dev->last_rx = jiffies;
+2 -2
drivers/net/ni65.c
··· 1096 1096 #ifdef RCV_VIA_SKB 1097 1097 if( (unsigned long) (skb->data + R_BUF_SIZE) > 0x1000000) { 1098 1098 skb_put(skb,len); 1099 - eth_copy_and_sum(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len,0); 1099 + skb_copy_to_linear_data(skb, (unsigned char *)(p->recv_skb[p->rmdnum]->data),len); 1100 1100 } 1101 1101 else { 1102 1102 struct sk_buff *skb1 = p->recv_skb[p->rmdnum]; ··· 1108 1108 } 1109 1109 #else 1110 1110 skb_put(skb,len); 1111 - eth_copy_and_sum(skb, (unsigned char *) p->recvbounce[p->rmdnum],len,0); 1111 + skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len); 1112 1112 #endif 1113 1113 p->stats.rx_packets++; 1114 1114 p->stats.rx_bytes += len;
+1 -1
drivers/net/pci-skeleton.c
··· 1567 1567 if (skb) { 1568 1568 skb_reserve (skb, 2); /* 16 byte align the IP fields. */ 1569 1569 1570 - eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); 1570 + skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size); 1571 1571 skb_put (skb, pkt_size); 1572 1572 1573 1573 skb->protocol = eth_type_trans (skb, dev);
+2 -2
drivers/net/pcnet32.c
··· 1235 1235 lp->rx_dma_addr[entry], 1236 1236 pkt_len, 1237 1237 PCI_DMA_FROMDEVICE); 1238 - eth_copy_and_sum(skb, 1238 + skb_copy_to_linear_data(skb, 1239 1239 (unsigned char *)(lp->rx_skbuff[entry]->data), 1240 - pkt_len, 0); 1240 + pkt_len); 1241 1241 pci_dma_sync_single_for_device(lp->pci_dev, 1242 1242 lp->rx_dma_addr[entry], 1243 1243 pkt_len,
+2 -2
drivers/net/saa9730.c
··· 690 690 lp->stats.rx_packets++; 691 691 skb_reserve(skb, 2); /* 16 byte align */ 692 692 skb_put(skb, len); /* make room */ 693 - eth_copy_and_sum(skb, 693 + skb_copy_to_linear_data(skb, 694 694 (unsigned char *) pData, 695 - len, 0); 695 + len); 696 696 skb->protocol = eth_type_trans(skb, dev); 697 697 netif_rx(skb); 698 698 dev->last_rx = jiffies;
+1 -1
drivers/net/sgiseeq.c
··· 320 320 skb_put(skb, len); 321 321 322 322 /* Copy out of kseg1 to avoid silly cache flush. */ 323 - eth_copy_and_sum(skb, pkt_pointer + 2, len, 0); 323 + skb_copy_to_linear_data(skb, pkt_pointer + 2, len); 324 324 skb->protocol = eth_type_trans(skb, dev); 325 325 326 326 /* We don't want to receive our own packets */
+1 -1
drivers/net/sis190.c
··· 548 548 skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); 549 549 if (skb) { 550 550 skb_reserve(skb, NET_IP_ALIGN); 551 - eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); 551 + skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); 552 552 *sk_buff = skb; 553 553 sis190_give_to_asic(desc, rx_buf_sz); 554 554 ret = 0;
+1 -1
drivers/net/starfire.c
··· 1456 1456 pci_dma_sync_single_for_cpu(np->pci_dev, 1457 1457 np->rx_info[entry].mapping, 1458 1458 pkt_len, PCI_DMA_FROMDEVICE); 1459 - eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0); 1459 + skb_copy_to_linear_data(skb, np->rx_info[entry].skb->data, pkt_len); 1460 1460 pci_dma_sync_single_for_device(np->pci_dev, 1461 1461 np->rx_info[entry].mapping, 1462 1462 pkt_len, PCI_DMA_FROMDEVICE);
+1 -1
drivers/net/sun3_82586.c
··· 777 777 { 778 778 skb_reserve(skb,2); 779 779 skb_put(skb,totlen); 780 - eth_copy_and_sum(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen,0); 780 + skb_copy_to_linear_data(skb,(char *) p->base+swab32((unsigned long) rbd->buffer),totlen); 781 781 skb->protocol=eth_type_trans(skb,dev); 782 782 netif_rx(skb); 783 783 p->stats.rx_packets++;
+2 -3
drivers/net/sun3lance.c
··· 853 853 854 854 skb_reserve( skb, 2 ); /* 16 byte align */ 855 855 skb_put( skb, pkt_len ); /* Make room */ 856 - // skb_copy_to_linear_data(skb, PKTBUF_ADDR(head), pkt_len); 857 - eth_copy_and_sum(skb, 856 + skb_copy_to_linear_data(skb, 858 857 PKTBUF_ADDR(head), 859 - pkt_len, 0); 858 + pkt_len); 860 859 861 860 skb->protocol = eth_type_trans( skb, dev ); 862 861 netif_rx( skb );
+1 -1
drivers/net/sunbmac.c
··· 860 860 sbus_dma_sync_single_for_cpu(bp->bigmac_sdev, 861 861 this->rx_addr, len, 862 862 SBUS_DMA_FROMDEVICE); 863 - eth_copy_and_sum(copy_skb, (unsigned char *)skb->data, len, 0); 863 + skb_copy_to_linear_data(copy_skb, (unsigned char *)skb->data, len); 864 864 sbus_dma_sync_single_for_device(bp->bigmac_sdev, 865 865 this->rx_addr, len, 866 866 SBUS_DMA_FROMDEVICE);
+1 -1
drivers/net/sundance.c
··· 1313 1313 np->rx_buf_sz, 1314 1314 PCI_DMA_FROMDEVICE); 1315 1315 1316 - eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0); 1316 + skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); 1317 1317 pci_dma_sync_single_for_device(np->pci_dev, 1318 1318 desc->frag[0].addr, 1319 1319 np->rx_buf_sz,
+2 -2
drivers/net/sunlance.c
··· 549 549 550 550 skb_reserve(skb, 2); /* 16 byte align */ 551 551 skb_put(skb, len); /* make room */ 552 - eth_copy_and_sum(skb, 552 + skb_copy_to_linear_data(skb, 553 553 (unsigned char *)&(ib->rx_buf [entry][0]), 554 - len, 0); 554 + len); 555 555 skb->protocol = eth_type_trans(skb, dev); 556 556 netif_rx(skb); 557 557 dev->last_rx = jiffies;
+2 -2
drivers/net/sunqe.c
··· 439 439 } else { 440 440 skb_reserve(skb, 2); 441 441 skb_put(skb, len); 442 - eth_copy_and_sum(skb, (unsigned char *) this_qbuf, 443 - len, 0); 442 + skb_copy_to_linear_data(skb, (unsigned char *) this_qbuf, 443 + len); 444 444 skb->protocol = eth_type_trans(skb, qep->dev); 445 445 netif_rx(skb); 446 446 qep->dev->last_rx = jiffies;
+4 -4
drivers/net/tulip/interrupt.c
··· 197 197 tp->rx_buffers[entry].mapping, 198 198 pkt_len, PCI_DMA_FROMDEVICE); 199 199 #if ! defined(__alpha__) 200 - eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data, 201 - pkt_len, 0); 200 + skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, 201 + pkt_len); 202 202 skb_put(skb, pkt_len); 203 203 #else 204 204 memcpy(skb_put(skb, pkt_len), ··· 420 420 tp->rx_buffers[entry].mapping, 421 421 pkt_len, PCI_DMA_FROMDEVICE); 422 422 #if ! defined(__alpha__) 423 - eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data, 424 - pkt_len, 0); 423 + skb_copy_to_linear_data(skb, tp->rx_buffers[entry].skb->data, 424 + pkt_len); 425 425 skb_put(skb, pkt_len); 426 426 #else 427 427 memcpy(skb_put(skb, pkt_len),
+1 -1
drivers/net/tulip/winbond-840.c
··· 1232 1232 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], 1233 1233 np->rx_skbuff[entry]->len, 1234 1234 PCI_DMA_FROMDEVICE); 1235 - eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0); 1235 + skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); 1236 1236 skb_put(skb, pkt_len); 1237 1237 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry], 1238 1238 np->rx_skbuff[entry]->len,
+1 -1
drivers/net/tulip/xircom_cb.c
··· 1208 1208 goto out; 1209 1209 } 1210 1210 skb_reserve(skb, 2); 1211 - eth_copy_and_sum(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len, 0); 1211 + skb_copy_to_linear_data(skb, (unsigned char*)&card->rx_buffer[bufferoffset / 4], pkt_len); 1212 1212 skb_put(skb, pkt_len); 1213 1213 skb->protocol = eth_type_trans(skb, dev); 1214 1214 netif_rx(skb);
+2 -2
drivers/net/tulip/xircom_tulip_cb.c
··· 1242 1242 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) { 1243 1243 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1244 1244 #if ! defined(__alpha__) 1245 - eth_copy_and_sum(skb, bus_to_virt(tp->rx_ring[entry].buffer1), 1246 - pkt_len, 0); 1245 + skb_copy_to_linear_data(skb, bus_to_virt(tp->rx_ring[entry].buffer1), 1246 + pkt_len); 1247 1247 skb_put(skb, pkt_len); 1248 1248 #else 1249 1249 memcpy(skb_put(skb, pkt_len),
+1 -1
drivers/net/typhoon.c
··· 1703 1703 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, 1704 1704 PKT_BUF_SZ, 1705 1705 PCI_DMA_FROMDEVICE); 1706 - eth_copy_and_sum(new_skb, skb->data, pkt_len, 0); 1706 + skb_copy_to_linear_data(new_skb, skb->data, pkt_len); 1707 1707 pci_dma_sync_single_for_device(tp->pdev, dma_addr, 1708 1708 PKT_BUF_SZ, 1709 1709 PCI_DMA_FROMDEVICE);
+1 -1
drivers/net/usb/catc.c
··· 255 255 if (!(skb = dev_alloc_skb(pkt_len))) 256 256 return; 257 257 258 - eth_copy_and_sum(skb, pkt_start + pkt_offset, pkt_len, 0); 258 + skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len); 259 259 skb_put(skb, pkt_len); 260 260 261 261 skb->protocol = eth_type_trans(skb, catc->netdev);
+1 -1
drivers/net/usb/kaweth.c
··· 635 635 636 636 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 637 637 638 - eth_copy_and_sum(skb, kaweth->rx_buf + 2, pkt_len, 0); 638 + skb_copy_to_linear_data(skb, kaweth->rx_buf + 2, pkt_len); 639 639 640 640 skb_put(skb, pkt_len); 641 641
+2 -2
drivers/net/via-rhine.c
··· 1492 1492 rp->rx_buf_sz, 1493 1493 PCI_DMA_FROMDEVICE); 1494 1494 1495 - eth_copy_and_sum(skb, 1495 + skb_copy_to_linear_data(skb, 1496 1496 rp->rx_skbuff[entry]->data, 1497 - pkt_len, 0); 1497 + pkt_len); 1498 1498 skb_put(skb, pkt_len); 1499 1499 pci_dma_sync_single_for_device(rp->pdev, 1500 1500 rp->rx_skbuff_dma[entry],
+1 -1
drivers/net/wireless/wl3501_cs.c
··· 1011 1011 } else { 1012 1012 skb->dev = dev; 1013 1013 skb_reserve(skb, 2); /* IP headers on 16 bytes boundaries */ 1014 - eth_copy_and_sum(skb, (unsigned char *)&sig.daddr, 12, 0); 1014 + skb_copy_to_linear_data(skb, (unsigned char *)&sig.daddr, 12); 1015 1015 wl3501_receive(this, skb->data, pkt_len); 1016 1016 skb_put(skb, pkt_len); 1017 1017 skb->protocol = eth_type_trans(skb, dev);
+1 -1
drivers/net/yellowfin.c
··· 1137 1137 if (skb == NULL) 1138 1138 break; 1139 1139 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1140 - eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0); 1140 + skb_copy_to_linear_data(skb, rx_skb->data, pkt_len); 1141 1141 skb_put(skb, pkt_len); 1142 1142 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr, 1143 1143 yp->rx_buf_sz,
-6
include/linux/etherdevice.h
··· 40 40 struct hh_cache *hh); 41 41 42 42 extern struct net_device *alloc_etherdev(int sizeof_priv); 43 - static inline void eth_copy_and_sum (struct sk_buff *dest, 44 - const unsigned char *src, 45 - int len, int base) 46 - { 47 - memcpy (dest->data, src, len); 48 - } 49 43 50 44 /** 51 45 * is_zero_ether_addr - Determine if give Ethernet address is all zeros.