Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SK_BUFF]: Introduce skb_reset_transport_header(skb)

For the common, open coded 'skb->h.raw = skb->data' operation, so that we can
later turn skb->h.raw into an offset, reducing the size of struct sk_buff in
64bit land while possibly keeping it as a pointer on 32bit.

This one touches just the most simple cases:

skb->h.raw = skb->data;
skb->h.raw = {skb_push|[__]skb_pull}()

The next ones will handle the slightly more "complex" cases.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Arnaldo Carvalho de Melo and committed by
David S. Miller
badff6d0 0660e03f

+82 -73
+3 -3
drivers/infiniband/hw/cxgb3/iwch_cm.c
··· 507 507 */ 508 508 skb_get(skb); 509 509 set_arp_failure_handler(skb, arp_failure_discard); 510 - skb->h.raw = skb->data; 510 + skb_reset_transport_header(skb); 511 511 len = skb->len; 512 512 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 513 513 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); ··· 559 559 skb_get(skb); 560 560 skb->priority = CPL_PRIORITY_DATA; 561 561 set_arp_failure_handler(skb, arp_failure_discard); 562 - skb->h.raw = skb->data; 562 + skb_reset_transport_header(skb); 563 563 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 564 564 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)); 565 565 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); ··· 610 610 */ 611 611 skb_get(skb); 612 612 set_arp_failure_handler(skb, arp_failure_discard); 613 - skb->h.raw = skb->data; 613 + skb_reset_transport_header(skb); 614 614 len = skb->len; 615 615 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); 616 616 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA));
+1 -1
drivers/net/appletalk/cops.c
··· 855 855 856 856 skb_reset_mac_header(skb); /* Point to entire packet. */ 857 857 skb_pull(skb,3); 858 - skb->h.raw = skb->data; /* Point to data (Skip header). */ 858 + skb_reset_transport_header(skb); /* Point to data (Skip header). */ 859 859 860 860 /* Update the counters. */ 861 861 lp->stats.rx_packets++;
+2 -2
drivers/net/appletalk/ltpc.c
··· 776 776 /* copy ddp(s,e)hdr + contents */ 777 777 memcpy(skb->data,(void*)ltdmabuf,len); 778 778 779 - skb->h.raw = skb->data; 779 + skb_reset_transport_header(skb); 780 780 781 781 stats->rx_packets++; 782 782 stats->rx_bytes+=skb->len; ··· 923 923 cbuf.laptype = skb->data[2]; 924 924 skb_pull(skb,3); /* skip past LLAP header */ 925 925 cbuf.length = skb->len; /* this is host order */ 926 - skb->h.raw=skb->data; 926 + skb_reset_transport_header(skb); 927 927 928 928 if(debug & DEBUG_UPPER) { 929 929 printk("command ");
+1 -1
drivers/net/cxgb3/sge.c
··· 1622 1622 rq->offload_pkts++; 1623 1623 skb_reset_mac_header(skb); 1624 1624 skb_reset_network_header(skb); 1625 - skb->h.raw = skb->data; 1625 + skb_reset_transport_header(skb); 1626 1626 1627 1627 if (rq->polling) { 1628 1628 rx_gather[gather_idx++] = skb;
+3 -3
include/linux/dccp.h
··· 265 265 266 266 static inline struct dccp_hdr *dccp_zeroed_hdr(struct sk_buff *skb, int headlen) 267 267 { 268 - skb->h.raw = skb_push(skb, headlen); 269 - memset(skb->h.raw, 0, headlen); 270 - return dccp_hdr(skb); 268 + skb_push(skb, headlen); 269 + skb_reset_transport_header(skb); 270 + return memset(skb->h.raw, 0, headlen); 271 271 } 272 272 273 273 static inline struct dccp_hdr_ext *dccp_hdrx(const struct sk_buff *skb)
+5
include/linux/skbuff.h
··· 957 957 skb->tail += len; 958 958 } 959 959 960 + static inline void skb_reset_transport_header(struct sk_buff *skb) 961 + { 962 + skb->h.raw = skb->data; 963 + } 964 + 960 965 static inline unsigned char *skb_network_header(const struct sk_buff *skb) 961 966 { 962 967 return skb->nh.raw;
+3 -3
net/appletalk/aarp.c
··· 119 119 /* Set up the buffer */ 120 120 skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length); 121 121 skb_reset_network_header(skb); 122 - skb->h.raw = skb->data; 122 + skb_reset_transport_header(skb); 123 123 skb_put(skb, sizeof(*eah)); 124 124 skb->protocol = htons(ETH_P_ATALK); 125 125 skb->dev = dev; ··· 166 166 /* Set up the buffer */ 167 167 skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length); 168 168 skb_reset_network_header(skb); 169 - skb->h.raw = skb->data; 169 + skb_reset_transport_header(skb); 170 170 skb_put(skb, sizeof(*eah)); 171 171 skb->protocol = htons(ETH_P_ATALK); 172 172 skb->dev = dev; ··· 217 217 /* Set up the buffer */ 218 218 skb_reserve(skb, dev->hard_header_len + aarp_dl->header_length); 219 219 skb_reset_network_header(skb); 220 - skb->h.raw = skb->data; 220 + skb_reset_transport_header(skb); 221 221 skb_put(skb, sizeof(*eah)); 222 222 skb->protocol = htons(ETH_P_ATALK); 223 223 skb->dev = dev;
+2 -2
net/appletalk/ddp.c
··· 1275 1275 skb->protocol = htons(ETH_P_IP); 1276 1276 skb_pull(skb, 13); 1277 1277 skb->dev = dev; 1278 - skb->h.raw = skb->data; 1278 + skb_reset_transport_header(skb); 1279 1279 1280 1280 stats = dev->priv; 1281 1281 stats->rx_packets++; ··· 1522 1522 /* Non routable, so force a drop if we slip up later */ 1523 1523 ddp->deh_len_hops = htons(skb->len + (DDP_MAXHOPS << 10)); 1524 1524 } 1525 - skb->h.raw = skb->data; 1525 + skb_reset_transport_header(skb); 1526 1526 1527 1527 return atalk_rcv(skb, dev, pt, orig_dev); 1528 1528 freeit:
+2 -2
net/ax25/af_ax25.c
··· 1631 1631 if (!ax25_sk(sk)->pidincl) 1632 1632 skb_pull(skb, 1); /* Remove PID */ 1633 1633 1634 - skb->h.raw = skb->data; 1635 - copied = skb->len; 1634 + skb_reset_transport_header(skb); 1635 + copied = skb->len; 1636 1636 1637 1637 if (copied > size) { 1638 1638 copied = size;
+4 -4
net/ax25/ax25_in.c
··· 62 62 63 63 skbn->dev = ax25->ax25_dev->dev; 64 64 skb_reset_network_header(skbn); 65 - skbn->h.raw = skbn->data; 65 + skb_reset_transport_header(skbn); 66 66 67 67 /* Copy data from the fragments */ 68 68 while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) { ··· 196 196 * Process the AX.25/LAPB frame. 197 197 */ 198 198 199 - skb->h.raw = skb->data; 199 + skb_reset_transport_header(skb); 200 200 201 201 if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) { 202 202 kfree_skb(skb); ··· 246 246 switch (skb->data[1]) { 247 247 case AX25_P_IP: 248 248 skb_pull(skb,2); /* drop PID/CTRL */ 249 - skb->h.raw = skb->data; 249 + skb_reset_transport_header(skb); 250 250 skb_reset_network_header(skb); 251 251 skb->dev = dev; 252 252 skb->pkt_type = PACKET_HOST; ··· 256 256 257 257 case AX25_P_ARP: 258 258 skb_pull(skb,2); 259 - skb->h.raw = skb->data; 259 + skb_reset_transport_header(skb); 260 260 skb_reset_network_header(skb); 261 261 skb->dev = dev; 262 262 skb->pkt_type = PACKET_HOST;
+1 -1
net/bluetooth/af_bluetooth.c
··· 221 221 copied = len; 222 222 } 223 223 224 - skb->h.raw = skb->data; 224 + skb_reset_transport_header(skb); 225 225 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 226 226 227 227 skb_free_datagram(sk, skb);
+5 -4
net/bluetooth/hci_core.c
··· 1074 1074 struct hci_acl_hdr *hdr; 1075 1075 int len = skb->len; 1076 1076 1077 - hdr = (struct hci_acl_hdr *) skb_push(skb, HCI_ACL_HDR_SIZE); 1077 + skb_push(skb, HCI_ACL_HDR_SIZE); 1078 + skb_reset_transport_header(skb); 1079 + hdr = (struct hci_acl_hdr *)skb->h.raw; 1078 1080 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); 1079 1081 hdr->dlen = cpu_to_le16(len); 1080 - 1081 - skb->h.raw = (void *) hdr; 1082 1082 } 1083 1083 1084 1084 int hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags) ··· 1143 1143 hdr.handle = cpu_to_le16(conn->handle); 1144 1144 hdr.dlen = skb->len; 1145 1145 1146 - skb->h.raw = skb_push(skb, HCI_SCO_HDR_SIZE); 1146 + skb_push(skb, HCI_SCO_HDR_SIZE); 1147 + skb_reset_transport_header(skb); 1147 1148 memcpy(skb->h.raw, &hdr, HCI_SCO_HDR_SIZE); 1148 1149 1149 1150 skb->dev = (void *) hdev;
+1 -1
net/bluetooth/hci_sock.c
··· 375 375 copied = len; 376 376 } 377 377 378 - skb->h.raw = skb->data; 378 + skb_reset_transport_header(skb); 379 379 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 380 380 381 381 hci_sock_cmsg(sk, msg, skb);
+1 -1
net/core/dev.c
··· 1773 1773 __get_cpu_var(netdev_rx_stat).total++; 1774 1774 1775 1775 skb_reset_network_header(skb); 1776 - skb->h.raw = skb->data; 1776 + skb_reset_transport_header(skb); 1777 1777 skb->mac_len = skb->nh.raw - skb->mac.raw; 1778 1778 1779 1779 pt_prev = NULL;
+1 -1
net/core/netpoll.c
··· 362 362 return; 363 363 364 364 skb_reset_network_header(skb); 365 - skb->h.raw = skb->data; 365 + skb_reset_transport_header(skb); 366 366 arp = arp_hdr(skb); 367 367 368 368 if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
+1 -1
net/decnet/dn_nsp_in.c
··· 725 725 if (!pskb_may_pull(skb, 2)) 726 726 goto free_out; 727 727 728 - skb->h.raw = skb->data; 728 + skb_reset_transport_header(skb); 729 729 cb->nsp_flags = *ptr++; 730 730 731 731 if (decnet_debug_level & 2)
+1 -1
net/decnet/dn_nsp_out.c
··· 79 79 struct dst_entry *dst; 80 80 struct flowi fl; 81 81 82 - skb->h.raw = skb->data; 82 + skb_reset_transport_header(skb); 83 83 scp->stamp = jiffies; 84 84 85 85 dst = sk_dst_check(sk, 0);
+2 -2
net/decnet/dn_route.c
··· 504 504 goto drop_it; 505 505 506 506 skb_pull(skb, 20); 507 - skb->h.raw = skb->data; 507 + skb_reset_transport_header(skb); 508 508 509 509 /* Destination info */ 510 510 ptr += 2; ··· 542 542 goto drop_it; 543 543 544 544 skb_pull(skb, 5); 545 - skb->h.raw = skb->data; 545 + skb_reset_transport_header(skb); 546 546 547 547 cb->dst = *(__le16 *)ptr; 548 548 ptr += 2;
+4 -2
net/ipv4/af_inet.c
··· 1120 1120 if (unlikely(!pskb_may_pull(skb, ihl))) 1121 1121 goto out; 1122 1122 1123 - skb->h.raw = __skb_pull(skb, ihl); 1123 + __skb_pull(skb, ihl); 1124 + skb_reset_transport_header(skb); 1124 1125 iph = ip_hdr(skb); 1125 1126 proto = iph->protocol & (MAX_INET_PROTOS - 1); 1126 1127 err = -EPROTONOSUPPORT; ··· 1164 1163 if (unlikely(!pskb_may_pull(skb, ihl))) 1165 1164 goto out; 1166 1165 1167 - skb->h.raw = __skb_pull(skb, ihl); 1166 + __skb_pull(skb, ihl); 1167 + skb_reset_transport_header(skb); 1168 1168 iph = ip_hdr(skb); 1169 1169 id = ntohs(iph->id); 1170 1170 proto = iph->protocol & (MAX_INET_PROTOS - 1);
+2 -1
net/ipv4/ah4.c
··· 182 182 } 183 183 ((struct iphdr*)work_buf)->protocol = ah->nexthdr; 184 184 skb->nh.raw += ah_hlen; 185 - skb->h.raw = memcpy(skb_network_header(skb), work_buf, ihl); 185 + memcpy(skb_network_header(skb), work_buf, ihl); 186 + skb->h.raw = skb->nh.raw; 186 187 __skb_pull(skb, ah_hlen + ihl); 187 188 188 189 return 0;
+1 -1
net/ipv4/ip_input.c
··· 201 201 __skb_pull(skb, ip_hdrlen(skb)); 202 202 203 203 /* Point into the IP datagram, just past the header. */ 204 - skb->h.raw = skb->data; 204 + skb_reset_transport_header(skb); 205 205 206 206 rcu_read_lock(); 207 207 {
+1 -1
net/ipv4/ip_output.c
··· 500 500 * before previous one went down. */ 501 501 if (frag) { 502 502 frag->ip_summed = CHECKSUM_NONE; 503 - frag->h.raw = frag->data; 503 + skb_reset_transport_header(frag); 504 504 __skb_push(frag, hlen); 505 505 skb_reset_network_header(frag); 506 506 memcpy(skb_network_header(frag), iph, hlen);
+1 -1
net/ipv4/ipmr.c
··· 563 563 */ 564 564 skb_push(skb, sizeof(struct iphdr)); 565 565 skb_reset_network_header(skb); 566 - skb->h.raw = skb->data; 566 + skb_reset_transport_header(skb); 567 567 msg = (struct igmpmsg *)skb_network_header(skb); 568 568 memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); 569 569 msg->im_msgtype = IGMPMSG_WHOLEPKT;
+2 -1
net/ipv4/udp.c
··· 1002 1002 * transport header to point to ESP. Keep UDP on the stack 1003 1003 * for later. 1004 1004 */ 1005 - skb->h.raw = __skb_pull(skb, len); 1005 + __skb_pull(skb, len); 1006 + skb_reset_transport_header(skb); 1006 1007 1007 1008 /* modify the protocol (it's ESP!) */ 1008 1009 iph->protocol = IPPROTO_ESP;
+1 -1
net/ipv4/xfrm4_mode_transport.c
··· 52 52 skb->nh.raw = skb->h.raw; 53 53 } 54 54 ip_hdr(skb)->tot_len = htons(skb->len + ihl); 55 - skb->h.raw = skb->data; 55 + skb_reset_transport_header(skb); 56 56 return 0; 57 57 } 58 58
+1 -1
net/ipv6/ip6_input.c
··· 101 101 if (hdr->version != 6) 102 102 goto err; 103 103 104 - skb->h.raw = (u8 *)(hdr + 1); 104 + skb->h.raw = skb->nh.raw + sizeof(*hdr); 105 105 IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); 106 106 107 107 pkt_len = ntohs(hdr->payload_len);
+4 -4
net/ipv6/ip6_output.c
··· 654 654 * before previous one went down. */ 655 655 if (frag) { 656 656 frag->ip_summed = CHECKSUM_NONE; 657 - frag->h.raw = frag->data; 657 + skb_reset_transport_header(frag); 658 658 fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); 659 659 __skb_push(frag, hlen); 660 660 skb_reset_network_header(frag); ··· 747 747 skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev)); 748 748 skb_put(frag, len + hlen + sizeof(struct frag_hdr)); 749 749 skb_reset_network_header(frag); 750 - fh = (struct frag_hdr*)(frag->data + hlen); 751 - frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr); 750 + fh = (struct frag_hdr *)(skb_network_header(frag) + hlen); 751 + frag->h.raw = frag->nh.raw + hlen + sizeof(struct frag_hdr); 752 752 753 753 /* 754 754 * Charge the memory for the fragment to any owner ··· 991 991 skb_reset_network_header(skb); 992 992 993 993 /* initialize protocol header pointer */ 994 - skb->h.raw = skb->data + fragheaderlen; 994 + skb->h.raw = skb->nh.raw + fragheaderlen; 995 995 996 996 skb->ip_summed = CHECKSUM_PARTIAL; 997 997 skb->csum = 0;
+2 -2
net/ipv6/ipv6_sockglue.c
··· 108 108 rcu_read_lock(); 109 109 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); 110 110 if (likely(ops && ops->gso_send_check)) { 111 - skb->h.raw = skb->data; 111 + skb_reset_transport_header(skb); 112 112 err = ops->gso_send_check(skb); 113 113 } 114 114 rcu_read_unlock(); ··· 144 144 rcu_read_lock(); 145 145 ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); 146 146 if (likely(ops && ops->gso_segment)) { 147 - skb->h.raw = skb->data; 147 + skb_reset_transport_header(skb); 148 148 segs = ops->gso_segment(skb, features); 149 149 } 150 150 rcu_read_unlock();
+1 -1
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 634 634 head->nh.raw += sizeof(struct frag_hdr); 635 635 636 636 skb_shinfo(head)->frag_list = head->next; 637 - head->h.raw = head->data; 637 + skb_reset_transport_header(head); 638 638 skb_push(head, head->data - skb_network_header(head)); 639 639 atomic_sub(head->truesize, &nf_ct_frag6_mem); 640 640
+1 -1
net/ipv6/reassembly.c
··· 653 653 head->nh.raw += sizeof(struct frag_hdr); 654 654 655 655 skb_shinfo(head)->frag_list = head->next; 656 - head->h.raw = head->data; 656 + skb_reset_transport_header(head); 657 657 skb_push(head, head->data - skb_network_header(head)); 658 658 atomic_sub(head->truesize, &ip6_frag_mem); 659 659
+1 -1
net/ipv6/xfrm6_mode_transport.c
··· 59 59 } 60 60 ipv6_hdr(skb)->payload_len = htons(skb->len + ihl - 61 61 sizeof(struct ipv6hdr)); 62 - skb->h.raw = skb->data; 62 + skb_reset_transport_header(skb); 63 63 return 0; 64 64 } 65 65
+1 -1
net/ipx/af_ipx.c
··· 577 577 if (skb2) { 578 578 skb_reserve(skb2, out_offset); 579 579 skb_reset_network_header(skb2); 580 - skb2->h.raw = skb2->data; 580 + skb_reset_transport_header(skb2); 581 581 skb_put(skb2, skb->len); 582 582 memcpy(ipx_hdr(skb2), ipx_hdr(skb), skb->len); 583 583 memcpy(skb2->cb, skb->cb, sizeof(skb->cb));
+1 -1
net/ipx/ipx_route.c
··· 204 204 205 205 /* Fill in IPX header */ 206 206 skb_reset_network_header(skb); 207 - skb->h.raw = skb->data; 207 + skb_reset_transport_header(skb); 208 208 skb_put(skb, sizeof(struct ipxhdr)); 209 209 ipx = ipx_hdr(skb); 210 210 ipx->ipx_pktsize = htons(len + sizeof(struct ipxhdr));
+2 -2
net/irda/af_irda.c
··· 1363 1363 if (!skb) 1364 1364 return err; 1365 1365 1366 - skb->h.raw = skb->data; 1367 - copied = skb->len; 1366 + skb_reset_transport_header(skb); 1367 + copied = skb->len; 1368 1368 1369 1369 if (copied > size) { 1370 1370 IRDA_DEBUG(2, "%s(), Received truncated frame (%zd < %zd)!\n",
+1 -1
net/irda/irlap_frame.c
··· 95 95 skb->dev = self->netdev; 96 96 skb_reset_mac_header(skb); 97 97 skb_reset_network_header(skb); 98 - skb->h.raw = skb->data; 98 + skb_reset_transport_header(skb); 99 99 skb->protocol = htons(ETH_P_IRDA); 100 100 skb->priority = TC_PRIO_BESTEFFORT; 101 101
+1 -1
net/iucv/af_iucv.c
··· 953 953 return; 954 954 } 955 955 956 - skb->h.raw = skb->data; 956 + skb_reset_transport_header(skb); 957 957 skb_reset_network_header(skb); 958 958 skb->len = msg->length; 959 959 }
+1 -1
net/key/af_key.c
··· 3667 3667 copied = len; 3668 3668 } 3669 3669 3670 - skb->h.raw = skb->data; 3670 + skb_reset_transport_header(skb); 3671 3671 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 3672 3672 if (err) 3673 3673 goto out_free;
+1 -1
net/llc/llc_sap.c
··· 39 39 skb_reset_mac_header(skb); 40 40 skb_reserve(skb, 50); 41 41 skb_reset_network_header(skb); 42 - skb->h.raw = skb->data; 42 + skb_reset_transport_header(skb); 43 43 skb->protocol = htons(ETH_P_802_2); 44 44 skb->dev = dev; 45 45 if (sk != NULL)
+1 -1
net/netlink/af_netlink.c
··· 1215 1215 copied = len; 1216 1216 } 1217 1217 1218 - skb->h.raw = skb->data; 1218 + skb_reset_transport_header(skb); 1219 1219 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); 1220 1220 1221 1221 if (msg->msg_name) {
+3 -3
net/netrom/af_netrom.c
··· 878 878 if (frametype == NR_PROTOEXT && 879 879 circuit_index == NR_PROTO_IP && circuit_id == NR_PROTO_IP) { 880 880 skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN); 881 - skb->h.raw = skb->data; 881 + skb_reset_transport_header(skb); 882 882 883 883 return nr_rx_ip(skb, dev); 884 884 } ··· 904 904 } 905 905 906 906 if (sk != NULL) { 907 - skb->h.raw = skb->data; 907 + skb_reset_transport_header(skb); 908 908 909 909 if (frametype == NR_CONNACK && skb->len == 22) 910 910 nr_sk(sk)->bpqext = 1; ··· 1149 1149 return er; 1150 1150 } 1151 1151 1152 - skb->h.raw = skb->data; 1152 + skb_reset_transport_header(skb); 1153 1153 copied = skb->len; 1154 1154 1155 1155 if (copied > size) {
+1 -1
net/netrom/nr_in.c
··· 51 51 if ((skbn = alloc_skb(nr->fraglen, GFP_ATOMIC)) == NULL) 52 52 return 1; 53 53 54 - skbn->h.raw = skbn->data; 54 + skb_reset_transport_header(skbn); 55 55 56 56 while ((skbo = skb_dequeue(&nr->frag_queue)) != NULL) { 57 57 memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
+1 -1
net/netrom/nr_loopback.c
··· 35 35 36 36 if ((skbn = alloc_skb(skb->len, GFP_ATOMIC)) != NULL) { 37 37 memcpy(skb_put(skbn, skb->len), skb->data, skb->len); 38 - skbn->h.raw = skbn->data; 38 + skb_reset_transport_header(skbn); 39 39 40 40 skb_queue_tail(&loopback_queue, skbn); 41 41
+1 -1
net/rose/af_rose.c
··· 1234 1234 *asmptr = qbit; 1235 1235 } 1236 1236 1237 - skb->h.raw = skb->data; 1237 + skb_reset_transport_header(skb); 1238 1238 copied = skb->len; 1239 1239 1240 1240 if (copied > size) {
+1 -1
net/rose/rose_loopback.c
··· 77 77 dest = (rose_address *)(skb->data + 4); 78 78 lci_o = 0xFFF - lci_i; 79 79 80 - skb->h.raw = skb->data; 80 + skb_reset_transport_header(skb); 81 81 82 82 sk = rose_find_socket(lci_o, &rose_loopback_neigh); 83 83 if (sk) {
+1 -1
net/rose/rose_route.c
··· 906 906 } 907 907 } 908 908 else { 909 - skb->h.raw = skb->data; 909 + skb_reset_transport_header(skb); 910 910 res = rose_process_rx_frame(sk, skb); 911 911 goto out; 912 912 }
+1 -1
net/unix/af_unix.c
··· 1319 1319 unix_attach_fds(siocb->scm, skb); 1320 1320 unix_get_secdata(siocb->scm, skb); 1321 1321 1322 - skb->h.raw = skb->data; 1322 + skb_reset_transport_header(skb); 1323 1323 err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len); 1324 1324 if (err) 1325 1325 goto out_free;
+1 -2
net/x25/af_x25.c
··· 1210 1210 } 1211 1211 } 1212 1212 1213 - skb->h.raw = skb->data; 1214 - 1213 + skb_reset_transport_header(skb); 1215 1214 copied = skb->len; 1216 1215 1217 1216 if (copied > size) {
+1 -1
net/x25/x25_dev.c
··· 48 48 if ((sk = x25_find_socket(lci, nb)) != NULL) { 49 49 int queued = 1; 50 50 51 - skb->h.raw = skb->data; 51 + skb_reset_transport_header(skb); 52 52 bh_lock_sock(sk); 53 53 if (!sock_owned_by_user(sk)) { 54 54 queued = x25_process_rx_frame(sk, skb);
+1 -1
net/x25/x25_in.c
··· 53 53 54 54 skb_queue_tail(&x25->fragment_queue, skb); 55 55 56 - skbn->h.raw = skbn->data; 56 + skb_reset_transport_header(skbn); 57 57 58 58 skbo = skb_dequeue(&x25->fragment_queue); 59 59 memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);