Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SK_BUFF]: Introduce skb_network_header()

For the places where we need a pointer to the network header, it is still legal
to touch skb->nh.raw directly if just adding to, subtracting from or setting it
to another layer header.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Arnaldo Carvalho de Melo and committed by David S. Miller
d56f90a7 bbe735e4

+258 -185
+3 -2
drivers/isdn/i4l/isdn_net.c
··· 872 872 static void 873 873 isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp) 874 874 { 875 - u_char *p = skb->nh.raw; /* hopefully, this was set correctly */ 875 + /* hopefully, this was set correctly */ 876 + const u_char *p = skb_network_header(skb); 876 877 unsigned short proto = ntohs(skb->protocol); 877 878 int data_ofs; 878 879 ip_ports *ipp; ··· 881 880 882 881 addinfo[0] = '\0'; 883 882 /* This check stolen from 2.1.72 dev_queue_xmit_nit() */ 884 - if (skb->nh.raw < skb->data || skb->nh.raw >= skb->tail) { 883 + if (p < skb->data || p >= skb->tail) { 885 884 /* fall back to old isdn_net_log_packet method() */ 886 885 char * buf = skb->data; 887 886
+1 -1
drivers/net/bonding/bond_alb.c
··· 106 106 107 107 static inline struct arp_pkt *arp_pkt(const struct sk_buff *skb) 108 108 { 109 - return (struct arp_pkt *)skb->nh.raw; 109 + return (struct arp_pkt *)skb_network_header(skb); 110 110 } 111 111 112 112 /* Forward declaration */
+4 -3
drivers/net/loopback.c
··· 76 76 static void emulate_large_send_offload(struct sk_buff *skb) 77 77 { 78 78 struct iphdr *iph = skb->nh.iph; 79 - struct tcphdr *th = (struct tcphdr*)(skb->nh.raw + (iph->ihl * 4)); 79 + struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) + 80 + (iph->ihl * 4)); 80 81 unsigned int doffset = (iph->ihl + th->doff) * 4; 81 82 unsigned int mtu = skb_shinfo(skb)->gso_size + doffset; 82 83 unsigned int offset = 0; ··· 94 93 skb_set_mac_header(nskb, -ETH_HLEN); 95 94 skb_reset_network_header(nskb); 96 95 iph = nskb->nh.iph; 97 - memcpy(nskb->data, skb->nh.raw, doffset); 96 + memcpy(nskb->data, skb_network_header(skb), doffset); 98 97 if (skb_copy_bits(skb, 99 98 doffset + offset, 100 99 nskb->data + doffset, ··· 109 108 memcpy(nskb->cb, skb->cb, sizeof(skb->cb)); 110 109 nskb->pkt_type = skb->pkt_type; 111 110 112 - th = (struct tcphdr*)(nskb->nh.raw + iph->ihl*4); 111 + th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4); 113 112 iph->tot_len = htons(frag_size + doffset); 114 113 iph->id = htons(id); 115 114 iph->check = 0;
+4 -2
drivers/net/pasemi_mac.c
··· 729 729 dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_SS | XCT_MACTX_CRC_PAD; 730 730 731 731 if (skb->ip_summed == CHECKSUM_PARTIAL) { 732 + const unsigned char *nh = skb_network_header(skb); 733 + 732 734 switch (skb->nh.iph->protocol) { 733 735 case IPPROTO_TCP: 734 736 dflags |= XCT_MACTX_CSUM_TCP; 735 737 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); 736 - dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); 738 + dflags |= XCT_MACTX_IPO(nh - skb->data); 737 739 break; 738 740 case IPPROTO_UDP: 739 741 dflags |= XCT_MACTX_CSUM_UDP; 740 742 dflags |= XCT_MACTX_IPH((skb->h.raw - skb->nh.raw) >> 2); 741 - dflags |= XCT_MACTX_IPO(skb->nh.raw - skb->data); 743 + dflags |= XCT_MACTX_IPO(nh - skb->data); 742 744 break; 743 745 } 744 746 }
+4 -2
drivers/s390/net/qeth_main.c
··· 3778 3778 } 3779 3779 /* try something else */ 3780 3780 if (skb->protocol == ETH_P_IPV6) 3781 - return (skb->nh.raw[24] == 0xff) ? RTN_MULTICAST : 0; 3781 + return (skb_network_header(skb)[24] == 0xff) ? 3782 + RTN_MULTICAST : 0; 3782 3783 else if (skb->protocol == ETH_P_IP) 3783 - return ((skb->nh.raw[16] & 0xf0) == 0xe0) ? RTN_MULTICAST : 0; 3784 + return ((skb_network_header(skb)[16] & 0xf0) == 0xe0) ? 3785 + RTN_MULTICAST : 0; 3784 3786 /* ... */ 3785 3787 if (!memcmp(skb->data, skb->dev->broadcast, 6)) 3786 3788 return RTN_BROADCAST;
+1 -1
include/linux/if_pppox.h
··· 116 116 117 117 static inline struct pppoe_hdr *pppoe_hdr(const struct sk_buff *skb) 118 118 { 119 - return (struct pppoe_hdr *)skb->nh.raw; 119 + return (struct pppoe_hdr *)skb_network_header(skb); 120 120 } 121 121 122 122 struct pppoe_opt {
+5
include/linux/skbuff.h
··· 960 960 skb->tail += len; 961 961 } 962 962 963 + static inline unsigned char *skb_network_header(const struct sk_buff *skb) 964 + { 965 + return skb->nh.raw; 966 + } 967 + 963 968 static inline void skb_reset_network_header(struct sk_buff *skb) 964 969 { 965 970 skb->nh.raw = skb->data;
+1 -1
include/net/cipso_ipv4.h
··· 120 120 */ 121 121 122 122 #define CIPSO_V4_OPTEXIST(x) (IPCB(x)->opt.cipso != 0) 123 - #define CIPSO_V4_OPTPTR(x) ((x)->nh.raw + IPCB(x)->opt.cipso) 123 + #define CIPSO_V4_OPTPTR(x) (skb_network_header(x) + IPCB(x)->opt.cipso) 124 124 125 125 /* 126 126 * DOI List Functions
+4 -2
include/net/inet_ecn.h
··· 114 114 { 115 115 switch (skb->protocol) { 116 116 case __constant_htons(ETH_P_IP): 117 - if (skb->nh.raw + sizeof(struct iphdr) <= skb->tail) 117 + if (skb_network_header(skb) + sizeof(struct iphdr) <= 118 + skb->tail) 118 119 return IP_ECN_set_ce(skb->nh.iph); 119 120 break; 120 121 121 122 case __constant_htons(ETH_P_IPV6): 122 - if (skb->nh.raw + sizeof(struct ipv6hdr) <= skb->tail) 123 + if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= 124 + skb->tail) 123 125 return IP6_ECN_set_ce(skb->nh.ipv6h); 124 126 break; 125 127 }
+2 -2
include/net/llc_pdu.h
··· 203 203 204 204 static inline struct llc_pdu_sn *llc_pdu_sn_hdr(struct sk_buff *skb) 205 205 { 206 - return (struct llc_pdu_sn *)skb->nh.raw; 206 + return (struct llc_pdu_sn *)skb_network_header(skb); 207 207 } 208 208 209 209 /* Un-numbered PDU format (3 bytes in length) */ ··· 215 215 216 216 static inline struct llc_pdu_un *llc_pdu_un_hdr(struct sk_buff *skb) 217 217 { 218 - return (struct llc_pdu_un *)skb->nh.raw; 218 + return (struct llc_pdu_un *)skb_network_header(skb); 219 219 } 220 220 221 221 /**
+1 -1
include/net/pkt_cls.h
··· 326 326 case TCF_LAYER_LINK: 327 327 return skb->data; 328 328 case TCF_LAYER_NETWORK: 329 - return skb->nh.raw; 329 + return skb_network_header(skb); 330 330 case TCF_LAYER_TRANSPORT: 331 331 return skb->h.raw; 332 332 }
+7 -5
net/bridge/br_netfilter.c
··· 374 374 { 375 375 unsigned char *raw = (u8 *) (skb->nh.ipv6h + 1); 376 376 u32 pkt_len; 377 - int off = raw - skb->nh.raw; 377 + const unsigned char *nh = skb_network_header(skb); 378 + int off = raw - nh; 378 379 int len = (raw[1] + 1) << 3; 379 380 380 381 if ((raw + len) - skb->data > skb_headlen(skb)) ··· 385 384 len -= 2; 386 385 387 386 while (len > 0) { 388 - int optlen = skb->nh.raw[off + 1] + 2; 387 + int optlen = nh[off + 1] + 2; 389 388 390 - switch (skb->nh.raw[off]) { 389 + switch (nh[off]) { 391 390 case IPV6_TLV_PAD0: 392 391 optlen = 1; 393 392 break; ··· 396 395 break; 397 396 398 397 case IPV6_TLV_JUMBO: 399 - if (skb->nh.raw[off + 1] != 4 || (off & 3) != 2) 398 + if (nh[off + 1] != 4 || (off & 3) != 2) 400 399 goto bad; 401 - pkt_len = ntohl(*(__be32 *) (skb->nh.raw + off + 2)); 400 + pkt_len = ntohl(*(__be32 *) (nh + off + 2)); 402 401 if (pkt_len <= IPV6_MAXPLEN || 403 402 skb->nh.ipv6h->payload_len) 404 403 goto bad; ··· 407 406 if (pskb_trim_rcsum(skb, 408 407 pkt_len + sizeof(struct ipv6hdr))) 409 408 goto bad; 409 + nh = skb_network_header(skb); 410 410 break; 411 411 default: 412 412 if (optlen > len)
+5 -4
net/core/dev.c
··· 1068 1068 */ 1069 1069 skb_reset_mac_header(skb2); 1070 1070 1071 - if (skb2->nh.raw < skb2->data || 1072 - skb2->nh.raw > skb2->tail) { 1071 + if (skb_network_header(skb2) < skb2->data || 1072 + skb_network_header(skb2) > skb2->tail) { 1073 1073 if (net_ratelimit()) 1074 1074 printk(KERN_CRIT "protocol %04x is " 1075 1075 "buggy, dev %s\n", ··· 1207 1207 BUG_ON(skb_shinfo(skb)->frag_list); 1208 1208 1209 1209 skb_reset_mac_header(skb); 1210 - skb->mac_len = skb->nh.raw - skb->data; 1210 + skb->mac_len = skb->nh.raw - skb->mac.raw; 1211 1211 __skb_pull(skb, skb->mac_len); 1212 1212 1213 1213 if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) { ··· 1224 1224 segs = ERR_PTR(err); 1225 1225 if (err || skb_gso_ok(skb, features)) 1226 1226 break; 1227 - __skb_push(skb, skb->data - skb->nh.raw); 1227 + __skb_push(skb, (skb->data - 1228 + skb_network_header(skb))); 1228 1229 } 1229 1230 segs = ptype->gso_segment(skb, features); 1230 1231 break;
+1 -1
net/core/filter.c
··· 42 42 u8 *ptr = NULL; 43 43 44 44 if (k >= SKF_NET_OFF) 45 - ptr = skb->nh.raw + k - SKF_NET_OFF; 45 + ptr = skb_network_header(skb) + k - SKF_NET_OFF; 46 46 else if (k >= SKF_LL_OFF) 47 47 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 48 48
+4 -4
net/dccp/ipv6.c
··· 261 261 262 262 if (rxopt->srcrt) 263 263 opt = ipv6_invert_rthdr(sk, 264 - (struct ipv6_rt_hdr *)(pktopts->nh.raw + 265 - rxopt->srcrt)); 264 + (struct ipv6_rt_hdr *)(skb_network_header(pktopts) + 265 + rxopt->srcrt)); 266 266 } 267 267 268 268 if (opt != NULL && opt->srcrt != NULL) { ··· 573 573 574 574 if (rxopt->srcrt) 575 575 opt = ipv6_invert_rthdr(sk, 576 - (struct ipv6_rt_hdr *)(ireq6->pktopts->nh.raw + 577 - rxopt->srcrt)); 576 + (struct ipv6_rt_hdr *)(skb_network_header(ireq6->pktopts) + 577 + rxopt->srcrt)); 578 578 } 579 579 580 580 if (dst == NULL) {
+2 -2
net/decnet/dn_route.c
··· 386 386 __le16 tmp; 387 387 388 388 /* Add back headers */ 389 - skb_push(skb, skb->data - skb->nh.raw); 389 + skb_push(skb, skb->data - skb_network_header(skb)); 390 390 391 391 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL) 392 392 return NET_RX_DROP; ··· 425 425 unsigned char tmp[ETH_ALEN]; 426 426 427 427 /* Add back all headers */ 428 - skb_push(skb, skb->data - skb->nh.raw); 428 + skb_push(skb, skb->data - skb_network_header(skb)); 429 429 430 430 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL) 431 431 return NET_RX_DROP;
+1 -1
net/ipv4/af_inet.c
··· 1184 1184 iph->id = htons(id++); 1185 1185 iph->tot_len = htons(skb->len - skb->mac_len); 1186 1186 iph->check = 0; 1187 - iph->check = ip_fast_csum(skb->nh.raw, iph->ihl); 1187 + iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); 1188 1188 } while ((skb = skb->next)); 1189 1189 1190 1190 out:
+3 -2
net/ipv4/ah4.c
··· 154 154 ah = (struct ip_auth_hdr*)skb->data; 155 155 iph = skb->nh.iph; 156 156 157 - ihl = skb->data - skb->nh.raw; 157 + ihl = skb->data - skb_network_header(skb); 158 158 memcpy(work_buf, iph, ihl); 159 159 160 160 iph->ttl = 0; ··· 181 181 } 182 182 } 183 183 ((struct iphdr*)work_buf)->protocol = ah->nexthdr; 184 - skb->h.raw = memcpy(skb->nh.raw += ah_hlen, work_buf, ihl); 184 + skb->nh.raw += ah_hlen; 185 + skb->h.raw = memcpy(skb_network_header(skb), work_buf, ihl); 185 186 __skb_pull(skb, ah_hlen + ihl); 186 187 187 188 return 0;
+4 -3
net/ipv4/esp4.c
··· 57 57 *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; 58 58 pskb_put(skb, trailer, clen - skb->len); 59 59 60 - __skb_push(skb, skb->data - skb->nh.raw); 60 + __skb_push(skb, skb->data - skb_network_header(skb)); 61 61 top_iph = skb->nh.iph; 62 - esph = (struct ip_esp_hdr *)(skb->nh.raw + top_iph->ihl*4); 62 + esph = (struct ip_esp_hdr *)(skb_network_header(skb) + 63 + top_iph->ihl * 4); 63 64 top_iph->tot_len = htons(skb->len + alen); 64 65 *(u8*)(trailer->tail - 1) = top_iph->protocol; 65 66 ··· 223 222 224 223 if (x->encap) { 225 224 struct xfrm_encap_tmpl *encap = x->encap; 226 - struct udphdr *uh = (void *)(skb->nh.raw + ihl); 225 + struct udphdr *uh = (void *)(skb_network_header(skb) + ihl); 227 226 228 227 /* 229 228 * 1) if the NAT-T peer's IP or port changed then
+2 -2
net/ipv4/icmp.c
··· 484 484 u8 _inner_type, *itp; 485 485 486 486 itp = skb_header_pointer(skb_in, 487 - skb_in->nh.raw + 487 + skb_network_header(skb_in) + 488 488 (iph->ihl << 2) + 489 489 offsetof(struct icmphdr, 490 490 type) - ··· 536 536 icmp_param.data.icmph.un.gateway = info; 537 537 icmp_param.data.icmph.checksum = 0; 538 538 icmp_param.skb = skb_in; 539 - icmp_param.offset = skb_in->nh.raw - skb_in->data; 539 + icmp_param.offset = skb_network_offset(skb_in); 540 540 icmp_out_count(icmp_param.data.icmph.type); 541 541 inet_sk(icmp_socket->sk)->tos = tos; 542 542 ipc.addr = iph->saddr;
+1 -1
net/ipv4/ip_fragment.c
··· 658 658 } 659 659 660 660 skb_shinfo(head)->frag_list = head->next; 661 - skb_push(head, head->data - head->nh.raw); 661 + skb_push(head, head->data - skb_network_header(head)); 662 662 atomic_sub(head->truesize, &ip_frag_mem); 663 663 664 664 for (fp=head->next; fp; fp = fp->next) {
+6 -6
net/ipv4/ip_options.c
··· 40 40 void ip_options_build(struct sk_buff * skb, struct ip_options * opt, 41 41 __be32 daddr, struct rtable *rt, int is_frag) 42 42 { 43 - unsigned char * iph = skb->nh.raw; 43 + unsigned char *iph = skb_network_header(skb); 44 44 45 45 memcpy(&(IPCB(skb)->opt), opt, sizeof(struct ip_options)); 46 46 memcpy(iph+sizeof(struct iphdr), opt->__data, opt->optlen); ··· 104 104 return 0; 105 105 } 106 106 107 - sptr = skb->nh.raw; 107 + sptr = skb_network_header(skb); 108 108 dptr = dopt->__data; 109 109 110 110 if (skb->dst) ··· 217 217 218 218 void ip_options_fragment(struct sk_buff * skb) 219 219 { 220 - unsigned char * optptr = skb->nh.raw + sizeof(struct iphdr); 220 + unsigned char *optptr = skb_network_header(skb) + sizeof(struct iphdr); 221 221 struct ip_options * opt = &(IPCB(skb)->opt); 222 222 int l = opt->optlen; 223 223 int optlen; ··· 264 264 265 265 if (!opt) { 266 266 opt = &(IPCB(skb)->opt); 267 - iph = skb->nh.raw; 267 + iph = skb_network_header(skb); 268 268 opt->optlen = ((struct iphdr *)iph)->ihl*4 - sizeof(struct iphdr); 269 269 optptr = iph + sizeof(struct iphdr); 270 270 opt->is_data = 0; ··· 563 563 struct ip_options * opt = &(IPCB(skb)->opt); 564 564 unsigned char * optptr; 565 565 struct rtable *rt = (struct rtable*)skb->dst; 566 - unsigned char *raw = skb->nh.raw; 566 + unsigned char *raw = skb_network_header(skb); 567 567 568 568 if (opt->rr_needaddr) { 569 569 optptr = (unsigned char *)raw + opt->rr; ··· 609 609 int srrspace, srrptr; 610 610 __be32 nexthop; 611 611 struct iphdr *iph = skb->nh.iph; 612 - unsigned char * optptr = skb->nh.raw + opt->srr; 612 + unsigned char *optptr = skb_network_header(skb) + opt->srr; 613 613 struct rtable *rt = (struct rtable*)skb->dst; 614 614 struct rtable *rt2; 615 615 int err;
+3 -3
net/ipv4/ip_output.c
··· 503 503 frag->h.raw = frag->data; 504 504 __skb_push(frag, hlen); 505 505 skb_reset_network_header(frag); 506 - memcpy(frag->nh.raw, iph, hlen); 506 + memcpy(skb_network_header(frag), iph, hlen); 507 507 iph = frag->nh.iph; 508 508 iph->tot_len = htons(frag->len); 509 509 ip_copy_metadata(frag, skb); ··· 607 607 * Copy the packet header into the new buffer. 608 608 */ 609 609 610 - memcpy(skb2->nh.raw, skb->data, hlen); 610 + memcpy(skb_network_header(skb2), skb->data, hlen); 611 611 612 612 /* 613 613 * Copy a block of the IP datagram. ··· 1198 1198 tail_skb = &(skb_shinfo(skb)->frag_list); 1199 1199 1200 1200 /* move skb->data to ip header from ext header */ 1201 - if (skb->data < skb->nh.raw) 1201 + if (skb->data < skb_network_header(skb)) 1202 1202 __skb_pull(skb, skb_network_offset(skb)); 1203 1203 while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 1204 1204 __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
+5 -3
net/ipv4/ip_sockglue.c
··· 273 273 serr->ee.ee_pad = 0; 274 274 serr->ee.ee_info = info; 275 275 serr->ee.ee_data = 0; 276 - serr->addr_offset = (u8*)&(((struct iphdr*)(skb->h.icmph+1))->daddr) - skb->nh.raw; 276 + serr->addr_offset = (u8 *)&(((struct iphdr *)(skb->h.icmph + 1))->daddr) - 277 + skb_network_header(skb); 277 278 serr->port = port; 278 279 279 280 skb->h.raw = payload; ··· 310 309 serr->ee.ee_pad = 0; 311 310 serr->ee.ee_info = info; 312 311 serr->ee.ee_data = 0; 313 - serr->addr_offset = (u8*)&iph->daddr - skb->nh.raw; 312 + serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); 314 313 serr->port = port; 315 314 316 315 skb->h.raw = skb->tail; ··· 356 355 sin = (struct sockaddr_in *)msg->msg_name; 357 356 if (sin) { 358 357 sin->sin_family = AF_INET; 359 - sin->sin_addr.s_addr = *(__be32*)(skb->nh.raw + serr->addr_offset); 358 + sin->sin_addr.s_addr = *(__be32 *)(skb_network_header(skb) + 359 + serr->addr_offset); 360 360 sin->sin_port = serr->port; 361 361 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); 362 362 }
+1 -1
net/ipv4/ipmr.c
··· 563 563 */ 564 564 msg = (struct igmpmsg*)skb_push(skb, sizeof(struct iphdr)); 565 565 skb->nh.raw = skb->h.raw = (u8*)msg; 566 - memcpy(msg, pkt->nh.raw, sizeof(struct iphdr)); 566 + memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr)); 567 567 msg->im_msgtype = IGMPMSG_WHOLEPKT; 568 568 msg->im_mbz = 0; 569 569 msg->im_vif = reg_vif_num;
+2 -2
net/ipv4/ipvs/ip_vs_app.c
··· 338 338 if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th))) 339 339 return 0; 340 340 341 - th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset); 341 + th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset); 342 342 343 343 /* 344 344 * Remember seq number in case this pkt gets resized ··· 413 413 if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th))) 414 414 return 0; 415 415 416 - th = (struct tcphdr *)((*pskb)->nh.raw + tcp_offset); 416 + th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset); 417 417 418 418 /* 419 419 * Remember seq number in case this pkt gets resized
+2 -1
net/ipv4/ipvs/ip_vs_core.c
··· 559 559 { 560 560 struct iphdr *iph = skb->nh.iph; 561 561 unsigned int icmp_offset = iph->ihl*4; 562 - struct icmphdr *icmph = (struct icmphdr *)(skb->nh.raw + icmp_offset); 562 + struct icmphdr *icmph = (struct icmphdr *)(skb_network_header(skb) + 563 + icmp_offset); 563 564 struct iphdr *ciph = (struct iphdr *)(icmph + 1); 564 565 565 566 if (inout) {
+1 -1
net/ipv4/netfilter/arpt_mangle.c
··· 31 31 } 32 32 33 33 arp = (*pskb)->nh.arph; 34 - arpptr = (*pskb)->nh.raw + sizeof(*arp); 34 + arpptr = skb_network_header(*pskb) + sizeof(*arp); 35 35 pln = arp->ar_pln; 36 36 hln = arp->ar_hln; 37 37 /* We assume that pln and hln were checked in the match */
+4 -5
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
··· 105 105 return -NF_DROP; 106 106 } 107 107 108 - *dataoff = (*pskb)->nh.raw - (*pskb)->data + (*pskb)->nh.iph->ihl*4; 108 + *dataoff = skb_network_offset(*pskb) + (*pskb)->nh.iph->ihl * 4; 109 109 *protonum = (*pskb)->nh.iph->protocol; 110 110 111 111 return NF_ACCEPT; ··· 151 151 if (!help || !help->helper) 152 152 return NF_ACCEPT; 153 153 154 - return help->helper->help(pskb, 155 - (*pskb)->nh.raw - (*pskb)->data 156 - + (*pskb)->nh.iph->ihl*4, 157 - ct, ctinfo); 154 + return help->helper->help(pskb, (skb_network_offset(*pskb) + 155 + (*pskb)->nh.iph->ihl * 4), 156 + ct, ctinfo); 158 157 } 159 158 160 159 static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+1 -1
net/ipv4/raw.c
··· 256 256 } 257 257 nf_reset(skb); 258 258 259 - skb_push(skb, skb->data - skb->nh.raw); 259 + skb_push(skb, skb->data - skb_network_header(skb)); 260 260 261 261 raw_rcv_skb(sk, skb); 262 262 return 0;
+1 -1
net/ipv4/tcp_input.c
··· 3634 3634 return; 3635 3635 3636 3636 skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head); 3637 - nskb->nh.raw = nskb->data + (skb->nh.raw - skb->head); 3637 + nskb->nh.raw = nskb->data + (skb_network_header(skb) - skb->head); 3638 3638 nskb->h.raw = nskb->data + (skb->h.raw - skb->head); 3639 3639 3640 3640 skb_reserve(nskb, header);
+1 -1
net/ipv4/xfrm4_input.c
··· 146 146 return 0; 147 147 } else { 148 148 #ifdef CONFIG_NETFILTER 149 - __skb_push(skb, skb->data - skb->nh.raw); 149 + __skb_push(skb, skb->data - skb_network_header(skb)); 150 150 skb->nh.iph->tot_len = htons(skb->len); 151 151 ip_send_check(skb->nh.iph); 152 152
+2 -2
net/ipv4/xfrm4_mode_beet.c
··· 98 98 } 99 99 100 100 skb->nh.raw = skb->data + (phlen - sizeof(*iph)); 101 - memmove(skb->nh.raw, iph, sizeof(*iph)); 101 + memmove(skb_network_header(skb), iph, sizeof(*iph)); 102 102 skb->h.raw = skb->data + (phlen + optlen); 103 103 skb->data = skb->h.raw; 104 104 ··· 112 112 else 113 113 iph->protocol = protocol; 114 114 iph->check = 0; 115 - iph->check = ip_fast_csum(skb->nh.raw, iph->ihl); 115 + iph->check = ip_fast_csum(skb_network_header(skb), iph->ihl); 116 116 err = 0; 117 117 out: 118 118 return err;
+2 -2
net/ipv4/xfrm4_mode_transport.c
··· 34 34 35 35 skb_push(skb, x->props.header_len); 36 36 skb_reset_network_header(skb); 37 - memmove(skb->nh.raw, iph, ihl); 37 + memmove(skb_network_header(skb), iph, ihl); 38 38 return 0; 39 39 } 40 40 ··· 51 51 int ihl = skb->data - skb->h.raw; 52 52 53 53 if (skb->h.raw != skb->nh.raw) { 54 - memmove(skb->h.raw, skb->nh.raw, ihl); 54 + memmove(skb->h.raw, skb_network_header(skb), ihl); 55 55 skb->nh.raw = skb->h.raw; 56 56 } 57 57 skb->nh.iph->tot_len = htons(skb->len + ihl);
+1 -1
net/ipv4/xfrm4_policy.c
··· 210 210 _decode_session4(struct sk_buff *skb, struct flowi *fl) 211 211 { 212 212 struct iphdr *iph = skb->nh.iph; 213 - u8 *xprth = skb->nh.raw + iph->ihl*4; 213 + u8 *xprth = skb_network_header(skb) + iph->ihl * 4; 214 214 215 215 memset(fl, 0, sizeof(struct flowi)); 216 216 if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
+2 -1
net/ipv6/af_inet6.c
··· 693 693 if (np->rxopt.all) { 694 694 if ((opt->hop && (np->rxopt.bits.hopopts || 695 695 np->rxopt.bits.ohopopts)) || 696 - ((IPV6_FLOWINFO_MASK & *(__be32*)skb->nh.raw) && 696 + ((IPV6_FLOWINFO_MASK & 697 + *(__be32 *)skb_network_header(skb)) && 697 698 np->rxopt.bits.rxflow) || 698 699 (opt->srcrt && (np->rxopt.bits.srcrt || 699 700 np->rxopt.bits.osrcrt)) ||
+7 -5
net/ipv6/ah6.c
··· 238 238 top_iph = (struct ipv6hdr *)skb->data; 239 239 top_iph->payload_len = htons(skb->len - sizeof(*top_iph)); 240 240 241 - nexthdr = *skb->nh.raw; 242 - *skb->nh.raw = IPPROTO_AH; 241 + nexthdr = *skb_network_header(skb); 242 + *skb_network_header(skb) = IPPROTO_AH; 243 243 244 244 /* When there are no extension headers, we only need to save the first 245 245 * 8 bytes of the base IP header. ··· 341 341 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) 342 342 goto out; 343 343 344 - hdr_len = skb->data - skb->nh.raw; 344 + hdr_len = skb->data - skb_network_header(skb); 345 345 ah = (struct ipv6_auth_hdr*)skb->data; 346 346 ahp = x->data; 347 347 nexthdr = ah->nexthdr; ··· 354 354 if (!pskb_may_pull(skb, ah_hlen)) 355 355 goto out; 356 356 357 - tmp_hdr = kmemdup(skb->nh.raw, hdr_len, GFP_ATOMIC); 357 + tmp_hdr = kmemdup(skb_network_header(skb), hdr_len, GFP_ATOMIC); 358 358 if (!tmp_hdr) 359 359 goto out; 360 360 if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len, XFRM_POLICY_IN)) ··· 382 382 } 383 383 } 384 384 385 - skb->h.raw = memcpy(skb->nh.raw += ah_hlen, tmp_hdr, hdr_len); 385 + skb->nh.raw += ah_hlen; 386 + memcpy(skb_network_header(skb), tmp_hdr, hdr_len); 387 + skb->h.raw = skb->nh.raw; 386 388 __skb_pull(skb, ah_hlen + hdr_len); 387 389 388 390 kfree(tmp_hdr);
+18 -13
net/ipv6/datagram.c
··· 227 227 serr->ee.ee_pad = 0; 228 228 serr->ee.ee_info = info; 229 229 serr->ee.ee_data = 0; 230 - serr->addr_offset = (u8*)&(((struct ipv6hdr*)(icmph+1))->daddr) - skb->nh.raw; 230 + serr->addr_offset = (u8 *)&(((struct ipv6hdr *)(icmph + 1))->daddr) - 231 + skb_network_header(skb); 231 232 serr->port = port; 232 233 233 234 skb->h.raw = payload; ··· 265 264 serr->ee.ee_pad = 0; 266 265 serr->ee.ee_info = info; 267 266 serr->ee.ee_data = 0; 268 - serr->addr_offset = (u8*)&iph->daddr - skb->nh.raw; 267 + serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); 269 268 serr->port = fl->fl_ip_dport; 270 269 271 270 skb->h.raw = skb->tail; ··· 311 310 312 311 sin = (struct sockaddr_in6 *)msg->msg_name; 313 312 if (sin) { 313 + const unsigned char *nh = skb_network_header(skb); 314 314 sin->sin6_family = AF_INET6; 315 315 sin->sin6_flowinfo = 0; 316 316 sin->sin6_port = serr->port; 317 317 sin->sin6_scope_id = 0; 318 318 if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP6) { 319 319 ipv6_addr_copy(&sin->sin6_addr, 320 - (struct in6_addr *)(skb->nh.raw + serr->addr_offset)); 320 + (struct in6_addr *)(nh + serr->addr_offset)); 321 321 if (np->sndflow) 322 - sin->sin6_flowinfo = *(__be32*)(skb->nh.raw + serr->addr_offset - 24) & IPV6_FLOWINFO_MASK; 322 + sin->sin6_flowinfo = 323 + (*(__be32 *)(nh + serr->addr_offset - 24) & 324 + IPV6_FLOWINFO_MASK); 323 325 if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL) 324 326 sin->sin6_scope_id = IP6CB(skb)->iif; 325 327 } else { 326 328 ipv6_addr_set(&sin->sin6_addr, 0, 0, 327 329 htonl(0xffff), 328 - *(__be32*)(skb->nh.raw + serr->addr_offset)); 330 + *(__be32 *)(nh + serr->addr_offset)); 329 331 } 330 332 } 331 333 ··· 386 382 { 387 383 struct ipv6_pinfo *np = inet6_sk(sk); 388 384 struct inet6_skb_parm *opt = IP6CB(skb); 385 + unsigned char *nh = skb_network_header(skb); 389 386 390 387 if (np->rxopt.bits.rxinfo) { 391 388 struct in6_pktinfo src_info; ··· 406 401 put_cmsg(msg, SOL_IPV6, IPV6_TCLASS, sizeof(tclass), 
&tclass); 407 402 } 408 403 409 - if (np->rxopt.bits.rxflow && (*(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK)) { 410 - __be32 flowinfo = *(__be32*)skb->nh.raw & IPV6_FLOWINFO_MASK; 404 + if (np->rxopt.bits.rxflow && (*(__be32 *)nh & IPV6_FLOWINFO_MASK)) { 405 + __be32 flowinfo = *(__be32 *)nh & IPV6_FLOWINFO_MASK; 411 406 put_cmsg(msg, SOL_IPV6, IPV6_FLOWINFO, sizeof(flowinfo), &flowinfo); 412 407 } 413 408 414 409 /* HbH is allowed only once */ 415 410 if (np->rxopt.bits.hopopts && opt->hop) { 416 - u8 *ptr = skb->nh.raw + opt->hop; 411 + u8 *ptr = nh + opt->hop; 417 412 put_cmsg(msg, SOL_IPV6, IPV6_HOPOPTS, (ptr[1]+1)<<3, ptr); 418 413 } 419 414 ··· 433 428 434 429 while (off <= opt->lastopt) { 435 430 unsigned len; 436 - u8 *ptr = skb->nh.raw + off; 431 + u8 *ptr = nh + off; 437 432 438 433 switch(nexthdr) { 439 434 case IPPROTO_DSTOPTS: ··· 475 470 put_cmsg(msg, SOL_IPV6, IPV6_2292HOPLIMIT, sizeof(hlim), &hlim); 476 471 } 477 472 if (np->rxopt.bits.ohopopts && opt->hop) { 478 - u8 *ptr = skb->nh.raw + opt->hop; 473 + u8 *ptr = nh + opt->hop; 479 474 put_cmsg(msg, SOL_IPV6, IPV6_2292HOPOPTS, (ptr[1]+1)<<3, ptr); 480 475 } 481 476 if (np->rxopt.bits.odstopts && opt->dst0) { 482 - u8 *ptr = skb->nh.raw + opt->dst0; 477 + u8 *ptr = nh + opt->dst0; 483 478 put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr); 484 479 } 485 480 if (np->rxopt.bits.osrcrt && opt->srcrt) { 486 - struct ipv6_rt_hdr *rthdr = (struct ipv6_rt_hdr *)(skb->nh.raw + opt->srcrt); 481 + struct ipv6_rt_hdr *rthdr = (struct ipv6_rt_hdr *)(nh + opt->srcrt); 487 482 put_cmsg(msg, SOL_IPV6, IPV6_2292RTHDR, (rthdr->hdrlen+1) << 3, rthdr); 488 483 } 489 484 if (np->rxopt.bits.odstopts && opt->dst1) { 490 - u8 *ptr = skb->nh.raw + opt->dst1; 485 + u8 *ptr = nh + opt->dst1; 491 486 put_cmsg(msg, SOL_IPV6, IPV6_2292DSTOPTS, (ptr[1]+1)<<3, ptr); 492 487 } 493 488 return 0;
+2 -2
net/ipv6/esp6.c
··· 92 92 top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len); 93 93 esph = (struct ipv6_esp_hdr *)skb->h.raw; 94 94 top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph)); 95 - *(u8*)(trailer->tail - 1) = *skb->nh.raw; 96 - *skb->nh.raw = IPPROTO_ESP; 95 + *(u8 *)(trailer->tail - 1) = *skb_network_header(skb); 96 + *skb_network_header(skb) = IPPROTO_ESP; 97 97 98 98 esph->spi = x->id.spi; 99 99 esph->seq_no = htonl(++x->replay.oseq);
+33 -23
net/ipv6/exthdrs.c
··· 50 50 51 51 int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) 52 52 { 53 - int packet_len = skb->tail - skb->nh.raw; 53 + const unsigned char *nh = skb_network_header(skb); 54 + int packet_len = skb->tail - nh; 54 55 struct ipv6_opt_hdr *hdr; 55 56 int len; 56 57 57 58 if (offset + 2 > packet_len) 58 59 goto bad; 59 - hdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); 60 + hdr = (struct ipv6_opt_hdr *)(nh + offset); 60 61 len = ((hdr->hdrlen + 1) << 3); 61 62 62 63 if (offset + len > packet_len) ··· 67 66 len -= 2; 68 67 69 68 while (len > 0) { 70 - int opttype = skb->nh.raw[offset]; 69 + int opttype = nh[offset]; 71 70 int optlen; 72 71 73 72 if (opttype == type) ··· 78 77 optlen = 1; 79 78 break; 80 79 default: 81 - optlen = skb->nh.raw[offset + 1] + 2; 80 + optlen = nh[offset + 1] + 2; 82 81 if (optlen > len) 83 82 goto bad; 84 83 break; ··· 114 113 { 115 114 struct sk_buff *skb = *skbp; 116 115 117 - switch ((skb->nh.raw[optoff] & 0xC0) >> 6) { 116 + switch ((skb_network_header(skb)[optoff] & 0xC0) >> 6) { 118 117 case 0: /* ignore */ 119 118 return 1; 120 119 ··· 142 141 { 143 142 struct sk_buff *skb = *skbp; 144 143 struct tlvtype_proc *curr; 144 + const unsigned char *nh = skb_network_header(skb); 145 145 int off = skb->h.raw - skb->nh.raw; 146 146 int len = ((skb->h.raw[1]+1)<<3); 147 147 ··· 153 151 len -= 2; 154 152 155 153 while (len > 0) { 156 - int optlen = skb->nh.raw[off+1]+2; 154 + int optlen = nh[off + 1] + 2; 157 155 158 - switch (skb->nh.raw[off]) { 156 + switch (nh[off]) { 159 157 case IPV6_TLV_PAD0: 160 158 optlen = 1; 161 159 break; ··· 167 165 if (optlen > len) 168 166 goto bad; 169 167 for (curr=procs; curr->type >= 0; curr++) { 170 - if (curr->type == skb->nh.raw[off]) { 168 + if (curr->type == nh[off]) { 171 169 /* type specific length/alignment 172 170 checks will be performed in the 173 171 func(). 
*/ ··· 213 211 opt->dsthao = opt->dst1; 214 212 opt->dst1 = 0; 215 213 216 - hao = (struct ipv6_destopt_hao *)(skb->nh.raw + optoff); 214 + hao = (struct ipv6_destopt_hao *)(skb_network_header(skb) + optoff); 217 215 218 216 if (hao->length != 16) { 219 217 LIMIT_NETDEBUG( ··· 246 244 247 245 /* update all variable using below by copied skbuff */ 248 246 *skbp = skb = skb2; 249 - hao = (struct ipv6_destopt_hao *)(skb2->nh.raw + optoff); 250 - ipv6h = (struct ipv6hdr *)skb2->nh.raw; 247 + hao = (struct ipv6_destopt_hao *)(skb_network_header(skb2) + 248 + optoff); 249 + ipv6h = skb2->nh.ipv6h; 251 250 } 252 251 253 252 if (skb->ip_summed == CHECKSUM_COMPLETE) ··· 409 406 default: 410 407 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), 411 408 IPSTATS_MIB_INHDRERRORS); 412 - icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->type) - skb->nh.raw); 409 + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 410 + (&hdr->type) - skb_network_header(skb)); 413 411 return -1; 414 412 } 415 413 ··· 447 443 skb->h.raw += (hdr->hdrlen + 1) << 3; 448 444 opt->dst0 = opt->dst1; 449 445 opt->dst1 = 0; 450 - opt->nhoff = (&hdr->nexthdr) - skb->nh.raw; 446 + opt->nhoff = (&hdr->nexthdr) - skb_network_header(skb); 451 447 return 1; 452 448 } 453 449 ··· 456 452 if (hdr->hdrlen & 0x01) { 457 453 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), 458 454 IPSTATS_MIB_INHDRERRORS); 459 - icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->hdrlen) - skb->nh.raw); 455 + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 456 + ((&hdr->hdrlen) - 457 + skb_network_header(skb))); 460 458 return -1; 461 459 } 462 460 break; ··· 485 479 if (hdr->segments_left > n) { 486 480 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), 487 481 IPSTATS_MIB_INHDRERRORS); 488 - icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, (&hdr->segments_left) - skb->nh.raw); 482 + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 483 + ((&hdr->segments_left) - 484 + skb_network_header(skb))); 489 485 return -1; 490 486 } 491 487 ··· 555 547 dst_release(xchg(&skb->dst, NULL)); 556 548 
ip6_route_input(skb); 557 549 if (skb->dst->error) { 558 - skb_push(skb, skb->data - skb->nh.raw); 550 + skb_push(skb, skb->data - skb_network_header(skb)); 559 551 dst_input(skb); 560 552 return -1; 561 553 } ··· 573 565 goto looped_back; 574 566 } 575 567 576 - skb_push(skb, skb->data - skb->nh.raw); 568 + skb_push(skb, skb->data - skb_network_header(skb)); 577 569 dst_input(skb); 578 570 return -1; 579 571 } ··· 664 656 static int ipv6_hop_ra(struct sk_buff **skbp, int optoff) 665 657 { 666 658 struct sk_buff *skb = *skbp; 659 + const unsigned char *nh = skb_network_header(skb); 667 660 668 - if (skb->nh.raw[optoff+1] == 2) { 661 + if (nh[optoff + 1] == 2) { 669 662 IP6CB(skb)->ra = optoff; 670 663 return 1; 671 664 } 672 665 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_ra: wrong RA length %d\n", 673 - skb->nh.raw[optoff+1]); 666 + nh[optoff + 1]); 674 667 kfree_skb(skb); 675 668 return 0; 676 669 } ··· 681 672 static int ipv6_hop_jumbo(struct sk_buff **skbp, int optoff) 682 673 { 683 674 struct sk_buff *skb = *skbp; 675 + const unsigned char *nh = skb_network_header(skb); 684 676 u32 pkt_len; 685 677 686 - if (skb->nh.raw[optoff+1] != 4 || (optoff&3) != 2) { 678 + if (nh[optoff + 1] != 4 || (optoff & 3) != 2) { 687 679 LIMIT_NETDEBUG(KERN_DEBUG "ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n", 688 - skb->nh.raw[optoff+1]); 680 + nh[optoff+1]); 689 681 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), 690 682 IPSTATS_MIB_INHDRERRORS); 691 683 goto drop; 692 684 } 693 685 694 - pkt_len = ntohl(*(__be32*)(skb->nh.raw+optoff+2)); 686 + pkt_len = ntohl(*(__be32 *)(nh + optoff + 2)); 695 687 if (pkt_len <= IPV6_MAXPLEN) { 696 688 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_INHDRERRORS); 697 689 icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2); ··· 737 727 struct inet6_skb_parm *opt = IP6CB(skb); 738 728 739 729 /* 740 - * skb->nh.raw is equal to skb->data, and 730 + * skb_network_header(skb) is equal to skb->data, and 741 731 * skb->h.raw - skb->nh.raw is always 
equal to 742 732 * sizeof(struct ipv6hdr) by definition of 743 733 * hop-by-hop options.
+2 -1
net/ipv6/icmp.c
··· 284 284 if (opt->dsthao) { 285 285 off = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO); 286 286 if (likely(off >= 0)) { 287 - hao = (struct ipv6_destopt_hao *)(skb->nh.raw + off); 287 + hao = (struct ipv6_destopt_hao *) 288 + (skb_network_header(skb) + off); 288 289 ipv6_addr_copy(&tmp, &iph->saddr); 289 290 ipv6_addr_copy(&iph->saddr, &hao->addr); 290 291 ipv6_addr_copy(&hao->addr, &tmp);
+2 -2
net/ipv6/ip6_input.c
··· 163 163 if (!pskb_pull(skb, skb->h.raw - skb->data)) 164 164 goto discard; 165 165 nhoff = IP6CB(skb)->nhoff; 166 - nexthdr = skb->nh.raw[nhoff]; 166 + nexthdr = skb_network_header(skb)[nhoff]; 167 167 168 168 raw_sk = sk_head(&raw_v6_htable[nexthdr & (MAX_INET_PROTOS - 1)]); 169 169 if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) ··· 181 181 indefinitely. */ 182 182 nf_reset(skb); 183 183 184 - skb_postpull_rcsum(skb, skb->nh.raw, 184 + skb_postpull_rcsum(skb, skb_network_header(skb), 185 185 skb->h.raw - skb->nh.raw); 186 186 hdr = skb->nh.ipv6h; 187 187 if (ipv6_addr_is_multicast(&hdr->daddr) &&
+13 -10
net/ipv6/ip6_output.c
··· 323 323 if (nexthdr == IPPROTO_ICMPV6) { 324 324 struct icmp6hdr *icmp6; 325 325 326 - if (!pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data)) 326 + if (!pskb_may_pull(skb, (skb_network_header(skb) + 327 + offset + 1 - skb->data))) 327 328 return 0; 328 329 329 - icmp6 = (struct icmp6hdr *)(skb->nh.raw + offset); 330 + icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset); 330 331 331 332 switch (icmp6->icmp6_type) { 332 333 case NDISC_ROUTER_SOLICITATION: ··· 393 392 * that different fragments will go along one path. --ANK 394 393 */ 395 394 if (opt->ra) { 396 - u8 *ptr = skb->nh.raw + opt->ra; 395 + u8 *ptr = skb_network_header(skb) + opt->ra; 397 396 if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3])) 398 397 return 0; 399 398 } ··· 528 527 { 529 528 u16 offset = sizeof(struct ipv6hdr); 530 529 struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1); 531 - unsigned int packet_len = skb->tail - skb->nh.raw; 530 + unsigned int packet_len = skb->tail - skb_network_header(skb); 532 531 int found_rhdr = 0; 533 532 *nexthdr = &skb->nh.ipv6h->nexthdr; 534 533 ··· 555 554 556 555 offset += ipv6_optlen(exthdr); 557 556 *nexthdr = &exthdr->nexthdr; 558 - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); 557 + exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) + 558 + offset); 559 559 } 560 560 561 561 return offset; ··· 622 620 /* BUILD HEADER */ 623 621 624 622 *prevhdr = NEXTHDR_FRAGMENT; 625 - tmp_hdr = kmemdup(skb->nh.raw, hlen, GFP_ATOMIC); 623 + tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC); 626 624 if (!tmp_hdr) { 627 625 IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS); 628 626 return -ENOMEM; ··· 632 630 fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr)); 633 631 __skb_push(skb, hlen); 634 632 skb_reset_network_header(skb); 635 - memcpy(skb->nh.raw, tmp_hdr, hlen); 633 + memcpy(skb_network_header(skb), tmp_hdr, hlen); 636 634 637 635 ipv6_select_ident(skb, fh); 638 636 fh->nexthdr 
= nexthdr; ··· 656 654 fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr)); 657 655 __skb_push(frag, hlen); 658 656 skb_reset_network_header(frag); 659 - memcpy(frag->nh.raw, tmp_hdr, hlen); 657 + memcpy(skb_network_header(frag), tmp_hdr, 658 + hlen); 660 659 offset += skb->len - hlen - sizeof(struct frag_hdr); 661 660 fh->nexthdr = nexthdr; 662 661 fh->reserved = 0; ··· 756 753 /* 757 754 * Copy the packet header into the new buffer. 758 755 */ 759 - memcpy(frag->nh.raw, skb->data, hlen); 756 + memcpy(skb_network_header(frag), skb->data, hlen); 760 757 761 758 /* 762 759 * Build fragment header. ··· 1332 1329 tail_skb = &(skb_shinfo(skb)->frag_list); 1333 1330 1334 1331 /* move skb->data to ip header from ext header */ 1335 - if (skb->data < skb->nh.raw) 1332 + if (skb->data < skb_network_header(skb)) 1336 1333 __skb_pull(skb, skb_network_offset(skb)); 1337 1334 while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { 1338 1335 __skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
+3 -2
net/ipv6/ip6_tunnel.c
··· 995 995 !ip6_tnl_xmit_ctl(t) || ip6_tnl_addr_conflict(t, ipv6h)) 996 996 return -1; 997 997 998 - if ((offset = parse_tlv_tnl_enc_lim(skb, skb->nh.raw)) > 0) { 998 + offset = parse_tlv_tnl_enc_lim(skb, skb_network_header(skb)); 999 + if (offset > 0) { 999 1000 struct ipv6_tlv_tnl_enc_lim *tel; 1000 - tel = (struct ipv6_tlv_tnl_enc_lim *) &skb->nh.raw[offset]; 1001 + tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset]; 1001 1002 if (tel->encap_limit == 0) { 1002 1003 icmpv6_send(skb, ICMPV6_PARAMPROB, 1003 1004 ICMPV6_HDR_FIELD, offset + 2, skb->dev);
+2 -2
net/ipv6/ipcomp6.c
··· 166 166 top_iph->payload_len = htons(skb->len - sizeof(struct ipv6hdr)); 167 167 168 168 ipch = (struct ipv6_comp_hdr *)start; 169 - ipch->nexthdr = *skb->nh.raw; 169 + ipch->nexthdr = *skb_network_header(skb); 170 170 ipch->flags = 0; 171 171 ipch->cpi = htons((u16 )ntohl(x->id.spi)); 172 - *skb->nh.raw = IPPROTO_COMP; 172 + *skb_network_header(skb) = IPPROTO_COMP; 173 173 174 174 out_ok: 175 175 return 0;
+17 -12
net/ipv6/mip6.c
··· 99 99 if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { 100 100 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", 101 101 mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); 102 - mip6_param_prob(skb, 0, (&mh->ip6mh_hdrlen) - skb->nh.raw); 102 + mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) - 103 + skb_network_header(skb))); 103 104 return -1; 104 105 } 105 106 106 107 if (mh->ip6mh_proto != IPPROTO_NONE) { 107 108 LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", 108 109 mh->ip6mh_proto); 109 - mip6_param_prob(skb, 0, (&mh->ip6mh_proto) - skb->nh.raw); 110 + mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) - 111 + skb_network_header(skb))); 110 112 return -1; 111 113 } 112 114 ··· 154 152 iph = (struct ipv6hdr *)skb->data; 155 153 iph->payload_len = htons(skb->len - sizeof(*iph)); 156 154 157 - nexthdr = *skb->nh.raw; 158 - *skb->nh.raw = IPPROTO_DSTOPTS; 155 + nexthdr = *skb_network_header(skb); 156 + *skb_network_header(skb) = IPPROTO_DSTOPTS; 159 157 160 158 dstopt = (struct ipv6_destopt_hdr *)skb->h.raw; 161 159 dstopt->nexthdr = nexthdr; ··· 217 215 if (likely(opt->dsthao)) { 218 216 offset = ipv6_find_tlv(skb, opt->dsthao, IPV6_TLV_HAO); 219 217 if (likely(offset >= 0)) 220 - hao = (struct ipv6_destopt_hao *)(skb->nh.raw + offset); 218 + hao = (struct ipv6_destopt_hao *) 219 + (skb_network_header(skb) + offset); 221 220 } 222 221 223 222 skb_get_timestamp(skb, &stamp); ··· 257 254 { 258 255 u16 offset = sizeof(struct ipv6hdr); 259 256 struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1); 260 - unsigned int packet_len = skb->tail - skb->nh.raw; 257 + const unsigned char *nh = skb_network_header(skb); 258 + unsigned int packet_len = skb->tail - nh; 261 259 int found_rhdr = 0; 262 260 263 261 *nexthdr = &skb->nh.ipv6h->nexthdr; ··· 292 288 293 289 offset += ipv6_optlen(exthdr); 294 290 *nexthdr = &exthdr->nexthdr; 295 - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); 291 + exthdr = (struct 
ipv6_opt_hdr *)(nh + offset); 296 292 } 297 293 298 294 return offset; ··· 365 361 iph = (struct ipv6hdr *)skb->data; 366 362 iph->payload_len = htons(skb->len - sizeof(*iph)); 367 363 368 - nexthdr = *skb->nh.raw; 369 - *skb->nh.raw = IPPROTO_ROUTING; 364 + nexthdr = *skb_network_header(skb); 365 + *skb_network_header(skb) = IPPROTO_ROUTING; 370 366 371 367 rt2 = (struct rt2_hdr *)skb->h.raw; 372 368 rt2->rt_hdr.nexthdr = nexthdr; ··· 388 384 { 389 385 u16 offset = sizeof(struct ipv6hdr); 390 386 struct ipv6_opt_hdr *exthdr = (struct ipv6_opt_hdr*)(skb->nh.ipv6h + 1); 391 - unsigned int packet_len = skb->tail - skb->nh.raw; 387 + const unsigned char *nh = skb_network_header(skb); 388 + unsigned int packet_len = skb->tail - nh; 392 389 int found_rhdr = 0; 393 390 394 391 *nexthdr = &skb->nh.ipv6h->nexthdr; ··· 402 397 case NEXTHDR_ROUTING: 403 398 if (offset + 3 <= packet_len) { 404 399 struct ipv6_rt_hdr *rt; 405 - rt = (struct ipv6_rt_hdr *)(skb->nh.raw + offset); 400 + rt = (struct ipv6_rt_hdr *)(nh + offset); 406 401 if (rt->type != 0) 407 402 return offset; 408 403 } ··· 422 417 423 418 offset += ipv6_optlen(exthdr); 424 419 *nexthdr = &exthdr->nexthdr; 425 - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); 420 + exthdr = (struct ipv6_opt_hdr *)(nh + offset); 426 421 } 427 422 428 423 return offset;
+12 -7
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 408 408 return -1; 409 409 } 410 410 411 - if (skb->ip_summed == CHECKSUM_COMPLETE) 411 + if (skb->ip_summed == CHECKSUM_COMPLETE) { 412 + const unsigned char *nh = skb_network_header(skb); 412 413 skb->csum = csum_sub(skb->csum, 413 - csum_partial(skb->nh.raw, 414 - (u8*)(fhdr + 1) - skb->nh.raw, 414 + csum_partial(nh, (u8 *)(fhdr + 1) - nh, 415 415 0)); 416 + } 416 417 417 418 /* Is this the final fragment? */ 418 419 if (!(fhdr->frag_off & htons(IP6_MF))) { ··· 584 583 BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0); 585 584 586 585 /* Unfragmented part is taken from the first segment. */ 587 - payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr); 586 + payload_len = ((head->data - skb_network_header(head)) - 587 + sizeof(struct ipv6hdr) + fq->len - 588 + sizeof(struct frag_hdr)); 588 589 if (payload_len > IPV6_MAXPLEN) { 589 590 DEBUGP("payload len is too large.\n"); 590 591 goto out_oversize; ··· 627 624 628 625 /* We have to remove fragment header from datagram and to relocate 629 626 * header in order to calculate ICV correctly. */ 630 - head->nh.raw[fq->nhoffset] = head->h.raw[0]; 627 + skb_network_header(head)[fq->nhoffset] = head->h.raw[0]; 631 628 memmove(head->head + sizeof(struct frag_hdr), head->head, 632 629 (head->data - head->head) - sizeof(struct frag_hdr)); 633 630 head->mac.raw += sizeof(struct frag_hdr); ··· 635 632 636 633 skb_shinfo(head)->frag_list = head->next; 637 634 head->h.raw = head->data; 638 - skb_push(head, head->data - head->nh.raw); 635 + skb_push(head, head->data - skb_network_header(head)); 639 636 atomic_sub(head->truesize, &nf_ct_frag6_mem); 640 637 641 638 for (fp=head->next; fp; fp = fp->next) { ··· 656 653 657 654 /* Yes, and fold redundant checksum back. 
8) */ 658 655 if (head->ip_summed == CHECKSUM_COMPLETE) 659 - head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum); 656 + head->csum = csum_partial(skb_network_header(head), 657 + head->h.raw - head->nh.raw, 658 + head->csum); 660 659 661 660 fq->fragments = NULL; 662 661
+3 -2
net/ipv6/raw.c
··· 361 361 skb->ip_summed = CHECKSUM_UNNECESSARY; 362 362 363 363 if (skb->ip_summed == CHECKSUM_COMPLETE) { 364 - skb_postpull_rcsum(skb, skb->nh.raw, 364 + skb_postpull_rcsum(skb, skb_network_header(skb), 365 365 skb->h.raw - skb->nh.raw); 366 366 if (!csum_ipv6_magic(&skb->nh.ipv6h->saddr, 367 367 &skb->nh.ipv6h->daddr, ··· 488 488 goto out; 489 489 490 490 offset = rp->offset; 491 - total_len = inet_sk(sk)->cork.length - (skb->nh.raw - skb->data); 491 + total_len = inet_sk(sk)->cork.length - (skb_network_header(skb) - 492 + skb->data); 492 493 if (offset >= total_len - 1) { 493 494 err = -EINVAL; 494 495 ip6_flush_pending_frames(sk);
+17 -8
net/ipv6/reassembly.c
··· 436 436 if ((unsigned int)end > IPV6_MAXPLEN) { 437 437 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), 438 438 IPSTATS_MIB_INHDRERRORS); 439 - icmpv6_param_prob(skb,ICMPV6_HDR_FIELD, (u8*)&fhdr->frag_off - skb->nh.raw); 439 + icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, 440 + ((u8 *)&fhdr->frag_off - 441 + skb_network_header(skb))); 440 442 return; 441 443 } 442 444 443 - if (skb->ip_summed == CHECKSUM_COMPLETE) 445 + if (skb->ip_summed == CHECKSUM_COMPLETE) { 446 + const unsigned char *nh = skb_network_header(skb); 444 447 skb->csum = csum_sub(skb->csum, 445 - csum_partial(skb->nh.raw, (u8*)(fhdr+1)-skb->nh.raw, 0)); 448 + csum_partial(nh, (u8 *)(fhdr + 1) - nh, 449 + 0)); 450 + } 446 451 447 452 /* Is this the final fragment? */ 448 453 if (!(fhdr->frag_off & htons(IP6_MF))) { ··· 610 605 BUG_TRAP(FRAG6_CB(head)->offset == 0); 611 606 612 607 /* Unfragmented part is taken from the first segment. */ 613 - payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr); 608 + payload_len = ((head->data - skb_network_header(head)) - 609 + sizeof(struct ipv6hdr) + fq->len - 610 + sizeof(struct frag_hdr)); 614 611 if (payload_len > IPV6_MAXPLEN) 615 612 goto out_oversize; 616 613 ··· 646 639 /* We have to remove fragment header from datagram and to relocate 647 640 * header in order to calculate ICV correctly. 
*/ 648 641 nhoff = fq->nhoffset; 649 - head->nh.raw[nhoff] = head->h.raw[0]; 642 + skb_network_header(head)[nhoff] = head->h.raw[0]; 650 643 memmove(head->head + sizeof(struct frag_hdr), head->head, 651 644 (head->data - head->head) - sizeof(struct frag_hdr)); 652 645 head->mac.raw += sizeof(struct frag_hdr); ··· 654 647 655 648 skb_shinfo(head)->frag_list = head->next; 656 649 head->h.raw = head->data; 657 - skb_push(head, head->data - head->nh.raw); 650 + skb_push(head, head->data - skb_network_header(head)); 658 651 atomic_sub(head->truesize, &ip6_frag_mem); 659 652 660 653 for (fp=head->next; fp; fp = fp->next) { ··· 678 671 679 672 /* Yes, and fold redundant checksum back. 8) */ 680 673 if (head->ip_summed == CHECKSUM_COMPLETE) 681 - head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum); 674 + head->csum = csum_partial(skb_network_header(head), 675 + head->h.raw - head->nh.raw, 676 + head->csum); 682 677 683 678 rcu_read_lock(); 684 679 IP6_INC_STATS_BH(__in6_dev_get(dev), IPSTATS_MIB_REASMOKS); ··· 734 725 skb->h.raw += sizeof(struct frag_hdr); 735 726 IP6_INC_STATS_BH(ip6_dst_idev(skb->dst), IPSTATS_MIB_REASMOKS); 736 727 737 - IP6CB(skb)->nhoff = (u8*)fhdr - skb->nh.raw; 728 + IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); 738 729 return 1; 739 730 } 740 731
+6 -2
net/ipv6/tcp_ipv6.c
··· 486 486 struct sk_buff *pktopts = treq->pktopts; 487 487 struct inet6_skb_parm *rxopt = IP6CB(pktopts); 488 488 if (rxopt->srcrt) 489 - opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr*)(pktopts->nh.raw + rxopt->srcrt)); 489 + opt = ipv6_invert_rthdr(sk, 490 + (struct ipv6_rt_hdr *)(skb_network_header(pktopts) + 491 + rxopt->srcrt)); 490 492 } 491 493 492 494 if (opt && opt->srcrt) { ··· 1391 1389 opt == NULL && treq->pktopts) { 1392 1390 struct inet6_skb_parm *rxopt = IP6CB(treq->pktopts); 1393 1391 if (rxopt->srcrt) 1394 - opt = ipv6_invert_rthdr(sk, (struct ipv6_rt_hdr *)(treq->pktopts->nh.raw + rxopt->srcrt)); 1392 + opt = ipv6_invert_rthdr(sk, 1393 + (struct ipv6_rt_hdr *)(skb_network_header(treq->pktopts) + 1394 + rxopt->srcrt)); 1395 1395 } 1396 1396 1397 1397 if (dst == NULL) {
+3 -3
net/ipv6/xfrm6_input.c
··· 28 28 unsigned int nhoff; 29 29 30 30 nhoff = IP6CB(skb)->nhoff; 31 - nexthdr = skb->nh.raw[nhoff]; 31 + nexthdr = skb_network_header(skb)[nhoff]; 32 32 33 33 seq = 0; 34 34 if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) ··· 58 58 if (nexthdr <= 0) 59 59 goto drop_unlock; 60 60 61 - skb->nh.raw[nhoff] = nexthdr; 61 + skb_network_header(skb)[nhoff] = nexthdr; 62 62 63 63 if (x->props.replay_window) 64 64 xfrm_replay_advance(x, seq); ··· 113 113 } else { 114 114 #ifdef CONFIG_NETFILTER 115 115 skb->nh.ipv6h->payload_len = htons(skb->len); 116 - __skb_push(skb, skb->data - skb->nh.raw); 116 + __skb_push(skb, skb->data - skb_network_header(skb)); 117 117 118 118 NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL, 119 119 ip6_rcv_finish);
+1 -1
net/ipv6/xfrm6_mode_beet.c
··· 67 67 goto out; 68 68 69 69 skb_push(skb, size); 70 - memmove(skb->data, skb->nh.raw, size); 70 + memmove(skb->data, skb_network_header(skb), size); 71 71 skb_reset_network_header(skb); 72 72 73 73 old_mac = skb_mac_header(skb);
+4 -2
net/ipv6/xfrm6_mode_transport.c
··· 53 53 { 54 54 int ihl = skb->data - skb->h.raw; 55 55 56 - if (skb->h.raw != skb->nh.raw) 57 - skb->nh.raw = memmove(skb->h.raw, skb->nh.raw, ihl); 56 + if (skb->h.raw != skb->nh.raw) { 57 + memmove(skb->h.raw, skb_network_header(skb), ihl); 58 + skb->nh.raw = skb->h.raw; 59 + } 58 60 skb->nh.ipv6h->payload_len = htons(skb->len + ihl - 59 61 sizeof(struct ipv6hdr)); 60 62 skb->h.raw = skb->data;
+5 -3
net/ipv6/xfrm6_mode_tunnel.c
··· 87 87 { 88 88 int err = -EINVAL; 89 89 const unsigned char *old_mac; 90 + const unsigned char *nh = skb_network_header(skb); 90 91 91 - if (skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPV6 92 - && skb->nh.raw[IP6CB(skb)->nhoff] != IPPROTO_IPIP) 92 + if (nh[IP6CB(skb)->nhoff] != IPPROTO_IPV6 && 93 + nh[IP6CB(skb)->nhoff] != IPPROTO_IPIP) 93 94 goto out; 94 95 if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) 95 96 goto out; ··· 99 98 (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) 100 99 goto out; 101 100 102 - if (skb->nh.raw[IP6CB(skb)->nhoff] == IPPROTO_IPV6) { 101 + nh = skb_network_header(skb); 102 + if (nh[IP6CB(skb)->nhoff] == IPPROTO_IPV6) { 103 103 if (x->props.flags & XFRM_STATE_DECAP_DSCP) 104 104 ipv6_copy_dscp(skb->nh.ipv6h, skb->h.ipv6h); 105 105 if (!(x->props.flags & XFRM_STATE_NOECN))
+9 -7
net/ipv6/xfrm6_policy.c
··· 273 273 u16 offset = skb->h.raw - skb->nh.raw; 274 274 struct ipv6hdr *hdr = skb->nh.ipv6h; 275 275 struct ipv6_opt_hdr *exthdr; 276 - u8 nexthdr = skb->nh.raw[IP6CB(skb)->nhoff]; 276 + const unsigned char *nh = skb_network_header(skb); 277 + u8 nexthdr = nh[IP6CB(skb)->nhoff]; 277 278 278 279 memset(fl, 0, sizeof(struct flowi)); 279 280 ipv6_addr_copy(&fl->fl6_dst, &hdr->daddr); 280 281 ipv6_addr_copy(&fl->fl6_src, &hdr->saddr); 281 282 282 - while (pskb_may_pull(skb, skb->nh.raw + offset + 1 - skb->data)) { 283 - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); 283 + while (pskb_may_pull(skb, nh + offset + 1 - skb->data)) { 284 + nh = skb_network_header(skb); 285 + exthdr = (struct ipv6_opt_hdr *)(nh + offset); 284 286 285 287 switch (nexthdr) { 286 288 case NEXTHDR_ROUTING: ··· 290 288 case NEXTHDR_DEST: 291 289 offset += ipv6_optlen(exthdr); 292 290 nexthdr = exthdr->nexthdr; 293 - exthdr = (struct ipv6_opt_hdr*)(skb->nh.raw + offset); 291 + exthdr = (struct ipv6_opt_hdr *)(nh + offset); 294 292 break; 295 293 296 294 case IPPROTO_UDP: ··· 298 296 case IPPROTO_TCP: 299 297 case IPPROTO_SCTP: 300 298 case IPPROTO_DCCP: 301 - if (pskb_may_pull(skb, skb->nh.raw + offset + 4 - skb->data)) { 299 + if (pskb_may_pull(skb, nh + offset + 4 - skb->data)) { 302 300 __be16 *ports = (__be16 *)exthdr; 303 301 304 302 fl->fl_ip_sport = ports[0]; ··· 308 306 return; 309 307 310 308 case IPPROTO_ICMPV6: 311 - if (pskb_may_pull(skb, skb->nh.raw + offset + 2 - skb->data)) { 309 + if (pskb_may_pull(skb, nh + offset + 2 - skb->data)) { 312 310 u8 *icmp = (u8 *)exthdr; 313 311 314 312 fl->fl_icmp_type = icmp[0]; ··· 319 317 320 318 #ifdef CONFIG_IPV6_MIP6 321 319 case IPPROTO_MH: 322 - if (pskb_may_pull(skb, skb->nh.raw + offset + 3 - skb->data)) { 320 + if (pskb_may_pull(skb, nh + offset + 3 - skb->data)) { 323 321 struct ip6_mh *mh; 324 322 mh = (struct ip6_mh *)exthdr; 325 323
+2 -2
net/netfilter/xt_TCPMSS.c
··· 54 54 return -1; 55 55 56 56 tcplen = (*pskb)->len - tcphoff; 57 - tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff); 57 + tcph = (struct tcphdr *)(skb_network_header(*pskb) + tcphoff); 58 58 59 59 /* Since it passed flags test in tcp match, we know it is is 60 60 not a fragment, and has data >= tcp header length. SYN ··· 113 113 return -1; 114 114 kfree_skb(*pskb); 115 115 *pskb = newskb; 116 - tcph = (struct tcphdr *)((*pskb)->nh.raw + tcphoff); 116 + tcph = (struct tcphdr *)(skb_network_header(*pskb) + tcphoff); 117 117 } 118 118 119 119 skb_put((*pskb), TCPOLEN_MSS);
+1 -1
net/sched/act_pedit.c
··· 136 136 } 137 137 } 138 138 139 - pptr = skb->nh.raw; 139 + pptr = skb_network_header(skb); 140 140 141 141 spin_lock(&p->tcf_lock); 142 142
+1 -1
net/sched/cls_u32.c
··· 119 119 } stack[TC_U32_MAXDEPTH]; 120 120 121 121 struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root; 122 - u8 *ptr = skb->nh.raw; 122 + u8 *ptr = skb_network_header(skb); 123 123 struct tc_u_knode *n; 124 124 int sdepth = 0; 125 125 int off2 = 0;
+1 -1
net/sched/em_u32.c
··· 22 22 struct tcf_pkt_info *info) 23 23 { 24 24 struct tc_u32_key *key = (struct tc_u32_key *) em->data; 25 - unsigned char *ptr = skb->nh.raw; 25 + const unsigned char *ptr = skb_network_header(skb); 26 26 27 27 if (info) { 28 28 if (info->ptr)