Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SK_BUFF]: Convert skb->tail to sk_buff_data_t

So that it is also an offset from skb->head, reduces its size from 8 to 4 bytes
on 64bit architectures, allowing us to combine the 4 bytes hole left by the
layer headers conversion, reducing struct sk_buff size to 256 bytes, i.e. 4
64byte cachelines, and since the sk_buff slab cache is SLAB_HWCACHE_ALIGN...
:-)

Many calculations that previously required that skb->{transport,network,
mac}_header be first converted to a pointer now can be done directly, being
meaningful as offsets or pointers.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Arnaldo Carvalho de Melo and committed by
David S. Miller
27a884dc be8bd863

+396 -329
+5 -5
arch/ia64/sn/kernel/xpnet.c
··· 264 264 265 265 dev_dbg(xpnet, "<skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 266 266 "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 267 - (void *) skb->data, (void *) skb->tail, (void *) skb->end, 267 + (void *)skb->data, skb_tail_pointer(skb), (void *)skb->end, 268 268 skb->len); 269 269 270 270 skb->protocol = eth_type_trans(skb, xpnet_device); ··· 272 272 273 273 dev_dbg(xpnet, "passing skb to network layer; \n\tskb->head=0x%p " 274 274 "skb->data=0x%p skb->tail=0x%p skb->end=0x%p skb->len=%d\n", 275 - (void *) skb->head, (void *) skb->data, (void *) skb->tail, 275 + (void *)skb->head, (void *)skb->data, skb_tail_pointer(skb), 276 276 (void *) skb->end, skb->len); 277 277 278 278 ··· 475 475 476 476 dev_dbg(xpnet, ">skb->head=0x%p skb->data=0x%p skb->tail=0x%p " 477 477 "skb->end=0x%p skb->len=%d\n", (void *) skb->head, 478 - (void *) skb->data, (void *) skb->tail, (void *) skb->end, 478 + (void *)skb->data, skb_tail_pointer(skb), (void *)skb->end, 479 479 skb->len); 480 480 481 481 ··· 497 497 498 498 /* get the beginning of the first cacheline and end of last */ 499 499 start_addr = ((u64) skb->data & ~(L1_CACHE_BYTES - 1)); 500 - end_addr = L1_CACHE_ALIGN((u64) skb->tail); 500 + end_addr = L1_CACHE_ALIGN((u64)skb_tail_pointer(skb)); 501 501 502 502 /* calculate how many bytes to embed in the XPC message */ 503 503 embedded_bytes = 0; ··· 573 573 msg->magic = XPNET_MAGIC; 574 574 msg->size = end_addr - start_addr; 575 575 msg->leadin_ignore = (u64) skb->data - start_addr; 576 - msg->tailout_ignore = end_addr - (u64) skb->tail; 576 + msg->tailout_ignore = end_addr - (u64)skb_tail_pointer(skb); 577 577 msg->buf_pa = __pa(start_addr); 578 578 579 579 dev_dbg(xpnet, "sending XPC message to %d:%d\nmsg->buf_pa="
+2 -2
drivers/atm/he.c
··· 1901 1901 case ATM_AAL0: 1902 1902 /* 2.10.1.5 raw cell receive */ 1903 1903 skb->len = ATM_AAL0_SDU; 1904 - skb->tail = skb->data + skb->len; 1904 + skb_set_tail_pointer(skb, skb->len); 1905 1905 break; 1906 1906 case ATM_AAL5: 1907 1907 /* 2.10.1.2 aal5 receive */ 1908 1908 1909 1909 skb->len = AAL5_LEN(skb->data, he_vcc->pdu_len); 1910 - skb->tail = skb->data + skb->len; 1910 + skb_set_tail_pointer(skb, skb->len); 1911 1911 #ifdef USE_CHECKSUM_HW 1912 1912 if (vcc->vpi == 0 && vcc->vci >= ATM_NOT_RSV_VCI) { 1913 1913 skb->ip_summed = CHECKSUM_COMPLETE;
+2 -1
drivers/atm/idt77252.c
··· 1816 1816 u32 handle; 1817 1817 u32 addr; 1818 1818 1819 - skb->data = skb->tail = skb->head; 1819 + skb->data = skb->head; 1820 + skb_reset_tail_pointer(skb); 1820 1821 skb->len = 0; 1821 1822 1822 1823 skb_reserve(skb, 16);
+6 -4
drivers/atm/nicstar.c
··· 2208 2208 if (i == 1 && ns_rsqe_eopdu(rsqe)) 2209 2209 *((u32 *) sb->data) |= 0x00000002; 2210 2210 skb_put(sb, NS_AAL0_HEADER); 2211 - memcpy(sb->tail, cell, ATM_CELL_PAYLOAD); 2211 + memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); 2212 2212 skb_put(sb, ATM_CELL_PAYLOAD); 2213 2213 ATM_SKB(sb)->vcc = vcc; 2214 2214 __net_timestamp(sb); ··· 2252 2252 vc->rx_iov = iovb; 2253 2253 NS_SKB(iovb)->iovcnt = 0; 2254 2254 iovb->len = 0; 2255 - iovb->tail = iovb->data = iovb->head; 2255 + iovb->data = iovb->head; 2256 + skb_reset_tail_pointer(iovb); 2256 2257 NS_SKB(iovb)->vcc = vcc; 2257 2258 /* IMPORTANT: a pointer to the sk_buff containing the small or large 2258 2259 buffer is stored as iovec base, NOT a pointer to the ··· 2266 2265 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); 2267 2266 NS_SKB(iovb)->iovcnt = 0; 2268 2267 iovb->len = 0; 2269 - iovb->tail = iovb->data = iovb->head; 2268 + iovb->data = iovb->head; 2269 + skb_reset_tail_pointer(iovb); 2270 2270 NS_SKB(iovb)->vcc = vcc; 2271 2271 } 2272 2272 iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++]; ··· 2491 2489 { 2492 2490 lb = (struct sk_buff *) iov->iov_base; 2493 2491 tocopy = min_t(int, remaining, iov->iov_len); 2494 - memcpy(hb->tail, lb->data, tocopy); 2492 + memcpy(skb_tail_pointer(hb), lb->data, tocopy); 2495 2493 skb_put(hb, tocopy); 2496 2494 iov++; 2497 2495 remaining -= tocopy;
+3 -2
drivers/infiniband/hw/amso1100/c2.c
··· 439 439 } 440 440 441 441 /* Setup the skb for reuse since we're dropping this pkt */ 442 - elem->skb->tail = elem->skb->data = elem->skb->head; 442 + elem->skb->data = elem->skb->head; 443 + skb_reset_tail_pointer(elem->skb); 443 444 444 445 /* Zero out the rxp hdr in the sk_buff */ 445 446 memset(elem->skb->data, 0, sizeof(*rxp_hdr)); ··· 522 521 * "sizeof(struct c2_rxp_hdr)". 523 522 */ 524 523 skb->data += sizeof(*rxp_hdr); 525 - skb->tail = skb->data + buflen; 524 + skb_set_tail_pointer(skb, buflen); 526 525 skb->len = buflen; 527 526 skb->protocol = eth_type_trans(skb, netdev); 528 527
+1 -1
drivers/isdn/i4l/isdn_net.c
··· 881 881 882 882 addinfo[0] = '\0'; 883 883 /* This check stolen from 2.1.72 dev_queue_xmit_nit() */ 884 - if (p < skb->data || p >= skb->tail) { 884 + if (p < skb->data || skb->network_header >= skb->tail) { 885 885 /* fall back to old isdn_net_log_packet method() */ 886 886 char * buf = skb->data; 887 887
+6 -4
drivers/media/dvb/dvb-core/dvb_net.c
··· 600 600 /* Check CRC32, we've got it in our skb already. */ 601 601 unsigned short ulen = htons(priv->ule_sndu_len); 602 602 unsigned short utype = htons(priv->ule_sndu_type); 603 + const u8 *tail; 603 604 struct kvec iov[3] = { 604 605 { &ulen, sizeof ulen }, 605 606 { &utype, sizeof utype }, ··· 614 613 } 615 614 616 615 ule_crc = iov_crc32(ule_crc, iov, 3); 617 - expected_crc = *((u8 *)priv->ule_skb->tail - 4) << 24 | 618 - *((u8 *)priv->ule_skb->tail - 3) << 16 | 619 - *((u8 *)priv->ule_skb->tail - 2) << 8 | 620 - *((u8 *)priv->ule_skb->tail - 1); 616 + tail = skb_tail_pointer(priv->ule_skb); 617 + expected_crc = *(tail - 4) << 24 | 618 + *(tail - 3) << 16 | 619 + *(tail - 2) << 8 | 620 + *(tail - 1); 621 621 if (ule_crc != expected_crc) { 622 622 printk(KERN_WARNING "%lu: CRC32 check FAILED: %08x / %08x, SNDU len %d type %#x, ts_remain %d, next 2: %x.\n", 623 623 priv->ts_count, ule_crc, expected_crc, priv->ule_sndu_len, priv->ule_sndu_type, ts_remain, ts_remain > 2 ? *(unsigned short *)from_where : 0);
+1 -1
drivers/net/cris/eth_v10.c
··· 1348 1348 1349 1349 #ifdef ETHDEBUG 1350 1350 printk("head = 0x%x, data = 0x%x, tail = 0x%x, end = 0x%x\n", 1351 - skb->head, skb->data, skb->tail, skb->end); 1351 + skb->head, skb->data, skb_tail_pointer(skb), skb->end); 1352 1352 printk("copying packet to 0x%x.\n", skb_data_ptr); 1353 1353 #endif 1354 1354
+3 -3
drivers/net/cxgb3/sge.c
··· 1325 1325 flits = skb_transport_offset(skb) / 8; 1326 1326 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; 1327 1327 sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), 1328 - skb->tail - skb_transport_header(skb), 1328 + skb->tail - skb->transport_header, 1329 1329 adap->pdev); 1330 1330 if (need_skb_unmap()) { 1331 1331 setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); 1332 1332 skb->destructor = deferred_unmap_destructor; 1333 1333 ((struct unmap_info *)skb->cb)->len = (skb->tail - 1334 - skb_transport_header(skb)); 1334 + skb->transport_header); 1335 1335 } 1336 1336 1337 1337 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, ··· 1353 1353 return 1; /* packet fits as immediate data */ 1354 1354 1355 1355 flits = skb_transport_offset(skb) / 8; /* headers */ 1356 - if (skb->tail != skb_transport_header(skb)) 1356 + if (skb->tail != skb->transport_header) 1357 1357 cnt++; 1358 1358 return flits_to_desc(flits + sgl_len(cnt)); 1359 1359 }
+2 -2
drivers/net/e1000/e1000_main.c
··· 3304 3304 * NOTE: this is a TSO only workaround 3305 3305 * if end byte alignment not correct move us 3306 3306 * into the next dword */ 3307 - if ((unsigned long)(skb->tail - 1) & 4) 3307 + if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4) 3308 3308 break; 3309 3309 /* fall through */ 3310 3310 case e1000_82571: ··· 4388 4388 PCI_DMA_FROMDEVICE); 4389 4389 vaddr = kmap_atomic(ps_page->ps_page[0], 4390 4390 KM_SKB_DATA_SOFTIRQ); 4391 - memcpy(skb->tail, vaddr, l1); 4391 + memcpy(skb_tail_pointer(skb), vaddr, l1); 4392 4392 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); 4393 4393 pci_dma_sync_single_for_device(pdev, 4394 4394 ps_page_dma->ps_page_dma[0],
+1 -1
drivers/net/ibm_emac/ibm_emac_core.c
··· 1338 1338 dev_kfree_skb(dev->rx_sg_skb); 1339 1339 dev->rx_sg_skb = NULL; 1340 1340 } else { 1341 - cacheable_memcpy(dev->rx_sg_skb->tail, 1341 + cacheable_memcpy(skb_tail_pointer(dev->rx_sg_skb), 1342 1342 dev->rx_skb[slot]->data, len); 1343 1343 skb_put(dev->rx_sg_skb, len); 1344 1344 emac_recycle_rx_skb(dev, slot, len);
+2 -1
drivers/net/macb.c
··· 575 575 int i; 576 576 dev_dbg(&bp->pdev->dev, 577 577 "start_xmit: len %u head %p data %p tail %p end %p\n", 578 - skb->len, skb->head, skb->data, skb->tail, skb->end); 578 + skb->len, skb->head, skb->data, 579 + skb_tail_pointer(skb), skb->end); 579 580 dev_dbg(&bp->pdev->dev, 580 581 "data:"); 581 582 for (i = 0; i < 16; i++)
+1 -1
drivers/net/pcmcia/nmclan_cs.c
··· 1185 1185 skb_reserve(skb, 2); 1186 1186 insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1); 1187 1187 if (pkt_len & 1) 1188 - *(skb->tail-1) = inb(ioaddr + AM2150_RCV); 1188 + *(skb_tail_pointer(skb) - 1) = inb(ioaddr + AM2150_RCV); 1189 1189 skb->protocol = eth_type_trans(skb, dev); 1190 1190 1191 1191 netif_rx(skb); /* Send the packet to the upper (protocol) layers. */
+2 -2
drivers/net/s2io.c
··· 2195 2195 frag_list->next = NULL; 2196 2196 tmp = (void *)ALIGN((long)frag_list->data, ALIGN_SIZE + 1); 2197 2197 frag_list->data = tmp; 2198 - frag_list->tail = tmp; 2198 + skb_reset_tail_pointer(frag_list); 2199 2199 2200 2200 /* Buffer-2 receives L4 data payload */ 2201 2201 ((struct RxD3*)rxdp)->Buffer2_ptr = pci_map_single(nic->pdev, ··· 2349 2349 tmp += ALIGN_SIZE; 2350 2350 tmp &= ~ALIGN_SIZE; 2351 2351 skb->data = (void *) (unsigned long)tmp; 2352 - skb->tail = (void *) (unsigned long)tmp; 2352 + skb_reset_tail_pointer(skb); 2353 2353 2354 2354 if (!(((struct RxD3*)rxdp)->Buffer0_ptr)) 2355 2355 ((struct RxD3*)rxdp)->Buffer0_ptr =
+11 -3
drivers/net/tulip/uli526x.c
··· 829 829 != NULL) ) { 830 830 /* size less than COPY_SIZE, allocate a rxlen SKB */ 831 831 skb_reserve(skb, 2); /* 16byte align */ 832 - memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen); 832 + memcpy(skb_put(skb, rxlen), 833 + skb_tail_pointer(rxptr->rx_skb_ptr), 834 + rxlen); 833 835 uli526x_reuse_skb(db, rxptr->rx_skb_ptr); 834 836 } else 835 837 skb_put(skb, rxlen); ··· 1177 1175 1178 1176 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { 1179 1177 rxptr->rx_skb_ptr = skb; 1180 - rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1178 + rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, 1179 + skb_tail_pointer(skb), 1180 + RX_ALLOC_SIZE, 1181 + PCI_DMA_FROMDEVICE)); 1181 1182 wmb(); 1182 1183 rxptr->rdes0 = cpu_to_le32(0x80000000); 1183 1184 db->rx_avail_cnt++; ··· 1344 1339 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) 1345 1340 break; 1346 1341 rxptr->rx_skb_ptr = skb; /* FIXME (?) */ 1347 - rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1342 + rxptr->rdes2 = cpu_to_le32(pci_map_single(db->pdev, 1343 + skb_tail_pointer(skb), 1344 + RX_ALLOC_SIZE, 1345 + PCI_DMA_FROMDEVICE)); 1348 1346 wmb(); 1349 1347 rxptr->rdes0 = cpu_to_le32(0x80000000); 1350 1348 rxptr = rxptr->next_rx_desc;
+1 -1
drivers/net/wan/hdlc_fr.c
··· 533 533 skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI); 534 534 fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI); 535 535 } 536 - data = skb->tail; 536 + data = skb_tail_pointer(skb); 537 537 data[i++] = LMI_CALLREF; 538 538 data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY; 539 539 if (lmi == LMI_ANSI)
+2 -2
drivers/net/wan/lmc/lmc_main.c
··· 1636 1636 if (nsb) { 1637 1637 sc->lmc_rxq[i] = nsb; 1638 1638 nsb->dev = dev; 1639 - sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); 1639 + sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); 1640 1640 } 1641 1641 sc->failed_recv_alloc = 1; 1642 1642 goto skip_packet; ··· 1679 1679 if (nsb) { 1680 1680 sc->lmc_rxq[i] = nsb; 1681 1681 nsb->dev = dev; 1682 - sc->lmc_rxring[i].buffer1 = virt_to_bus (nsb->tail); 1682 + sc->lmc_rxring[i].buffer1 = virt_to_bus(skb_tail_pointer(nsb)); 1683 1683 /* Transferred to 21140 below */ 1684 1684 } 1685 1685 else {
+1 -1
drivers/net/wireless/hostap/hostap_80211_rx.c
··· 922 922 if (frag != 0) 923 923 flen -= hdrlen; 924 924 925 - if (frag_skb->tail + flen > frag_skb->end) { 925 + if (skb_tail_pointer(frag_skb) + flen > frag_skb->end) { 926 926 printk(KERN_WARNING "%s: host decrypted and " 927 927 "reassembled frame did not fit skb\n", 928 928 dev->name);
+7 -4
drivers/s390/net/ctcmain.c
··· 706 706 spin_unlock(&ch->collect_lock); 707 707 return; 708 708 } 709 - ch->trans_skb->tail = ch->trans_skb->data = ch->trans_skb_data; 709 + ch->trans_skb->data = ch->trans_skb_data; 710 + skb_reset_tail_pointer(ch->trans_skb); 710 711 ch->trans_skb->len = 0; 711 712 if (ch->prof.maxmulti < (ch->collect_len + 2)) 712 713 ch->prof.maxmulti = ch->collect_len + 2; ··· 832 831 ctc_unpack_skb(ch, skb); 833 832 } 834 833 again: 835 - skb->data = skb->tail = ch->trans_skb_data; 834 + skb->data = ch->trans_skb_data; 835 + skb_reset_tail_pointer(skb); 836 836 skb->len = 0; 837 837 if (ctc_checkalloc_buffer(ch, 1)) 838 838 return; ··· 2228 2226 * IDAL support in CTC is broken, so we have to 2229 2227 * care about skb's above 2G ourselves. 2230 2228 */ 2231 - hi = ((unsigned long) skb->tail + LL_HEADER_LENGTH) >> 31; 2229 + hi = ((unsigned long)skb_tail_pointer(skb) + 2230 + LL_HEADER_LENGTH) >> 31; 2232 2231 if (hi) { 2233 2232 nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA); 2234 2233 if (!nskb) { ··· 2265 2262 return -EBUSY; 2266 2263 } 2267 2264 2268 - ch->trans_skb->tail = ch->trans_skb->data; 2265 + skb_reset_tail_pointer(ch->trans_skb); 2269 2266 ch->trans_skb->len = 0; 2270 2267 ch->ccw[1].count = skb->len; 2271 2268 memcpy(skb_put(ch->trans_skb, skb->len), skb->data,
+6 -4
drivers/s390/net/netiucv.c
··· 689 689 msg->length, conn->max_buffsize); 690 690 return; 691 691 } 692 - conn->rx_buff->data = conn->rx_buff->tail = conn->rx_buff->head; 692 + conn->rx_buff->data = conn->rx_buff->head; 693 + skb_reset_tail_pointer(conn->rx_buff); 693 694 conn->rx_buff->len = 0; 694 695 rc = iucv_message_receive(conn->path, msg, 0, conn->rx_buff->data, 695 696 msg->length, NULL); ··· 736 735 } 737 736 } 738 737 } 739 - conn->tx_buff->data = conn->tx_buff->tail = conn->tx_buff->head; 738 + conn->tx_buff->data = conn->tx_buff->head; 739 + skb_reset_tail_pointer(conn->tx_buff); 740 740 conn->tx_buff->len = 0; 741 741 spin_lock_irqsave(&conn->collect_lock, saveflags); 742 742 while ((skb = skb_dequeue(&conn->collect_queue))) { ··· 1166 1164 * Copy the skb to a new allocated skb in lowmem only if the 1167 1165 * data is located above 2G in memory or tailroom is < 2. 1168 1166 */ 1169 - unsigned long hi = 1170 - ((unsigned long)(skb->tail + NETIUCV_HDRLEN)) >> 31; 1167 + unsigned long hi = ((unsigned long)(skb_tail_pointer(skb) + 1168 + NETIUCV_HDRLEN)) >> 31; 1171 1169 int copied = 0; 1172 1170 if (hi || (skb_tailroom(skb) < 2)) { 1173 1171 nskb = alloc_skb(skb->len + NETIUCV_HDRLEN +
+5 -5
drivers/usb/atm/usbatm.c
··· 335 335 336 336 sarb = instance->cached_vcc->sarb; 337 337 338 - if (sarb->tail + ATM_CELL_PAYLOAD > sarb->end) { 338 + if (skb_tail_pointer(sarb) + ATM_CELL_PAYLOAD > sarb->end) { 339 339 atm_rldbg(instance, "%s: buffer overrun (sarb->len %u, vcc: 0x%p)!\n", 340 340 __func__, sarb->len, vcc); 341 341 /* discard cells already received */ 342 342 skb_trim(sarb, 0); 343 - UDSL_ASSERT(sarb->tail + ATM_CELL_PAYLOAD <= sarb->end); 343 + UDSL_ASSERT(skb_tail_pointer(sarb) + ATM_CELL_PAYLOAD <= sarb->end); 344 344 } 345 345 346 - memcpy(sarb->tail, source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); 346 + memcpy(skb_tail_pointer(sarb), source + ATM_CELL_HEADER, ATM_CELL_PAYLOAD); 347 347 __skb_put(sarb, ATM_CELL_PAYLOAD); 348 348 349 349 if (pti & 1) { ··· 370 370 goto out; 371 371 } 372 372 373 - if (crc32_be(~0, sarb->tail - pdu_length, pdu_length) != 0xc704dd7b) { 373 + if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { 374 374 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", 375 375 __func__, vcc); 376 376 atomic_inc(&vcc->stats->rx_err); ··· 396 396 goto out; /* atm_charge increments rx_drop */ 397 397 } 398 398 399 - memcpy(skb->data, sarb->tail - pdu_length, length); 399 + memcpy(skb->data, skb_tail_pointer(sarb) - pdu_length, length); 400 400 __skb_put(skb, length); 401 401 402 402 vdbg("%s: sending skb 0x%p, skb->len %u, skb->truesize %u",
+3 -3
drivers/usb/net/asix.c
··· 298 298 if (ax_skb) { 299 299 ax_skb->len = size; 300 300 ax_skb->data = packet; 301 - ax_skb->tail = packet + size; 301 + skb_set_tail_pointer(ax_skb, size); 302 302 usbnet_skb_return(dev, ax_skb); 303 303 } else { 304 304 return 0; ··· 338 338 && ((headroom + tailroom) >= (4 + padlen))) { 339 339 if ((headroom < 4) || (tailroom < padlen)) { 340 340 skb->data = memmove(skb->head + 4, skb->data, skb->len); 341 - skb->tail = skb->data + skb->len; 341 + skb_set_tail_pointer(skb, skb->len); 342 342 } 343 343 } else { 344 344 struct sk_buff *skb2; ··· 356 356 357 357 if ((skb->len % 512) == 0) { 358 358 cpu_to_le32s(&padbytes); 359 - memcpy( skb->tail, &padbytes, sizeof(padbytes)); 359 + memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes)); 360 360 skb_put(skb, sizeof(padbytes)); 361 361 } 362 362 return skb;
+1 -1
drivers/usb/net/gl620a.c
··· 157 157 if ((headroom < (4 + 4*1)) || (tailroom < padlen)) { 158 158 skb->data = memmove(skb->head + (4 + 4*1), 159 159 skb->data, skb->len); 160 - skb->tail = skb->data + skb->len; 160 + skb_set_tail_pointer(skb, skb->len); 161 161 } 162 162 } else { 163 163 struct sk_buff *skb2;
+1 -1
drivers/usb/net/net1080.c
··· 520 520 skb->data = memmove(skb->head 521 521 + sizeof (struct nc_header), 522 522 skb->data, skb->len); 523 - skb->tail = skb->data + len; 523 + skb_set_tail_pointer(skb, len); 524 524 goto encapsulate; 525 525 } 526 526 }
+1 -1
drivers/usb/net/rndis_host.c
··· 588 588 if (likely((sizeof *hdr) <= room)) { 589 589 skb->data = memmove(skb->head + sizeof *hdr, 590 590 skb->data, len); 591 - skb->tail = skb->data + len; 591 + skb_set_tail_pointer(skb, len); 592 592 goto fill; 593 593 } 594 594 }
+2 -2
include/linux/netfilter/nfnetlink.h
··· 62 62 #define NFA_DATA(nfa) ((void *)(((char *)(nfa)) + NFA_LENGTH(0))) 63 63 #define NFA_PAYLOAD(nfa) ((int)((nfa)->nfa_len) - NFA_LENGTH(0)) 64 64 #define NFA_NEST(skb, type) \ 65 - ({ struct nfattr *__start = (struct nfattr *) (skb)->tail; \ 65 + ({ struct nfattr *__start = (struct nfattr *)skb_tail_pointer(skb); \ 66 66 NFA_PUT(skb, (NFNL_NFA_NEST | type), 0, NULL); \ 67 67 __start; }) 68 68 #define NFA_NEST_END(skb, start) \ 69 - ({ (start)->nfa_len = ((skb)->tail - (unsigned char *) (start)); \ 69 + ({ (start)->nfa_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ 70 70 (skb)->len; }) 71 71 #define NFA_NEST_CANCEL(skb, start) \ 72 72 ({ if (start) \
+1 -1
include/linux/netlink.h
··· 229 229 (cb)->nlh->nlmsg_seq, type, len, flags) 230 230 231 231 #define NLMSG_END(skb, nlh) \ 232 - ({ (nlh)->nlmsg_len = (skb)->tail - (unsigned char *) (nlh); \ 232 + ({ (nlh)->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)(nlh); \ 233 233 (skb)->len; }) 234 234 235 235 #define NLMSG_CANCEL(skb, nlh) \
+3 -3
include/linux/rtnetlink.h
··· 605 605 606 606 #define RTA_PUT_NOHDR(skb, attrlen, data) \ 607 607 ({ RTA_APPEND(skb, RTA_ALIGN(attrlen), data); \ 608 - memset(skb->tail - (RTA_ALIGN(attrlen) - attrlen), 0, \ 608 + memset(skb_tail_pointer(skb) - (RTA_ALIGN(attrlen) - attrlen), 0, \ 609 609 RTA_ALIGN(attrlen) - attrlen); }) 610 610 611 611 #define RTA_PUT_U8(skb, attrtype, value) \ ··· 637 637 RTA_PUT(skb, attrtype, 0, NULL); 638 638 639 639 #define RTA_NEST(skb, type) \ 640 - ({ struct rtattr *__start = (struct rtattr *) (skb)->tail; \ 640 + ({ struct rtattr *__start = (struct rtattr *)skb_tail_pointer(skb); \ 641 641 RTA_PUT(skb, type, 0, NULL); \ 642 642 __start; }) 643 643 644 644 #define RTA_NEST_END(skb, start) \ 645 - ({ (start)->rta_len = ((skb)->tail - (unsigned char *) (start)); \ 645 + ({ (start)->rta_len = skb_tail_pointer(skb) - (unsigned char *)(start); \ 646 646 (skb)->len; }) 647 647 648 648 #define RTA_NEST_CANCEL(skb, start) \
+45 -12
include/linux/skbuff.h
··· 246 246 int iif; 247 247 /* 4 byte hole on 64 bit*/ 248 248 249 - sk_buff_data_t transport_header; 250 - sk_buff_data_t network_header; 251 - sk_buff_data_t mac_header; 252 249 struct dst_entry *dst; 253 250 struct sec_path *sp; 254 251 ··· 300 303 301 304 __u32 mark; 302 305 306 + sk_buff_data_t transport_header; 307 + sk_buff_data_t network_header; 308 + sk_buff_data_t mac_header; 303 309 /* These elements must be at the end, see alloc_skb() for details. */ 304 - unsigned int truesize; 305 - atomic_t users; 310 + sk_buff_data_t tail; 306 311 unsigned char *head, 307 312 *data, 308 - *tail, 309 313 *end; 314 + unsigned int truesize; 315 + atomic_t users; 310 316 }; 311 317 312 318 #ifdef __KERNEL__ ··· 812 812 #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_shinfo(skb)->frag_list) 813 813 #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) 814 814 815 + #ifdef NET_SKBUFF_DATA_USES_OFFSET 816 + static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) 817 + { 818 + return skb->head + skb->tail; 819 + } 820 + 821 + static inline void skb_reset_tail_pointer(struct sk_buff *skb) 822 + { 823 + skb->tail = skb->data - skb->head; 824 + } 825 + 826 + static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 827 + { 828 + skb_reset_tail_pointer(skb); 829 + skb->tail += offset; 830 + } 831 + #else /* NET_SKBUFF_DATA_USES_OFFSET */ 832 + static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb) 833 + { 834 + return skb->tail; 835 + } 836 + 837 + static inline void skb_reset_tail_pointer(struct sk_buff *skb) 838 + { 839 + skb->tail = skb->data; 840 + } 841 + 842 + static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset) 843 + { 844 + skb->tail = skb->data + offset; 845 + } 846 + #endif /* NET_SKBUFF_DATA_USES_OFFSET */ 847 + 815 848 /* 816 849 * Add data to an sk_buff 817 850 */ 818 851 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len) 819 852 { 820 - unsigned char *tmp = skb->tail; 853 + unsigned char *tmp = skb_tail_pointer(skb); 821 854 SKB_LINEAR_ASSERT(skb); 822 855 skb->tail += len; 823 856 skb->len += len; ··· 868 835 */ 869 836 static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len) 870 837 { 871 - unsigned char *tmp = skb->tail; 838 + unsigned char *tmp = skb_tail_pointer(skb); 872 839 SKB_LINEAR_ASSERT(skb); 873 840 skb->tail += len; 874 841 skb->len += len; 875 - if (unlikely(skb->tail>skb->end)) 842 + if (unlikely(skb_tail_pointer(skb) > skb->end)) 876 843 skb_over_panic(skb, len, current_text_addr()); 877 844 return tmp; 878 845 } ··· 968 935 */ 969 936 static inline int skb_tailroom(const struct sk_buff *skb) 970 937 { 971 - return skb_is_nonlinear(skb) ? 0 : skb->end - skb->tail; 938 + return skb_is_nonlinear(skb) ? 0 : skb->end - skb_tail_pointer(skb); 972 939 } 973 940 974 941 /** ··· 1160 1127 WARN_ON(1); 1161 1128 return; 1162 1129 } 1163 - skb->len = len; 1164 - skb->tail = skb->data + len; 1130 + skb->len = len; 1131 + skb_set_tail_pointer(skb, len); 1165 1132 } 1166 1133 1167 1134 /**
+2 -4
include/net/inet_ecn.h
··· 114 114 { 115 115 switch (skb->protocol) { 116 116 case __constant_htons(ETH_P_IP): 117 - if (skb_network_header(skb) + sizeof(struct iphdr) <= 118 - skb->tail) 117 + if (skb->network_header + sizeof(struct iphdr) <= skb->tail) 119 118 return IP_ECN_set_ce(ip_hdr(skb)); 120 119 break; 121 120 122 121 case __constant_htons(ETH_P_IPV6): 123 - if (skb_network_header(skb) + sizeof(struct ipv6hdr) <= 124 - skb->tail) 122 + if (skb->network_header + sizeof(struct ipv6hdr) <= skb->tail) 125 123 return IP6_ECN_set_ce(ipv6_hdr(skb)); 126 124 break; 127 125 }
+4 -4
include/net/netlink.h
··· 525 525 */ 526 526 static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh) 527 527 { 528 - nlh->nlmsg_len = skb->tail - (unsigned char *) nlh; 528 + nlh->nlmsg_len = skb_tail_pointer(skb) - (unsigned char *)nlh; 529 529 530 530 return skb->len; 531 531 } ··· 538 538 */ 539 539 static inline void *nlmsg_get_pos(struct sk_buff *skb) 540 540 { 541 - return skb->tail; 541 + return skb_tail_pointer(skb); 542 542 } 543 543 544 544 /** ··· 940 940 */ 941 941 static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype) 942 942 { 943 - struct nlattr *start = (struct nlattr *) skb->tail; 943 + struct nlattr *start = (struct nlattr *)skb_tail_pointer(skb); 944 944 945 945 if (nla_put(skb, attrtype, 0, NULL) < 0) 946 946 return NULL; ··· 960 960 */ 961 961 static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start) 962 962 { 963 - start->nla_len = skb->tail - (unsigned char *) start; 963 + start->nla_len = skb_tail_pointer(skb) - (unsigned char *)start; 964 964 return skb->len; 965 965 } 966 966
+1 -1
include/net/pkt_cls.h
··· 337 337 static inline int tcf_valid_offset(const struct sk_buff *skb, 338 338 const unsigned char *ptr, const int len) 339 339 { 340 - return unlikely((ptr + len) < skb->tail && ptr > skb->head); 340 + return unlikely((ptr + len) < skb_tail_pointer(skb) && ptr > skb->head); 341 341 } 342 342 343 343 #ifdef CONFIG_NET_CLS_IND
+4 -4
kernel/audit.c
··· 1073 1073 goto out; 1074 1074 } 1075 1075 va_copy(args2, args); 1076 - len = vsnprintf(skb->tail, avail, fmt, args); 1076 + len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args); 1077 1077 if (len >= avail) { 1078 1078 /* The printk buffer is 1024 bytes long, so if we get 1079 1079 * here and AUDIT_BUFSIZ is at least 1024, then we can ··· 1082 1082 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail)); 1083 1083 if (!avail) 1084 1084 goto out; 1085 - len = vsnprintf(skb->tail, avail, fmt, args2); 1085 + len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2); 1086 1086 } 1087 1087 if (len > 0) 1088 1088 skb_put(skb, len); ··· 1143 1143 return; 1144 1144 } 1145 1145 1146 - ptr = skb->tail; 1146 + ptr = skb_tail_pointer(skb); 1147 1147 for (i=0; i<len; i++) { 1148 1148 *ptr++ = hex[(buf[i] & 0xF0)>>4]; /* Upper nibble */ 1149 1149 *ptr++ = hex[buf[i] & 0x0F]; /* Lower nibble */ ··· 1175 1175 if (!avail) 1176 1176 return; 1177 1177 } 1178 - ptr = skb->tail; 1178 + ptr = skb_tail_pointer(skb); 1179 1179 *ptr++ = '"'; 1180 1180 memcpy(ptr, string, slen); 1181 1181 ptr += slen;
+1 -1
net/atm/lec.c
··· 283 283 } 284 284 285 285 DPRINTK("skbuff head:%lx data:%lx tail:%lx end:%lx\n", 286 - (long)skb->head, (long)skb->data, (long)skb->tail, 286 + (long)skb->head, (long)skb->data, (long)skb_tail_pointer(skb), 287 287 (long)skb->end); 288 288 #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE) 289 289 if (memcmp(skb->data, bridge_ula_lec, sizeof(bridge_ula_lec)) == 0)
+1 -1
net/bluetooth/rfcomm/core.c
··· 1567 1567 1568 1568 /* Trim FCS */ 1569 1569 skb->len--; skb->tail--; 1570 - fcs = *(u8 *) skb->tail; 1570 + fcs = *(u8 *)skb_tail_pointer(skb); 1571 1571 1572 1572 if (__check_fcs(skb->data, type, fcs)) { 1573 1573 BT_ERR("bad checksum in packet");
+2 -2
net/core/dev.c
··· 1069 1069 skb_reset_mac_header(skb2); 1070 1070 1071 1071 if (skb_network_header(skb2) < skb2->data || 1072 - skb_network_header(skb2) > skb2->tail) { 1072 + skb2->network_header > skb2->tail) { 1073 1073 if (net_ratelimit()) 1074 1074 printk(KERN_CRIT "protocol %04x is " 1075 1075 "buggy, dev %s\n", ··· 1175 1175 BUG_ON(offset > (int)skb->len); 1176 1176 csum = skb_checksum(skb, offset, skb->len-offset, 0); 1177 1177 1178 - offset = skb->tail - skb_transport_header(skb); 1178 + offset = skb->tail - skb->transport_header; 1179 1179 BUG_ON(offset <= 0); 1180 1180 BUG_ON(skb->csum_offset + 2 > offset); 1181 1181
+1 -1
net/core/filter.c
··· 46 46 else if (k >= SKF_LL_OFF) 47 47 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 48 48 49 - if (ptr >= skb->head && ptr < skb->tail) 49 + if (ptr >= skb->head && ptr < skb_tail_pointer(skb)) 50 50 return ptr; 51 51 return NULL; 52 52 }
+2 -2
net/core/gen_stats.c
··· 61 61 spin_lock_bh(lock); 62 62 d->lock = lock; 63 63 if (type) 64 - d->tail = (struct rtattr *) skb->tail; 64 + d->tail = (struct rtattr *)skb_tail_pointer(skb); 65 65 d->skb = skb; 66 66 d->compat_tc_stats = tc_stats_type; 67 67 d->compat_xstats = xstats_type; ··· 212 212 gnet_stats_finish_copy(struct gnet_dump *d) 213 213 { 214 214 if (d->tail) 215 - d->tail->rta_len = d->skb->tail - (u8 *) d->tail; 215 + d->tail->rta_len = skb_tail_pointer(d->skb) - (u8 *)d->tail; 216 216 217 217 if (d->compat_tc_stats) 218 218 if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
+2 -2
net/core/pktgen.c
··· 2357 2357 *vlan_encapsulated_proto = htons(ETH_P_IP); 2358 2358 } 2359 2359 2360 - skb_set_network_header(skb, skb->tail - skb->data); 2360 + skb->network_header = skb->tail; 2361 2361 skb->transport_header = skb->network_header + sizeof(struct iphdr); 2362 2362 skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr)); 2363 2363 ··· 2696 2696 *vlan_encapsulated_proto = htons(ETH_P_IPV6); 2697 2697 } 2698 2698 2699 - skb_set_network_header(skb, skb->tail - skb->data); 2699 + skb->network_header = skb->tail; 2700 2700 skb->transport_header = skb->network_header + sizeof(struct ipv6hdr); 2701 2701 skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr)); 2702 2702
+21 -14
net/core/skbuff.c
··· 87 87 void skb_over_panic(struct sk_buff *skb, int sz, void *here) 88 88 { 89 89 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " 90 - "data:%p tail:%p end:%p dev:%s\n", 91 - here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end, 90 + "data:%p tail:%#lx end:%p dev:%s\n", 91 + here, skb->len, sz, skb->head, skb->data, 92 + (unsigned long)skb->tail, skb->end, 92 93 skb->dev ? skb->dev->name : "<NULL>"); 93 94 BUG(); 94 95 } ··· 106 105 void skb_under_panic(struct sk_buff *skb, int sz, void *here) 107 106 { 108 107 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " 109 - "data:%p tail:%p end:%p dev:%s\n", 110 - here, skb->len, sz, skb->head, skb->data, skb->tail, skb->end, 108 + "data:%p tail:%#lx end:%p dev:%s\n", 109 + here, skb->len, sz, skb->head, skb->data, 110 + (unsigned long)skb->tail, skb->end, 111 111 skb->dev ? skb->dev->name : "<NULL>"); 112 112 BUG(); 113 113 } ··· 169 167 atomic_set(&skb->users, 1); 170 168 skb->head = data; 171 169 skb->data = data; 172 - skb->tail = data; 170 + skb_reset_tail_pointer(skb); 173 171 skb->end = data + size; 174 172 /* make sure we initialize shinfo sequentially */ 175 173 shinfo = skb_shinfo(skb); ··· 631 629 632 630 /* Copy only real data... and, alas, header. This should be */ 634 - memcpy(data + nhead, skb->head, skb->tail - skb->head); 632 + memcpy(data + nhead, skb->head, 633 + skb->tail 634 + #ifndef NET_SKBUFF_DATA_USES_OFFSET 635 + - skb->head 636 + #endif 637 + ); 635 638 memcpy(data + size, skb->end, sizeof(struct skb_shared_info)); 636 639 637 640 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) ··· 652 645 skb->head = data; 653 646 skb->end = data + size; 654 647 skb->data += off; 655 - skb->tail += off; 656 648 #ifndef NET_SKBUFF_DATA_USES_OFFSET 657 - /* {transport,network,mac}_header are relative to skb->head */ 649 + /* {transport,network,mac}_header and tail are relative to skb->head */ 650 + skb->tail += off; 658 651 skb->transport_header += off; 659 652 skb->network_header += off; 660 653 skb->mac_header += off; ··· 769 762 return 0; 770 763 } 771 764 772 - ntail = skb->data_len + pad - (skb->end - skb->tail); 765 + ntail = skb->data_len + pad - (skb->end - skb_tail_pointer(skb)); 773 766 if (likely(skb_cloned(skb) || ntail > 0)) { 774 767 err = pskb_expand_head(skb, 0, ntail, GFP_ATOMIC); 775 768 if (unlikely(err)) ··· 870 863 } else { 871 864 skb->len = len; 872 865 skb->data_len = 0; 873 - skb->tail = skb->data + len; 866 + skb_set_tail_pointer(skb, len); 874 867 } 875 868 876 869 return 0; ··· 907 900 * plus 128 bytes for future expansions. If we have enough 908 901 * room at tail, reallocate without expansion only if skb is cloned. 909 902 */ 910 - int i, k, eat = (skb->tail + delta) - skb->end; 903 + int i, k, eat = (skb_tail_pointer(skb) + delta) - skb->end; 911 904 912 905 if (eat > 0 || skb_cloned(skb)) { 913 906 if (pskb_expand_head(skb, 0, eat > 0 ? eat + 128 : 0, ··· 915 908 return NULL; 916 909 } 917 910 918 - if (skb_copy_bits(skb, skb_headlen(skb), skb->tail, delta)) 911 + if (skb_copy_bits(skb, skb_headlen(skb), skb_tail_pointer(skb), delta)) 919 912 BUG(); 920 913 921 914 /* Optimization: no fragments, no reasons to preestimate ··· 1011 1004 skb->tail += delta; 1012 1005 skb->data_len -= delta; 1013 1006 1014 - return skb->tail; 1007 + return skb_tail_pointer(skb); 1015 1008 } 1016 1009 1017 1010 /* Copy some data bits from skb to kernel buffer. */ ··· 1546 1539 skb1->len += skb1->data_len; 1547 1540 skb->data_len = 0; 1548 1541 skb->len = len; 1549 - skb->tail = skb->data + len; 1542 + skb_set_tail_pointer(skb, len); 1550 1543 } 1551 1544 1552 1545 static inline void skb_split_no_header(struct sk_buff *skb,
+2 -2
net/core/wireless.c
··· 1938 1938 { 1939 1939 struct ifinfomsg *r; 1940 1940 struct nlmsghdr *nlh; 1941 - unsigned char *b = skb->tail; 1941 + unsigned char *b = skb_tail_pointer(skb); 1942 1942 1943 1943 nlh = NLMSG_PUT(skb, 0, 0, type, sizeof(*r)); 1944 1944 r = NLMSG_DATA(nlh); ··· 1952 1952 /* Add the wireless events in the netlink packet */ 1953 1953 RTA_PUT(skb, IFLA_WIRELESS, event_len, event); 1954 1954 1955 - nlh->nlmsg_len = skb->tail - b; 1955 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1956 1956 return skb->len; 1957 1957 1958 1958 nlmsg_failure:
+4 -2
net/decnet/dn_nsp_out.c
··· 681 681 if (scp->peer.sdn_objnum) 682 682 type = 0; 683 683 684 - skb_put(skb, dn_sockaddr2username(&scp->peer, skb->tail, type)); 685 - skb_put(skb, dn_sockaddr2username(&scp->addr, skb->tail, 2)); 684 + skb_put(skb, dn_sockaddr2username(&scp->peer, 685 + skb_tail_pointer(skb), type)); 686 + skb_put(skb, dn_sockaddr2username(&scp->addr, 687 + skb_tail_pointer(skb), 2)); 686 688 687 689 menuver = DN_MENUVER_ACC | DN_MENUVER_USR; 688 690 if (scp->peer.sdn_flags & SDF_PROXY)
+2 -2
net/decnet/dn_route.c
··· 1468 1468 struct dn_route *rt = (struct dn_route *)skb->dst; 1469 1469 struct rtmsg *r; 1470 1470 struct nlmsghdr *nlh; 1471 - unsigned char *b = skb->tail; 1471 + unsigned char *b = skb_tail_pointer(skb); 1472 1472 long expires; 1473 1473 1474 1474 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags); ··· 1509 1509 if (rt->fl.iif) 1510 1510 RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif); 1511 1511 1512 - nlh->nlmsg_len = skb->tail - b; 1512 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1513 1513 return skb->len; 1514 1514 1515 1515 nlmsg_failure:
+4 -4
net/decnet/dn_table.c
··· 295 295 { 296 296 struct rtmsg *rtm; 297 297 struct nlmsghdr *nlh; 298 - unsigned char *b = skb->tail; 298 + unsigned char *b = skb_tail_pointer(skb); 299 299 300 300 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags); 301 301 rtm = NLMSG_DATA(nlh); ··· 337 337 nhp->rtnh_ifindex = nh->nh_oif; 338 338 if (nh->nh_gw) 339 339 RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw); 340 - nhp->rtnh_len = skb->tail - (unsigned char *)nhp; 340 + nhp->rtnh_len = skb_tail_pointer(skb) - (unsigned char *)nhp; 341 341 } endfor_nexthops(fi); 342 342 mp_head->rta_type = RTA_MULTIPATH; 343 - mp_head->rta_len = skb->tail - (u8*)mp_head; 343 + mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; 344 344 } 345 345 346 - nlh->nlmsg_len = skb->tail - b; 346 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 347 347 return skb->len; 348 348 349 349
+1 -1
net/decnet/netfilter/dn_rtmsg.c
··· 33 33 { 34 34 struct sk_buff *skb = NULL; 35 35 size_t size; 36 - unsigned char *old_tail; 36 + sk_buff_data_t old_tail; 37 37 struct nlmsghdr *nlh; 38 38 unsigned char *ptr; 39 39 struct nf_dn_rtmsg *rtm;
+1 -1
net/econet/af_econet.c
··· 366 366 fh->cb = cb; 367 367 fh->port = port; 368 368 if (sock->type != SOCK_DGRAM) { 369 - skb->tail = skb->data; 369 + skb_reset_tail_pointer(skb); 370 370 skb->len = 0; 371 371 } else if (res < 0) 372 372 goto out_free;
+1 -1
net/ieee80211/ieee80211_rx.c
··· 595 595 if (frag != 0) 596 596 flen -= hdrlen; 597 597 598 - if (frag_skb->tail + flen > frag_skb->end) { 598 + if (skb_tail_pointer(frag_skb) + flen > frag_skb->end) { 599 599 printk(KERN_WARNING "%s: host decrypted and " 600 600 "reassembled frame did not fit skb\n", 601 601 dev->name);
+5 -3
net/ipv4/esp4.c
··· 21 21 struct blkcipher_desc desc; 22 22 struct esp_data *esp; 23 23 struct sk_buff *trailer; 24 + u8 *tail; 24 25 int blksize; 25 26 int clen; 26 27 int alen; ··· 50 49 goto error; 51 50 52 51 /* Fill padding... */ 52 + tail = skb_tail_pointer(trailer); 53 53 do { 54 54 int i; 55 55 for (i=0; i<clen-skb->len - 2; i++) 56 - *(u8*)(trailer->tail + i) = i+1; 56 + tail[i] = i + 1; 57 57 } while (0); 58 - *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; 58 + tail[clen - skb->len - 2] = (clen - skb->len) - 2; 59 59 pskb_put(skb, trailer, clen - skb->len); 60 60 61 61 __skb_push(skb, skb->data - skb_network_header(skb)); ··· 64 62 esph = (struct ip_esp_hdr *)(skb_network_header(skb) + 65 63 top_iph->ihl * 4); 66 64 top_iph->tot_len = htons(skb->len + alen); 67 - *(u8*)(trailer->tail - 1) = top_iph->protocol; 65 + *(skb_tail_pointer(skb) - 1) = top_iph->protocol; 68 66 69 67 /* this is non-NULL only with UDP Encapsulation */ 70 68 if (x->encap) {
+2 -1
net/ipv4/icmp.c
··· 450 450 */ 451 451 iph = ip_hdr(skb_in); 452 452 453 - if ((u8 *)iph < skb_in->head || (u8 *)(iph + 1) > skb_in->tail) 453 + if ((u8 *)iph < skb_in->head || 454 + (skb_in->network_header + sizeof(*iph)) > skb_in->tail) 454 455 goto out; 455 456 456 457 /*
+2 -2
net/ipv4/igmp.c
··· 348 348 { 349 349 struct iphdr *pip = ip_hdr(skb); 350 350 struct igmphdr *pig = igmp_hdr(skb); 351 - const int iplen = skb->tail - skb_network_header(skb); 352 - const int igmplen = skb->tail - skb_transport_header(skb); 351 + const int iplen = skb->tail - skb->network_header; 352 + const int igmplen = skb->tail - skb->transport_header; 353 353 354 354 pip->tot_len = htons(iplen); 355 355 ip_send_check(pip);
+6 -6
net/ipv4/inet_diag.c
··· 60 60 struct nlmsghdr *nlh; 61 61 void *info = NULL; 62 62 struct inet_diag_meminfo *minfo = NULL; 63 - unsigned char *b = skb->tail; 63 + unsigned char *b = skb_tail_pointer(skb); 64 64 const struct inet_diag_handler *handler; 65 65 66 66 handler = inet_diag_table[unlh->nlmsg_type]; ··· 147 147 icsk->icsk_ca_ops && icsk->icsk_ca_ops->get_info) 148 148 icsk->icsk_ca_ops->get_info(sk, ext, skb); 149 149 150 - nlh->nlmsg_len = skb->tail - b; 150 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 151 151 return skb->len; 152 152 153 153 rtattr_failure: ··· 163 163 { 164 164 long tmo; 165 165 struct inet_diag_msg *r; 166 - const unsigned char *previous_tail = skb->tail; 166 + const unsigned char *previous_tail = skb_tail_pointer(skb); 167 167 struct nlmsghdr *nlh = NLMSG_PUT(skb, pid, seq, 168 168 unlh->nlmsg_type, sizeof(*r)); 169 169 ··· 205 205 &tw6->tw_v6_daddr); 206 206 } 207 207 #endif 208 - nlh->nlmsg_len = skb->tail - previous_tail; 208 + nlh->nlmsg_len = skb_tail_pointer(skb) - previous_tail; 209 209 return skb->len; 210 210 nlmsg_failure: 211 211 skb_trim(skb, previous_tail - skb->data); ··· 535 535 { 536 536 const struct inet_request_sock *ireq = inet_rsk(req); 537 537 struct inet_sock *inet = inet_sk(sk); 538 - unsigned char *b = skb->tail; 538 + unsigned char *b = skb_tail_pointer(skb); 539 539 struct inet_diag_msg *r; 540 540 struct nlmsghdr *nlh; 541 541 long tmo; ··· 574 574 &inet6_rsk(req)->rmt_addr); 575 575 } 576 576 #endif 577 - nlh->nlmsg_len = skb->tail - b; 577 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 578 578 579 579 return skb->len; 580 580
+1 -1
net/ipv4/ip_sockglue.c
··· 316 316 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); 317 317 serr->port = port; 318 318 319 - __skb_pull(skb, skb->tail - skb->data); 319 + __skb_pull(skb, skb_tail_pointer(skb) - skb->data); 320 320 skb_reset_transport_header(skb); 321 321 322 322 if (sock_queue_err_skb(sk, skb))
+5 -4
net/ipv4/ipmr.c
··· 513 513 struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); 514 514 515 515 if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) { 516 - nlh->nlmsg_len = skb->tail - (u8*)nlh; 516 + nlh->nlmsg_len = (skb_tail_pointer(skb) - 517 + (u8 *)nlh); 517 518 } else { 518 519 nlh->nlmsg_type = NLMSG_ERROR; 519 520 nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr)); ··· 581 580 * Copy the IP header 582 581 */ 583 582 584 - skb_set_network_header(skb, skb->tail - skb->data); 583 + skb->network_header = skb->tail; 585 584 skb_put(skb, ihl); 586 585 memcpy(skb->data,pkt->data,ihl); 587 586 ip_hdr(skb)->protocol = 0; /* Flag to the kernel this is a route add */ ··· 1545 1544 int ct; 1546 1545 struct rtnexthop *nhp; 1547 1546 struct net_device *dev = vif_table[c->mfc_parent].dev; 1548 - u8 *b = skb->tail; 1547 + u8 *b = skb_tail_pointer(skb); 1549 1548 struct rtattr *mp_head; 1550 1549 1551 1550 if (dev) ··· 1565 1564 } 1566 1565 } 1567 1566 mp_head->rta_type = RTA_MULTIPATH; 1568 - mp_head->rta_len = skb->tail - (u8*)mp_head; 1567 + mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head; 1569 1568 rtm->rtm_type = RTN_MULTICAST; 1570 1569 return 1; 1571 1570
+2 -2
net/ipv4/ipvs/ip_vs_ftp.c
··· 162 162 iph = ip_hdr(*pskb); 163 163 th = (struct tcphdr *)&(((char *)iph)[iph->ihl*4]); 164 164 data = (char *)th + (th->doff << 2); 165 - data_limit = (*pskb)->tail; 165 + data_limit = skb_tail_pointer(*pskb); 166 166 167 167 if (ip_vs_ftp_get_addrport(data, data_limit, 168 168 SERVER_STRING, ··· 269 269 the length of the header in 32-bit multiples, it is accurate 270 270 to calculate data address by th+HLEN*4 */ 271 271 data = data_start = (char *)th + (th->doff << 2); 272 - data_limit = (*pskb)->tail; 272 + data_limit = skb_tail_pointer(*pskb); 273 273 274 274 while (data <= data_limit - 6) { 275 275 if (strnicmp(data, "PASV\r\n", 6) == 0) {
+4 -4
net/ipv4/netfilter/arpt_mangle.c
··· 37 37 /* We assume that pln and hln were checked in the match */ 38 38 if (mangle->flags & ARPT_MANGLE_SDEV) { 39 39 if (ARPT_DEV_ADDR_LEN_MAX < hln || 40 - (arpptr + hln > (**pskb).tail)) 40 + (arpptr + hln > skb_tail_pointer(*pskb))) 41 41 return NF_DROP; 42 42 memcpy(arpptr, mangle->src_devaddr, hln); 43 43 } 44 44 arpptr += hln; 45 45 if (mangle->flags & ARPT_MANGLE_SIP) { 46 46 if (ARPT_MANGLE_ADDR_LEN_MAX < pln || 47 - (arpptr + pln > (**pskb).tail)) 47 + (arpptr + pln > skb_tail_pointer(*pskb))) 48 48 return NF_DROP; 49 49 memcpy(arpptr, &mangle->u_s.src_ip, pln); 50 50 } 51 51 arpptr += pln; 52 52 if (mangle->flags & ARPT_MANGLE_TDEV) { 53 53 if (ARPT_DEV_ADDR_LEN_MAX < hln || 54 - (arpptr + hln > (**pskb).tail)) 54 + (arpptr + hln > skb_tail_pointer(*pskb))) 55 55 return NF_DROP; 56 56 memcpy(arpptr, mangle->tgt_devaddr, hln); 57 57 } 58 58 arpptr += hln; 59 59 if (mangle->flags & ARPT_MANGLE_TIP) { 60 60 if (ARPT_MANGLE_ADDR_LEN_MAX < pln || 61 - (arpptr + pln > (**pskb).tail)) 61 + (arpptr + pln > skb_tail_pointer(*pskb))) 62 62 return NF_DROP; 63 63 memcpy(arpptr, &mangle->u_t.tgt_ip, pln); 64 64 }
+2 -2
net/ipv4/netfilter/ip_queue.c
··· 191 191 static struct sk_buff * 192 192 ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) 193 193 { 194 - unsigned char *old_tail; 194 + sk_buff_data_t old_tail; 195 195 size_t size = 0; 196 196 size_t data_len = 0; 197 197 struct sk_buff *skb; ··· 235 235 if (!skb) 236 236 goto nlmsg_failure; 237 237 238 - old_tail= skb->tail; 238 + old_tail = skb->tail; 239 239 nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); 240 240 pmsg = NLMSG_DATA(nlh); 241 241 memset(pmsg, 0, sizeof(*pmsg));
+2 -1
net/ipv4/netfilter/nf_nat_helper.c
··· 92 92 /* move post-replacement */ 93 93 memmove(data + match_offset + rep_len, 94 94 data + match_offset + match_len, 95 - skb->tail - (data + match_offset + match_len)); 95 + skb->tail - (skb->network_header + dataoff + 96 + match_offset + match_len)); 96 97 97 98 /* insert data from buffer */ 98 99 memcpy(data + match_offset, rep_buffer, rep_len);
+1 -1
net/ipv4/tcp.c
··· 2231 2231 th->cwr = 0; 2232 2232 } while (skb->next); 2233 2233 2234 - delta = htonl(oldlen + (skb->tail - skb_transport_header(skb)) + 2234 + delta = htonl(oldlen + (skb->tail - skb->transport_header) + 2235 2235 skb->data_len); 2236 2236 th->check = ~csum_fold((__force __wsum)((__force u32)th->check + 2237 2237 (__force u32)delta));
+1 -1
net/ipv4/tcp_output.c
··· 733 733 } 734 734 skb_shinfo(skb)->nr_frags = k; 735 735 736 - skb->tail = skb->data; 736 + skb_reset_tail_pointer(skb); 737 737 skb->data_len -= len; 738 738 skb->len = skb->data_len; 739 739 }
+1 -1
net/ipv6/datagram.c
··· 268 268 serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb); 269 269 serr->port = fl->fl_ip_dport; 270 270 271 - __skb_pull(skb, skb->tail - skb->data); 271 + __skb_pull(skb, skb_tail_pointer(skb) - skb->data); 272 272 skb_reset_transport_header(skb); 273 273 274 274 if (sock_queue_err_skb(sk, skb))
+5 -3
net/ipv6/esp6.c
··· 51 51 int clen; 52 52 int alen; 53 53 int nfrags; 54 + u8 *tail; 54 55 struct esp_data *esp = x->data; 55 56 int hdr_len = (skb_transport_offset(skb) + 56 57 sizeof(*esph) + esp->conf.ivlen); ··· 79 78 } 80 79 81 80 /* Fill padding... */ 81 + tail = skb_tail_pointer(trailer); 82 82 do { 83 83 int i; 84 84 for (i=0; i<clen-skb->len - 2; i++) 85 - *(u8*)(trailer->tail + i) = i+1; 85 + tail[i] = i + 1; 86 86 } while (0); 87 - *(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2; 87 + tail[clen-skb->len - 2] = (clen - skb->len) - 2; 88 88 pskb_put(skb, trailer, clen - skb->len); 89 89 90 90 top_iph = (struct ipv6hdr *)__skb_push(skb, hdr_len); 91 91 esph = (struct ipv6_esp_hdr *)skb_transport_header(skb); 92 92 top_iph->payload_len = htons(skb->len + alen - sizeof(*top_iph)); 93 - *(u8 *)(trailer->tail - 1) = *skb_network_header(skb); 93 + *(skb_tail_pointer(skb) - 1) = *skb_network_header(skb); 94 94 *skb_network_header(skb) = IPPROTO_ESP; 95 95 96 96 esph->spi = x->id.spi;
+1 -1
net/ipv6/exthdrs.c
··· 51 51 int ipv6_find_tlv(struct sk_buff *skb, int offset, int type) 52 52 { 53 53 const unsigned char *nh = skb_network_header(skb); 54 - int packet_len = skb->tail - nh; 54 + int packet_len = skb->tail - skb->network_header; 55 55 struct ipv6_opt_hdr *hdr; 56 56 int len; 57 57
+2 -1
net/ipv6/icmp.c
··· 317 317 int hlimit, tclass; 318 318 int err = 0; 319 319 320 - if ((u8*)hdr < skb->head || (u8*)(hdr+1) > skb->tail) 320 + if ((u8 *)hdr < skb->head || 321 + (skb->network_header + sizeof(*hdr)) > skb->tail) 321 322 return; 322 323 323 324 /*
+1 -1
net/ipv6/ip6_output.c
··· 514 514 u16 offset = sizeof(struct ipv6hdr); 515 515 struct ipv6_opt_hdr *exthdr = 516 516 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); 517 - unsigned int packet_len = skb->tail - skb_network_header(skb); 517 + unsigned int packet_len = skb->tail - skb->network_header; 518 518 int found_rhdr = 0; 519 519 *nexthdr = &ipv6_hdr(skb)->nexthdr; 520 520
+3 -3
net/ipv6/mcast.c
··· 1423 1423 1424 1424 memcpy(skb_put(skb, sizeof(ra)), ra, sizeof(ra)); 1425 1425 1426 - skb_set_transport_header(skb, skb->tail - skb->data); 1426 + skb_set_transport_header(skb, skb_tail_pointer(skb) - skb->data); 1427 1427 skb_put(skb, sizeof(*pmr)); 1428 1428 pmr = (struct mld2_report *)skb_transport_header(skb); 1429 1429 pmr->type = ICMPV6_MLD2_REPORT; ··· 1468 1468 int err; 1469 1469 1470 1470 IP6_INC_STATS(idev, IPSTATS_MIB_OUTREQUESTS); 1471 - payload_len = skb->tail - skb_network_header(skb) - sizeof(*pip6); 1472 - mldlen = skb->tail - skb_transport_header(skb); 1471 + payload_len = (skb->tail - skb->network_header) - sizeof(*pip6); 1472 + mldlen = skb->tail - skb->transport_header; 1473 1473 pip6->payload_len = htons(payload_len); 1474 1474 1475 1475 pmr->csum = csum_ipv6_magic(&pip6->saddr, &pip6->daddr, mldlen,
+2 -2
net/ipv6/mip6.c
··· 260 260 struct ipv6_opt_hdr *exthdr = 261 261 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); 262 262 const unsigned char *nh = skb_network_header(skb); 263 - unsigned int packet_len = skb->tail - nh; 263 + unsigned int packet_len = skb->tail - skb->network_header; 264 264 int found_rhdr = 0; 265 265 266 266 *nexthdr = &ipv6_hdr(skb)->nexthdr; ··· 392 392 struct ipv6_opt_hdr *exthdr = 393 393 (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1); 394 394 const unsigned char *nh = skb_network_header(skb); 395 - unsigned int packet_len = skb->tail - nh; 395 + unsigned int packet_len = skb->tail - skb->network_header; 396 396 int found_rhdr = 0; 397 397 398 398 *nexthdr = &ipv6_hdr(skb)->nexthdr;
+10 -9
net/ipv6/ndisc.c
··· 492 492 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 493 493 ip6_nd_hdr(sk, skb, dev, src_addr, daddr, IPPROTO_ICMPV6, len); 494 494 495 - skb_set_transport_header(skb, skb->tail - skb->data); 495 + skb->transport_header = skb->tail; 496 496 skb_put(skb, len); 497 497 msg = (struct nd_msg *)skb_transport_header(skb); 498 498 ··· 584 584 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 585 585 ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len); 586 586 587 - skb_set_transport_header(skb, skb->tail - skb->data); 587 + skb->transport_header = skb->tail; 588 588 skb_put(skb, len); 589 589 msg = (struct nd_msg *)skb_transport_header(skb); 590 590 msg->icmph.icmp6_type = NDISC_NEIGHBOUR_SOLICITATION; ··· 685 685 skb_reserve(skb, LL_RESERVED_SPACE(dev)); 686 686 ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len); 687 687 688 - skb_set_transport_header(skb, skb->tail - skb->data); 688 + skb->transport_header = skb->tail; 689 689 skb_put(skb, len); 690 690 hdr = icmp6_hdr(skb); 691 691 hdr->icmp6_type = NDISC_ROUTER_SOLICITATION; ··· 767 767 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 768 768 struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 769 769 u8 *lladdr = NULL; 770 - u32 ndoptlen = skb->tail - msg->opt; 770 + u32 ndoptlen = skb->tail - (skb->transport_header + 771 + offsetof(struct nd_msg, opt)); 771 772 struct ndisc_options ndopts; 772 773 struct net_device *dev = skb->dev; 773 774 struct inet6_ifaddr *ifp; ··· 946 945 struct in6_addr *saddr = &ipv6_hdr(skb)->saddr; 947 946 struct in6_addr *daddr = &ipv6_hdr(skb)->daddr; 948 947 u8 *lladdr = NULL; 949 - u32 ndoptlen = skb->tail - msg->opt; 948 + u32 ndoptlen = skb->tail - (skb->transport_header + 949 + offsetof(struct nd_msg, opt)); 950 950 struct ndisc_options ndopts; 951 951 struct net_device *dev = skb->dev; 952 952 struct inet6_ifaddr *ifp; ··· 1113 1111 1114 1112 __u8 * opt = (__u8 *)(ra_msg + 1); 1115 1113 1116 - optlen = (skb->tail - skb_transport_header(skb)) - 1117 - sizeof(struct ra_msg); 
1114 + optlen = (skb->tail - skb->transport_header) - sizeof(struct ra_msg); 1118 1115 1119 1116 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) { 1120 1117 ND_PRINTK2(KERN_WARNING ··· 1362 1361 return; 1363 1362 } 1364 1363 1365 - optlen = skb->tail - skb_transport_header(skb); 1364 + optlen = skb->tail - skb->transport_header; 1366 1365 optlen -= sizeof(struct icmp6hdr) + 2 * sizeof(struct in6_addr); 1367 1366 1368 1367 if (optlen < 0) { ··· 1523 1522 ip6_nd_hdr(sk, buff, dev, &saddr_buf, &ipv6_hdr(skb)->saddr, 1524 1523 IPPROTO_ICMPV6, len); 1525 1524 1526 - skb_set_transport_header(buff, buff->tail - buff->data); 1525 + skb_set_transport_header(buff, skb_tail_pointer(buff) - buff->data); 1527 1526 skb_put(buff, len); 1528 1527 icmph = icmp6_hdr(buff); 1529 1528
+2 -2
net/ipv6/netfilter/ip6_queue.c
··· 189 189 static struct sk_buff * 190 190 ipq_build_packet_message(struct ipq_queue_entry *entry, int *errp) 191 191 { 192 - unsigned char *old_tail; 192 + sk_buff_data_t old_tail; 193 193 size_t size = 0; 194 194 size_t data_len = 0; 195 195 struct sk_buff *skb; ··· 233 233 if (!skb) 234 234 goto nlmsg_failure; 235 235 236 - old_tail= skb->tail; 236 + old_tail = skb->tail; 237 237 nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh)); 238 238 pmsg = NLMSG_DATA(nlh); 239 239 memset(pmsg, 0, sizeof(*pmsg));
+1 -1
net/ipv6/raw.c
··· 1077 1077 spin_lock_bh(&sk->sk_receive_queue.lock); 1078 1078 skb = skb_peek(&sk->sk_receive_queue); 1079 1079 if (skb != NULL) 1080 - amount = skb->tail - skb_transport_header(skb); 1080 + amount = skb->tail - skb->transport_header; 1081 1081 spin_unlock_bh(&sk->sk_receive_queue.lock); 1082 1082 return put_user(amount, (int __user *)arg); 1083 1083 }
+2 -2
net/irda/ircomm/ircomm_param.c
··· 133 133 * Inserting is a little bit tricky since we don't know how much 134 134 * room we will need. But this should hopefully work OK 135 135 */ 136 - count = irda_param_insert(self, pi, skb->tail, skb_tailroom(skb), 137 - &ircomm_param_info); 136 + count = irda_param_insert(self, pi, skb_tail_pointer(skb), 137 + skb_tailroom(skb), &ircomm_param_info); 138 138 if (count < 0) { 139 139 IRDA_WARNING("%s(), no room for parameter!\n", __FUNCTION__); 140 140 spin_unlock_irqrestore(&self->spinlock, flags);
+1 -1
net/irda/irlan/irlan_common.c
··· 1039 1039 } 1040 1040 1041 1041 /* Insert at end of sk-buffer */ 1042 - frame = skb->tail; 1042 + frame = skb_tail_pointer(skb); 1043 1043 1044 1044 /* Make space for data */ 1045 1045 if (skb_tailroom(skb) < (param_len+value_len+3)) {
+7 -7
net/irda/qos.c
··· 469 469 int ret; 470 470 471 471 /* Insert data rate */ 472 - ret = irda_param_insert(self, PI_BAUD_RATE, skb->tail, 472 + ret = irda_param_insert(self, PI_BAUD_RATE, skb_tail_pointer(skb), 473 473 skb_tailroom(skb), &irlap_param_info); 474 474 if (ret < 0) 475 475 return ret; 476 476 skb_put(skb, ret); 477 477 478 478 /* Insert max turnaround time */ 479 - ret = irda_param_insert(self, PI_MAX_TURN_TIME, skb->tail, 479 + ret = irda_param_insert(self, PI_MAX_TURN_TIME, skb_tail_pointer(skb), 480 480 skb_tailroom(skb), &irlap_param_info); 481 481 if (ret < 0) 482 482 return ret; 483 483 skb_put(skb, ret); 484 484 485 485 /* Insert data size */ 486 - ret = irda_param_insert(self, PI_DATA_SIZE, skb->tail, 486 + ret = irda_param_insert(self, PI_DATA_SIZE, skb_tail_pointer(skb), 487 487 skb_tailroom(skb), &irlap_param_info); 488 488 if (ret < 0) 489 489 return ret; 490 490 skb_put(skb, ret); 491 491 492 492 /* Insert window size */ 493 - ret = irda_param_insert(self, PI_WINDOW_SIZE, skb->tail, 493 + ret = irda_param_insert(self, PI_WINDOW_SIZE, skb_tail_pointer(skb), 494 494 skb_tailroom(skb), &irlap_param_info); 495 495 if (ret < 0) 496 496 return ret; 497 497 skb_put(skb, ret); 498 498 499 499 /* Insert additional BOFs */ 500 - ret = irda_param_insert(self, PI_ADD_BOFS, skb->tail, 500 + ret = irda_param_insert(self, PI_ADD_BOFS, skb_tail_pointer(skb), 501 501 skb_tailroom(skb), &irlap_param_info); 502 502 if (ret < 0) 503 503 return ret; 504 504 skb_put(skb, ret); 505 505 506 506 /* Insert minimum turnaround time */ 507 - ret = irda_param_insert(self, PI_MIN_TURN_TIME, skb->tail, 507 + ret = irda_param_insert(self, PI_MIN_TURN_TIME, skb_tail_pointer(skb), 508 508 skb_tailroom(skb), &irlap_param_info); 509 509 if (ret < 0) 510 510 return ret; 511 511 skb_put(skb, ret); 512 512 513 513 /* Insert link disconnect/threshold time */ 514 - ret = irda_param_insert(self, PI_LINK_DISC, skb->tail, 514 + ret = irda_param_insert(self, PI_LINK_DISC, skb_tail_pointer(skb), 515 
515 skb_tailroom(skb), &irlap_param_info); 516 516 if (ret < 0) 517 517 return ret;
+6 -10
net/netfilter/nf_conntrack_netlink.c
··· 268 268 struct nlmsghdr *nlh; 269 269 struct nfgenmsg *nfmsg; 270 270 struct nfattr *nest_parms; 271 - unsigned char *b; 272 - 273 - b = skb->tail; 271 + unsigned char *b = skb_tail_pointer(skb); 274 272 275 273 event |= NFNL_SUBSYS_CTNETLINK << 8; 276 274 nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg)); ··· 301 303 ctnetlink_dump_use(skb, ct) < 0) 302 304 goto nfattr_failure; 303 305 304 - nlh->nlmsg_len = skb->tail - b; 306 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 305 307 return skb->len; 306 308 307 309 nlmsg_failure: ··· 320 322 struct nf_conn *ct = (struct nf_conn *)ptr; 321 323 struct sk_buff *skb; 322 324 unsigned int type; 323 - unsigned char *b; 325 + sk_buff_data_t b; 324 326 unsigned int flags = 0, group; 325 327 326 328 /* ignore our fake conntrack entry */ ··· 1150 1152 { 1151 1153 struct nlmsghdr *nlh; 1152 1154 struct nfgenmsg *nfmsg; 1153 - unsigned char *b; 1154 - 1155 - b = skb->tail; 1155 + unsigned char *b = skb_tail_pointer(skb); 1156 1156 1157 1157 event |= NFNL_SUBSYS_CTNETLINK_EXP << 8; 1158 1158 nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(struct nfgenmsg)); ··· 1164 1168 if (ctnetlink_exp_dump_expect(skb, exp) < 0) 1165 1169 goto nfattr_failure; 1166 1170 1167 - nlh->nlmsg_len = skb->tail - b; 1171 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1168 1172 return skb->len; 1169 1173 1170 1174 nlmsg_failure: ··· 1182 1186 struct nf_conntrack_expect *exp = (struct nf_conntrack_expect *)ptr; 1183 1187 struct sk_buff *skb; 1184 1188 unsigned int type; 1185 - unsigned char *b; 1189 + sk_buff_data_t b; 1186 1190 int flags = 0; 1187 1191 1188 1192 if (events & IPEXP_NEW) {
+1 -1
net/netlink/af_netlink.c
··· 785 785 786 786 skb_orphan(skb); 787 787 788 - delta = skb->end - skb->tail; 788 + delta = skb->end - skb_tail_pointer(skb); 789 789 if (delta * 2 < skb->truesize) 790 790 return skb; 791 791
+1 -1
net/packet/af_packet.c
··· 775 775 err = -EINVAL; 776 776 res = dev->hard_header(skb, dev, ntohs(proto), addr, NULL, len); 777 777 if (sock->type != SOCK_DGRAM) { 778 - skb->tail = skb->data; 778 + skb_reset_tail_pointer(skb); 779 779 skb->len = 0; 780 780 } else if (res < 0) 781 781 goto out_free;
+26 -26
net/sched/act_api.c
··· 93 93 continue; 94 94 a->priv = p; 95 95 a->order = n_i; 96 - r = (struct rtattr*) skb->tail; 96 + r = (struct rtattr *)skb_tail_pointer(skb); 97 97 RTA_PUT(skb, a->order, 0, NULL); 98 98 err = tcf_action_dump_1(skb, a, 0, 0); 99 99 if (err < 0) { ··· 101 101 skb_trim(skb, (u8*)r - skb->data); 102 102 goto done; 103 103 } 104 - r->rta_len = skb->tail - (u8*)r; 104 + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; 105 105 n_i++; 106 106 if (n_i >= TCA_ACT_MAX_PRIO) 107 107 goto done; ··· 125 125 struct rtattr *r ; 126 126 int i= 0, n_i = 0; 127 127 128 - r = (struct rtattr*) skb->tail; 128 + r = (struct rtattr *)skb_tail_pointer(skb); 129 129 RTA_PUT(skb, a->order, 0, NULL); 130 130 RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind); 131 131 for (i = 0; i < (hinfo->hmask + 1); i++) { ··· 140 140 } 141 141 } 142 142 RTA_PUT(skb, TCA_FCNT, 4, &n_i); 143 - r->rta_len = skb->tail - (u8*)r; 143 + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; 144 144 145 145 return n_i; 146 146 rtattr_failure: ··· 423 423 tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 424 424 { 425 425 int err = -EINVAL; 426 - unsigned char *b = skb->tail; 426 + unsigned char *b = skb_tail_pointer(skb); 427 427 struct rtattr *r; 428 428 429 429 if (a->ops == NULL || a->ops->dump == NULL) ··· 432 432 RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind); 433 433 if (tcf_action_copy_stats(skb, a, 0)) 434 434 goto rtattr_failure; 435 - r = (struct rtattr*) skb->tail; 435 + r = (struct rtattr *)skb_tail_pointer(skb); 436 436 RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 437 437 if ((err = tcf_action_dump_old(skb, a, bind, ref)) > 0) { 438 - r->rta_len = skb->tail - (u8*)r; 438 + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; 439 439 return err; 440 440 } 441 441 ··· 449 449 { 450 450 struct tc_action *a; 451 451 int err = -EINVAL; 452 - unsigned char *b = skb->tail; 452 + unsigned char *b = skb_tail_pointer(skb); 453 453 struct rtattr *r ; 454 454 455 455 while ((a = act) != NULL) { 456 - r = 
(struct rtattr*) skb->tail; 456 + r = (struct rtattr *)skb_tail_pointer(skb); 457 457 act = a->next; 458 458 RTA_PUT(skb, a->order, 0, NULL); 459 459 err = tcf_action_dump_1(skb, a, bind, ref); 460 460 if (err < 0) 461 461 goto errout; 462 - r->rta_len = skb->tail - (u8*)r; 462 + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; 463 463 } 464 464 465 465 return 0; ··· 635 635 { 636 636 struct tcamsg *t; 637 637 struct nlmsghdr *nlh; 638 - unsigned char *b = skb->tail; 638 + unsigned char *b = skb_tail_pointer(skb); 639 639 struct rtattr *x; 640 640 641 641 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); ··· 645 645 t->tca__pad1 = 0; 646 646 t->tca__pad2 = 0; 647 647 648 - x = (struct rtattr*) skb->tail; 648 + x = (struct rtattr *)skb_tail_pointer(skb); 649 649 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); 650 650 651 651 if (tcf_action_dump(skb, a, bind, ref) < 0) 652 652 goto rtattr_failure; 653 653 654 - x->rta_len = skb->tail - (u8*)x; 654 + x->rta_len = skb_tail_pointer(skb) - (u8 *)x; 655 655 656 - nlh->nlmsg_len = skb->tail - b; 656 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 657 657 return skb->len; 658 658 659 659 rtattr_failure: ··· 767 767 return -ENOBUFS; 768 768 } 769 769 770 - b = (unsigned char *)skb->tail; 770 + b = skb_tail_pointer(skb); 771 771 772 772 if (rtattr_parse_nested(tb, TCA_ACT_MAX, rta) < 0) 773 773 goto err_out; ··· 783 783 t->tca__pad1 = 0; 784 784 t->tca__pad2 = 0; 785 785 786 - x = (struct rtattr *) skb->tail; 786 + x = (struct rtattr *)skb_tail_pointer(skb); 787 787 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); 788 788 789 789 err = a->ops->walk(skb, &dcb, RTM_DELACTION, a); 790 790 if (err < 0) 791 791 goto rtattr_failure; 792 792 793 - x->rta_len = skb->tail - (u8 *) x; 793 + x->rta_len = skb_tail_pointer(skb) - (u8 *)x; 794 794 795 - nlh->nlmsg_len = skb->tail - b; 795 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 796 796 nlh->nlmsg_flags |= NLM_F_ROOT; 797 797 module_put(a->ops->owner); 798 798 kfree(a); ··· 884 884 if (!skb) 885 885 return 
-ENOBUFS; 886 886 887 - b = (unsigned char *)skb->tail; 887 + b = skb_tail_pointer(skb); 888 888 889 889 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*t), flags); 890 890 t = NLMSG_DATA(nlh); ··· 892 892 t->tca__pad1 = 0; 893 893 t->tca__pad2 = 0; 894 894 895 - x = (struct rtattr*) skb->tail; 895 + x = (struct rtattr *)skb_tail_pointer(skb); 896 896 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); 897 897 898 898 if (tcf_action_dump(skb, a, 0, 0) < 0) 899 899 goto rtattr_failure; 900 900 901 - x->rta_len = skb->tail - (u8*)x; 901 + x->rta_len = skb_tail_pointer(skb) - (u8 *)x; 902 902 903 - nlh->nlmsg_len = skb->tail - b; 903 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 904 904 NETLINK_CB(skb).dst_group = RTNLGRP_TC; 905 905 906 906 err = rtnetlink_send(skb, pid, RTNLGRP_TC, flags&NLM_F_ECHO); ··· 1015 1015 tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb) 1016 1016 { 1017 1017 struct nlmsghdr *nlh; 1018 - unsigned char *b = skb->tail; 1018 + unsigned char *b = skb_tail_pointer(skb); 1019 1019 struct rtattr *x; 1020 1020 struct tc_action_ops *a_o; 1021 1021 struct tc_action a; ··· 1048 1048 t->tca__pad1 = 0; 1049 1049 t->tca__pad2 = 0; 1050 1050 1051 - x = (struct rtattr *) skb->tail; 1051 + x = (struct rtattr *)skb_tail_pointer(skb); 1052 1052 RTA_PUT(skb, TCA_ACT_TAB, 0, NULL); 1053 1053 1054 1054 ret = a_o->walk(skb, cb, RTM_GETACTION, &a); ··· 1056 1056 goto rtattr_failure; 1057 1057 1058 1058 if (ret > 0) { 1059 - x->rta_len = skb->tail - (u8 *) x; 1059 + x->rta_len = skb_tail_pointer(skb) - (u8 *)x; 1060 1060 ret = skb->len; 1061 1061 } else 1062 1062 skb_trim(skb, (u8*)x - skb->data); 1063 1063 1064 - nlh->nlmsg_len = skb->tail - b; 1064 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1065 1065 if (NETLINK_CB(cb->skb).pid && ret) 1066 1066 nlh->nlmsg_flags |= NLM_F_MULTI; 1067 1067 module_put(a_o->owner);
+1 -1
net/sched/act_gact.c
··· 155 155 156 156 static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 157 157 { 158 - unsigned char *b = skb->tail; 158 + unsigned char *b = skb_tail_pointer(skb); 159 159 struct tc_gact opt; 160 160 struct tcf_gact *gact = a->priv; 161 161 struct tcf_t t;
+1 -1
net/sched/act_ipt.c
··· 245 245 246 246 static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 247 247 { 248 - unsigned char *b = skb->tail; 248 + unsigned char *b = skb_tail_pointer(skb); 249 249 struct tcf_ipt *ipt = a->priv; 250 250 struct ipt_entry_target *t; 251 251 struct tcf_t tm;
+1 -1
net/sched/act_mirred.c
··· 206 206 207 207 static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 208 208 { 209 - unsigned char *b = skb->tail; 209 + unsigned char *b = skb_tail_pointer(skb); 210 210 struct tcf_mirred *m = a->priv; 211 211 struct tc_mirred opt; 212 212 struct tcf_t t;
+1 -1
net/sched/act_pedit.c
··· 195 195 static int tcf_pedit_dump(struct sk_buff *skb, struct tc_action *a, 196 196 int bind, int ref) 197 197 { 198 - unsigned char *b = skb->tail; 198 + unsigned char *b = skb_tail_pointer(skb); 199 199 struct tcf_pedit *p = a->priv; 200 200 struct tc_pedit *opt; 201 201 struct tcf_t t;
+4 -4
net/sched/act_police.c
··· 80 80 continue; 81 81 a->priv = p; 82 82 a->order = index; 83 - r = (struct rtattr*) skb->tail; 83 + r = (struct rtattr *)skb_tail_pointer(skb); 84 84 RTA_PUT(skb, a->order, 0, NULL); 85 85 if (type == RTM_DELACTION) 86 86 err = tcf_action_dump_1(skb, a, 0, 1); ··· 91 91 skb_trim(skb, (u8*)r - skb->data); 92 92 goto done; 93 93 } 94 - r->rta_len = skb->tail - (u8*)r; 94 + r->rta_len = skb_tail_pointer(skb) - (u8 *)r; 95 95 n_i++; 96 96 } 97 97 } ··· 326 326 static int 327 327 tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) 328 328 { 329 - unsigned char *b = skb->tail; 329 + unsigned char *b = skb_tail_pointer(skb); 330 330 struct tcf_police *police = a->priv; 331 331 struct tc_police opt; 332 332 ··· 572 572 573 573 int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police) 574 574 { 575 - unsigned char *b = skb->tail; 575 + unsigned char *b = skb_tail_pointer(skb); 576 576 struct tc_police opt; 577 577 578 578 opt.index = police->tcf_index;
+1 -1
net/sched/act_simple.c
··· 155 155 static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a, 156 156 int bind, int ref) 157 157 { 158 - unsigned char *b = skb->tail; 158 + unsigned char *b = skb_tail_pointer(skb); 159 159 struct tcf_defact *d = a->priv; 160 160 struct tc_defact opt; 161 161 struct tcf_t t;
+7 -7
net/sched/cls_api.c
··· 323 323 { 324 324 struct tcmsg *tcm; 325 325 struct nlmsghdr *nlh; 326 - unsigned char *b = skb->tail; 326 + unsigned char *b = skb_tail_pointer(skb); 327 327 328 328 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); 329 329 tcm = NLMSG_DATA(nlh); ··· 340 340 if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0) 341 341 goto rtattr_failure; 342 342 } 343 - nlh->nlmsg_len = skb->tail - b; 343 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 344 344 return skb->len; 345 345 346 346 nlmsg_failure: ··· 563 563 * to work with both old and new modes of entering 564 564 * tc data even if iproute2 was newer - jhs 565 565 */ 566 - struct rtattr * p_rta = (struct rtattr*) skb->tail; 566 + struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb); 567 567 568 568 if (exts->action->type != TCA_OLD_COMPAT) { 569 569 RTA_PUT(skb, map->action, 0, NULL); 570 570 if (tcf_action_dump(skb, exts->action, 0, 0) < 0) 571 571 goto rtattr_failure; 572 - p_rta->rta_len = skb->tail - (u8*)p_rta; 572 + p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta; 573 573 } else if (map->police) { 574 574 RTA_PUT(skb, map->police, 0, NULL); 575 575 if (tcf_action_dump_old(skb, exts->action, 0, 0) < 0) 576 576 goto rtattr_failure; 577 - p_rta->rta_len = skb->tail - (u8*)p_rta; 577 + p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta; 578 578 } 579 579 } 580 580 #elif defined CONFIG_NET_CLS_POLICE 581 581 if (map->police && exts->police) { 582 - struct rtattr * p_rta = (struct rtattr*) skb->tail; 582 + struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb); 583 583 584 584 RTA_PUT(skb, map->police, 0, NULL); 585 585 586 586 if (tcf_police_dump(skb, exts->police) < 0) 587 587 goto rtattr_failure; 588 588 589 - p_rta->rta_len = skb->tail - (u8*)p_rta; 589 + p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta; 590 590 } 591 591 #endif 592 592 return 0;
+2 -2
net/sched/cls_basic.c
··· 245 245 struct sk_buff *skb, struct tcmsg *t) 246 246 { 247 247 struct basic_filter *f = (struct basic_filter *) fh; 248 - unsigned char *b = skb->tail; 248 + unsigned char *b = skb_tail_pointer(skb); 249 249 struct rtattr *rta; 250 250 251 251 if (f == NULL) ··· 263 263 tcf_em_tree_dump(skb, &f->ematches, TCA_BASIC_EMATCHES) < 0) 264 264 goto rtattr_failure; 265 265 266 - rta->rta_len = (skb->tail - b); 266 + rta->rta_len = skb_tail_pointer(skb) - b; 267 267 return skb->len; 268 268 269 269 rtattr_failure:
+2 -2
net/sched/cls_fw.c
··· 348 348 { 349 349 struct fw_head *head = (struct fw_head *)tp->root; 350 350 struct fw_filter *f = (struct fw_filter*)fh; 351 - unsigned char *b = skb->tail; 351 + unsigned char *b = skb_tail_pointer(skb); 352 352 struct rtattr *rta; 353 353 354 354 if (f == NULL) ··· 374 374 if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0) 375 375 goto rtattr_failure; 376 376 377 - rta->rta_len = skb->tail - b; 377 + rta->rta_len = skb_tail_pointer(skb) - b; 378 378 379 379 if (tcf_exts_dump_stats(skb, &f->exts, &fw_ext_map) < 0) 380 380 goto rtattr_failure;
+2 -2
net/sched/cls_route.c
··· 562 562 struct sk_buff *skb, struct tcmsg *t) 563 563 { 564 564 struct route4_filter *f = (struct route4_filter*)fh; 565 - unsigned char *b = skb->tail; 565 + unsigned char *b = skb_tail_pointer(skb); 566 566 struct rtattr *rta; 567 567 u32 id; 568 568 ··· 591 591 if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0) 592 592 goto rtattr_failure; 593 593 594 - rta->rta_len = skb->tail - b; 594 + rta->rta_len = skb_tail_pointer(skb) - b; 595 595 596 596 if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0) 597 597 goto rtattr_failure;
+2 -2
net/sched/cls_rsvp.h
··· 593 593 { 594 594 struct rsvp_filter *f = (struct rsvp_filter*)fh; 595 595 struct rsvp_session *s; 596 - unsigned char *b = skb->tail; 596 + unsigned char *b = skb_tail_pointer(skb); 597 597 struct rtattr *rta; 598 598 struct tc_rsvp_pinfo pinfo; 599 599 ··· 623 623 if (tcf_exts_dump(skb, &f->exts, &rsvp_ext_map) < 0) 624 624 goto rtattr_failure; 625 625 626 - rta->rta_len = skb->tail - b; 626 + rta->rta_len = skb_tail_pointer(skb) - b; 627 627 628 628 if (tcf_exts_dump_stats(skb, &f->exts, &rsvp_ext_map) < 0) 629 629 goto rtattr_failure;
+3 -3
net/sched/cls_tcindex.c
··· 448 448 { 449 449 struct tcindex_data *p = PRIV(tp); 450 450 struct tcindex_filter_result *r = (struct tcindex_filter_result *) fh; 451 - unsigned char *b = skb->tail; 451 + unsigned char *b = skb_tail_pointer(skb); 452 452 struct rtattr *rta; 453 453 454 454 DPRINTK("tcindex_dump(tp %p,fh 0x%lx,skb %p,t %p),p %p,r %p,b %p\n", ··· 463 463 RTA_PUT(skb,TCA_TCINDEX_SHIFT,sizeof(p->shift),&p->shift); 464 464 RTA_PUT(skb,TCA_TCINDEX_FALL_THROUGH,sizeof(p->fall_through), 465 465 &p->fall_through); 466 - rta->rta_len = skb->tail-b; 466 + rta->rta_len = skb_tail_pointer(skb) - b; 467 467 } else { 468 468 if (p->perfect) { 469 469 t->tcm_handle = r-p->perfect; ··· 486 486 487 487 if (tcf_exts_dump(skb, &r->exts, &tcindex_ext_map) < 0) 488 488 goto rtattr_failure; 489 - rta->rta_len = skb->tail-b; 489 + rta->rta_len = skb_tail_pointer(skb) - b; 490 490 491 491 if (tcf_exts_dump_stats(skb, &r->exts, &tcindex_ext_map) < 0) 492 492 goto rtattr_failure;
+3 -3
net/sched/cls_u32.c
··· 213 213 off2 = 0; 214 214 } 215 215 216 - if (ptr < skb->tail) 216 + if (ptr < skb_tail_pointer(skb)) 217 217 goto next_ht; 218 218 } 219 219 ··· 718 718 struct sk_buff *skb, struct tcmsg *t) 719 719 { 720 720 struct tc_u_knode *n = (struct tc_u_knode*)fh; 721 - unsigned char *b = skb->tail; 721 + unsigned char *b = skb_tail_pointer(skb); 722 722 struct rtattr *rta; 723 723 724 724 if (n == NULL) ··· 765 765 #endif 766 766 } 767 767 768 - rta->rta_len = skb->tail - b; 768 + rta->rta_len = skb_tail_pointer(skb) - b; 769 769 if (TC_U32_KEY(n->handle)) 770 770 if (tcf_exts_dump_stats(skb, &n->exts, &u32_ext_map) < 0) 771 771 goto rtattr_failure;
+10 -7
net/sched/ematch.c
··· 418 418 int tcf_em_tree_dump(struct sk_buff *skb, struct tcf_ematch_tree *tree, int tlv) 419 419 { 420 420 int i; 421 - struct rtattr * top_start = (struct rtattr*) skb->tail; 422 - struct rtattr * list_start; 421 + u8 *tail; 422 + struct rtattr *top_start = (struct rtattr *)skb_tail_pointer(skb); 423 + struct rtattr *list_start; 423 424 424 425 RTA_PUT(skb, tlv, 0, NULL); 425 426 RTA_PUT(skb, TCA_EMATCH_TREE_HDR, sizeof(tree->hdr), &tree->hdr); 426 427 427 - list_start = (struct rtattr *) skb->tail; 428 + list_start = (struct rtattr *)skb_tail_pointer(skb); 428 429 RTA_PUT(skb, TCA_EMATCH_TREE_LIST, 0, NULL); 429 430 431 + tail = skb_tail_pointer(skb); 430 432 for (i = 0; i < tree->hdr.nmatches; i++) { 431 - struct rtattr *match_start = (struct rtattr*) skb->tail; 433 + struct rtattr *match_start = (struct rtattr *)tail; 432 434 struct tcf_ematch *em = tcf_em_get_match(tree, i); 433 435 struct tcf_ematch_hdr em_hdr = { 434 436 .kind = em->ops ? em->ops->kind : TCF_EM_CONTAINER, ··· 449 447 } else if (em->datalen > 0) 450 448 RTA_PUT_NOHDR(skb, em->datalen, (void *) em->data); 451 449 452 - match_start->rta_len = skb->tail - (u8*) match_start; 450 + tail = skb_tail_pointer(skb); 451 + match_start->rta_len = tail - (u8 *)match_start; 453 452 } 454 453 455 - list_start->rta_len = skb->tail - (u8 *) list_start; 456 - top_start->rta_len = skb->tail - (u8 *) top_start; 454 + list_start->rta_len = tail - (u8 *)list_start; 455 + top_start->rta_len = tail - (u8 *)top_start; 457 456 458 457 return 0; 459 458
+4 -4
net/sched/sch_api.c
··· 813 813 { 814 814 struct tcmsg *tcm; 815 815 struct nlmsghdr *nlh; 816 - unsigned char *b = skb->tail; 816 + unsigned char *b = skb_tail_pointer(skb); 817 817 struct gnet_dump d; 818 818 819 819 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*tcm), flags); ··· 847 847 if (gnet_stats_finish_copy(&d) < 0) 848 848 goto rtattr_failure; 849 849 850 - nlh->nlmsg_len = skb->tail - b; 850 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 851 851 return skb->len; 852 852 853 853 nlmsg_failure: ··· 1051 1051 { 1052 1052 struct tcmsg *tcm; 1053 1053 struct nlmsghdr *nlh; 1054 - unsigned char *b = skb->tail; 1054 + unsigned char *b = skb_tail_pointer(skb); 1055 1055 struct gnet_dump d; 1056 1056 struct Qdisc_class_ops *cl_ops = q->ops->cl_ops; 1057 1057 ··· 1076 1076 if (gnet_stats_finish_copy(&d) < 0) 1077 1077 goto rtattr_failure; 1078 1078 1079 - nlh->nlmsg_len = skb->tail - b; 1079 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1080 1080 return skb->len; 1081 1081 1082 1082 nlmsg_failure:
+2 -2
net/sched/sch_atm.c
··· 631 631 { 632 632 struct atm_qdisc_data *p = PRIV(sch); 633 633 struct atm_flow_data *flow = (struct atm_flow_data *) cl; 634 - unsigned char *b = skb->tail; 634 + unsigned char *b = skb_tail_pointer(skb); 635 635 struct rtattr *rta; 636 636 637 637 DPRINTK("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n", ··· 661 661 662 662 RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(zero),&zero); 663 663 } 664 - rta->rta_len = skb->tail-b; 664 + rta->rta_len = skb_tail_pointer(skb) - b; 665 665 return skb->len; 666 666 667 667 rtattr_failure:
+10 -10
net/sched/sch_cbq.c
··· 1465 1465 1466 1466 static __inline__ int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl) 1467 1467 { 1468 - unsigned char *b = skb->tail; 1468 + unsigned char *b = skb_tail_pointer(skb); 1469 1469 1470 1470 RTA_PUT(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate); 1471 1471 return skb->len; ··· 1477 1477 1478 1478 static __inline__ int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl) 1479 1479 { 1480 - unsigned char *b = skb->tail; 1480 + unsigned char *b = skb_tail_pointer(skb); 1481 1481 struct tc_cbq_lssopt opt; 1482 1482 1483 1483 opt.flags = 0; ··· 1502 1502 1503 1503 static __inline__ int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) 1504 1504 { 1505 - unsigned char *b = skb->tail; 1505 + unsigned char *b = skb_tail_pointer(skb); 1506 1506 struct tc_cbq_wrropt opt; 1507 1507 1508 1508 opt.flags = 0; ··· 1520 1520 1521 1521 static __inline__ int cbq_dump_ovl(struct sk_buff *skb, struct cbq_class *cl) 1522 1522 { 1523 - unsigned char *b = skb->tail; 1523 + unsigned char *b = skb_tail_pointer(skb); 1524 1524 struct tc_cbq_ovl opt; 1525 1525 1526 1526 opt.strategy = cl->ovl_strategy; ··· 1537 1537 1538 1538 static __inline__ int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl) 1539 1539 { 1540 - unsigned char *b = skb->tail; 1540 + unsigned char *b = skb_tail_pointer(skb); 1541 1541 struct tc_cbq_fopt opt; 1542 1542 1543 1543 if (cl->split || cl->defmap) { ··· 1556 1556 #ifdef CONFIG_NET_CLS_POLICE 1557 1557 static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl) 1558 1558 { 1559 - unsigned char *b = skb->tail; 1559 + unsigned char *b = skb_tail_pointer(skb); 1560 1560 struct tc_cbq_police opt; 1561 1561 1562 1562 if (cl->police) { ··· 1590 1590 static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb) 1591 1591 { 1592 1592 struct cbq_sched_data *q = qdisc_priv(sch); 1593 - unsigned char *b = skb->tail; 1593 + unsigned char *b = skb_tail_pointer(skb); 1594 1594 struct rtattr *rta; 1595 
1595 1596 1596 rta = (struct rtattr*)b; 1597 1597 RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 1598 1598 if (cbq_dump_attr(skb, &q->link) < 0) 1599 1599 goto rtattr_failure; 1600 - rta->rta_len = skb->tail - b; 1600 + rta->rta_len = skb_tail_pointer(skb) - b; 1601 1601 return skb->len; 1602 1602 1603 1603 rtattr_failure: ··· 1619 1619 struct sk_buff *skb, struct tcmsg *tcm) 1620 1620 { 1621 1621 struct cbq_class *cl = (struct cbq_class*)arg; 1622 - unsigned char *b = skb->tail; 1622 + unsigned char *b = skb_tail_pointer(skb); 1623 1623 struct rtattr *rta; 1624 1624 1625 1625 if (cl->tparent) ··· 1633 1633 RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 1634 1634 if (cbq_dump_attr(skb, cl) < 0) 1635 1635 goto rtattr_failure; 1636 - rta->rta_len = skb->tail - b; 1636 + rta->rta_len = skb_tail_pointer(skb) - b; 1637 1637 return skb->len; 1638 1638 1639 1639 rtattr_failure:
+3 -3
net/sched/sch_hfsc.c
··· 1363 1363 struct tcmsg *tcm) 1364 1364 { 1365 1365 struct hfsc_class *cl = (struct hfsc_class *)arg; 1366 - unsigned char *b = skb->tail; 1366 + unsigned char *b = skb_tail_pointer(skb); 1367 1367 struct rtattr *rta = (struct rtattr *)b; 1368 1368 1369 1369 tcm->tcm_parent = cl->cl_parent ? cl->cl_parent->classid : TC_H_ROOT; ··· 1374 1374 RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 1375 1375 if (hfsc_dump_curves(skb, cl) < 0) 1376 1376 goto rtattr_failure; 1377 - rta->rta_len = skb->tail - b; 1377 + rta->rta_len = skb_tail_pointer(skb) - b; 1378 1378 return skb->len; 1379 1379 1380 1380 rtattr_failure: ··· 1576 1576 hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb) 1577 1577 { 1578 1578 struct hfsc_sched *q = qdisc_priv(sch); 1579 - unsigned char *b = skb->tail; 1579 + unsigned char *b = skb_tail_pointer(skb); 1580 1580 struct tc_hfsc_qopt qopt; 1581 1581 1582 1582 qopt.defcls = q->defcls;
+5 -5
net/sched/sch_htb.c
··· 1110 1110 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb) 1111 1111 { 1112 1112 struct htb_sched *q = qdisc_priv(sch); 1113 - unsigned char *b = skb->tail; 1113 + unsigned char *b = skb_tail_pointer(skb); 1114 1114 struct rtattr *rta; 1115 1115 struct tc_htb_glob gopt; 1116 1116 spin_lock_bh(&sch->dev->queue_lock); ··· 1123 1123 rta = (struct rtattr *)b; 1124 1124 RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 1125 1125 RTA_PUT(skb, TCA_HTB_INIT, sizeof(gopt), &gopt); 1126 - rta->rta_len = skb->tail - b; 1126 + rta->rta_len = skb_tail_pointer(skb) - b; 1127 1127 spin_unlock_bh(&sch->dev->queue_lock); 1128 1128 return skb->len; 1129 1129 rtattr_failure: 1130 1130 spin_unlock_bh(&sch->dev->queue_lock); 1131 - skb_trim(skb, skb->tail - skb->data); 1131 + skb_trim(skb, skb_tail_pointer(skb) - skb->data); 1132 1132 return -1; 1133 1133 } 1134 1134 ··· 1136 1136 struct sk_buff *skb, struct tcmsg *tcm) 1137 1137 { 1138 1138 struct htb_class *cl = (struct htb_class *)arg; 1139 - unsigned char *b = skb->tail; 1139 + unsigned char *b = skb_tail_pointer(skb); 1140 1140 struct rtattr *rta; 1141 1141 struct tc_htb_opt opt; 1142 1142 ··· 1159 1159 opt.prio = cl->un.leaf.prio; 1160 1160 opt.level = cl->level; 1161 1161 RTA_PUT(skb, TCA_HTB_PARMS, sizeof(opt), &opt); 1162 - rta->rta_len = skb->tail - b; 1162 + rta->rta_len = skb_tail_pointer(skb) - b; 1163 1163 spin_unlock_bh(&sch->dev->queue_lock); 1164 1164 return skb->len; 1165 1165 rtattr_failure:
+2 -2
net/sched/sch_ingress.c
··· 362 362 363 363 static int ingress_dump(struct Qdisc *sch, struct sk_buff *skb) 364 364 { 365 - unsigned char *b = skb->tail; 365 + unsigned char *b = skb_tail_pointer(skb); 366 366 struct rtattr *rta; 367 367 368 368 rta = (struct rtattr *) b; 369 369 RTA_PUT(skb, TCA_OPTIONS, 0, NULL); 370 - rta->rta_len = skb->tail - b; 370 + rta->rta_len = skb_tail_pointer(skb) - b; 371 371 return skb->len; 372 372 373 373 rtattr_failure:
+2 -2
net/sched/sch_netem.c
··· 583 583 static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) 584 584 { 585 585 const struct netem_sched_data *q = qdisc_priv(sch); 586 - unsigned char *b = skb->tail; 586 + unsigned char *b = skb_tail_pointer(skb); 587 587 struct rtattr *rta = (struct rtattr *) b; 588 588 struct tc_netem_qopt qopt; 589 589 struct tc_netem_corr cor; ··· 611 611 corrupt.correlation = q->corrupt_cor.rho; 612 612 RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt); 613 613 614 - rta->rta_len = skb->tail - b; 614 + rta->rta_len = skb_tail_pointer(skb) - b; 615 615 616 616 return skb->len; 617 617
+1 -1
net/sched/sch_prio.c
··· 271 271 static int prio_dump(struct Qdisc *sch, struct sk_buff *skb) 272 272 { 273 273 struct prio_sched_data *q = qdisc_priv(sch); 274 - unsigned char *b = skb->tail; 274 + unsigned char *b = skb_tail_pointer(skb); 275 275 struct tc_prio_qopt opt; 276 276 277 277 opt.bands = q->bands;
+1 -1
net/sched/sch_sfq.c
··· 461 461 static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb) 462 462 { 463 463 struct sfq_sched_data *q = qdisc_priv(sch); 464 - unsigned char *b = skb->tail; 464 + unsigned char *b = skb_tail_pointer(skb); 465 465 struct tc_sfq_qopt opt; 466 466 467 467 opt.quantum = q->quantum;
+2 -2
net/sched/sch_tbf.c
··· 387 387 static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb) 388 388 { 389 389 struct tbf_sched_data *q = qdisc_priv(sch); 390 - unsigned char *b = skb->tail; 390 + unsigned char *b = skb_tail_pointer(skb); 391 391 struct rtattr *rta; 392 392 struct tc_tbf_qopt opt; 393 393 ··· 403 403 opt.mtu = q->mtu; 404 404 opt.buffer = q->buffer; 405 405 RTA_PUT(skb, TCA_TBF_PARMS, sizeof(opt), &opt); 406 - rta->rta_len = skb->tail - b; 406 + rta->rta_len = skb_tail_pointer(skb) - b; 407 407 408 408 return skb->len; 409 409
+2 -2
net/sctp/input.c
··· 612 612 break; 613 613 614 614 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 615 - if (ch_end > skb->tail) 615 + if (ch_end > skb_tail_pointer(skb)) 616 616 break; 617 617 618 618 /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the ··· 644 644 } 645 645 646 646 ch = (sctp_chunkhdr_t *) ch_end; 647 - } while (ch_end < skb->tail); 647 + } while (ch_end < skb_tail_pointer(skb)); 648 648 649 649 return 0; 650 650
+4 -4
net/sctp/inqueue.c
··· 159 159 * the skb->tail. 160 160 */ 161 161 if (unlikely(skb_is_nonlinear(chunk->skb))) { 162 - if (chunk->chunk_end > chunk->skb->tail) 163 - chunk->chunk_end = chunk->skb->tail; 162 + if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) 163 + chunk->chunk_end = skb_tail_pointer(chunk->skb); 164 164 } 165 165 skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); 166 166 chunk->subh.v = NULL; /* Subheader is no longer valid. */ 167 167 168 - if (chunk->chunk_end < chunk->skb->tail) { 168 + if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) { 169 169 /* This is not a singleton */ 170 170 chunk->singleton = 0; 171 - } else if (chunk->chunk_end > chunk->skb->tail) { 171 + } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) { 172 172 /* RFC 2960, Section 6.10 Bundling 173 173 * 174 174 * Partial chunks MUST NOT be placed in an SCTP packet.
+2 -2
net/sctp/sm_make_chunk.c
··· 1143 1143 1144 1144 /* Adjust the chunk length field. */ 1145 1145 chunk->chunk_hdr->length = htons(chunklen + padlen + len); 1146 - chunk->chunk_end = chunk->skb->tail; 1146 + chunk->chunk_end = skb_tail_pointer(chunk->skb); 1147 1147 1148 1148 return target; 1149 1149 } ··· 1168 1168 /* Adjust the chunk length field. */ 1169 1169 chunk->chunk_hdr->length = 1170 1170 htons(ntohs(chunk->chunk_hdr->length) + len); 1171 - chunk->chunk_end = chunk->skb->tail; 1171 + chunk->chunk_end = skb_tail_pointer(chunk->skb); 1172 1172 1173 1173 out: 1174 1174 return err;
+2 -2
net/sctp/sm_statefuns.c
··· 3115 3115 break; 3116 3116 3117 3117 ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); 3118 - if (ch_end > skb->tail) 3118 + if (ch_end > skb_tail_pointer(skb)) 3119 3119 break; 3120 3120 3121 3121 if (SCTP_CID_SHUTDOWN_ACK == ch->type) ··· 3130 3130 return sctp_sf_pdiscard(ep, asoc, type, arg, commands); 3131 3131 3132 3132 ch = (sctp_chunkhdr_t *) ch_end; 3133 - } while (ch_end < skb->tail); 3133 + } while (ch_end < skb_tail_pointer(skb)); 3134 3134 3135 3135 if (ootb_shut_ack) 3136 3136 sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands);
+1 -1
net/tipc/config.c
··· 89 89 int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type, 90 90 void *tlv_data, int tlv_data_size) 91 91 { 92 - struct tlv_desc *tlv = (struct tlv_desc *)buf->tail; 92 + struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(buf); 93 93 int new_tlv_space = TLV_SPACE(tlv_data_size); 94 94 95 95 if (skb_tailroom(buf) < new_tlv_space) {
+1 -1
net/tipc/socket.c
··· 1020 1020 1021 1021 if (!err) { 1022 1022 buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle); 1023 - sz = buf->tail - buf_crs; 1023 + sz = skb_tail_pointer(buf) - buf_crs; 1024 1024 1025 1025 needed = (buf_len - sz_copied); 1026 1026 sz_to_copy = (sz <= needed) ? sz : needed;
+20 -20
net/xfrm/xfrm_user.c
··· 576 576 struct sk_buff *skb = sp->out_skb; 577 577 struct xfrm_usersa_info *p; 578 578 struct nlmsghdr *nlh; 579 - unsigned char *b = skb->tail; 579 + unsigned char *b = skb_tail_pointer(skb); 580 580 581 581 if (sp->this_idx < sp->start_idx) 582 582 goto out; ··· 621 621 if (x->lastused) 622 622 RTA_PUT(skb, XFRMA_LASTUSED, sizeof(x->lastused), &x->lastused); 623 623 624 - nlh->nlmsg_len = skb->tail - b; 624 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 625 625 out: 626 626 sp->this_idx++; 627 627 return 0; ··· 1157 1157 struct sk_buff *in_skb = sp->in_skb; 1158 1158 struct sk_buff *skb = sp->out_skb; 1159 1159 struct nlmsghdr *nlh; 1160 - unsigned char *b = skb->tail; 1160 + unsigned char *b = skb_tail_pointer(skb); 1161 1161 1162 1162 if (sp->this_idx < sp->start_idx) 1163 1163 goto out; ··· 1176 1176 if (copy_to_user_policy_type(xp->type, skb) < 0) 1177 1177 goto nlmsg_failure; 1178 1178 1179 - nlh->nlmsg_len = skb->tail - b; 1179 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1180 1180 out: 1181 1181 sp->this_idx++; 1182 1182 return 0; ··· 1330 1330 struct xfrm_aevent_id *id; 1331 1331 struct nlmsghdr *nlh; 1332 1332 struct xfrm_lifetime_cur ltime; 1333 - unsigned char *b = skb->tail; 1333 + unsigned char *b = skb_tail_pointer(skb); 1334 1334 1335 1335 nlh = NLMSG_PUT(skb, c->pid, c->seq, XFRM_MSG_NEWAE, sizeof(*id)); 1336 1336 id = NLMSG_DATA(nlh); ··· 1362 1362 RTA_PUT(skb,XFRMA_ETIMER_THRESH,sizeof(u32),&etimer); 1363 1363 } 1364 1364 1365 - nlh->nlmsg_len = skb->tail - b; 1365 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1366 1366 return skb->len; 1367 1367 1368 1368 rtattr_failure: ··· 1744 1744 struct xfrm_migrate *mp; 1745 1745 struct xfrm_userpolicy_id *pol_id; 1746 1746 struct nlmsghdr *nlh; 1747 - unsigned char *b = skb->tail; 1747 + unsigned char *b = skb_tail_pointer(skb); 1748 1748 int i; 1749 1749 1750 1750 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id)); ··· 1764 1764 goto nlmsg_failure; 1765 1765 } 1766 1766 1767 - 
nlh->nlmsg_len = skb->tail - b; 1767 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1768 1768 return skb->len; 1769 1769 nlmsg_failure: 1770 1770 skb_trim(skb, b - skb->data); ··· 1942 1942 { 1943 1943 struct xfrm_user_expire *ue; 1944 1944 struct nlmsghdr *nlh; 1945 - unsigned char *b = skb->tail; 1945 + unsigned char *b = skb_tail_pointer(skb); 1946 1946 1947 1947 nlh = NLMSG_PUT(skb, c->pid, 0, XFRM_MSG_EXPIRE, 1948 1948 sizeof(*ue)); ··· 1952 1952 copy_to_user_state(x, &ue->state); 1953 1953 ue->hard = (c->data.hard != 0) ? 1 : 0; 1954 1954 1955 - nlh->nlmsg_len = skb->tail - b; 1955 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 1956 1956 return skb->len; 1957 1957 1958 1958 nlmsg_failure: ··· 1999 1999 struct xfrm_usersa_flush *p; 2000 2000 struct nlmsghdr *nlh; 2001 2001 struct sk_buff *skb; 2002 - unsigned char *b; 2002 + sk_buff_data_t b; 2003 2003 int len = NLMSG_LENGTH(sizeof(struct xfrm_usersa_flush)); 2004 2004 2005 2005 skb = alloc_skb(len, GFP_ATOMIC); ··· 2045 2045 struct xfrm_usersa_id *id; 2046 2046 struct nlmsghdr *nlh; 2047 2047 struct sk_buff *skb; 2048 - unsigned char *b; 2048 + sk_buff_data_t b; 2049 2049 int len = xfrm_sa_len(x); 2050 2050 int headlen; 2051 2051 ··· 2129 2129 { 2130 2130 struct xfrm_user_acquire *ua; 2131 2131 struct nlmsghdr *nlh; 2132 - unsigned char *b = skb->tail; 2132 + unsigned char *b = skb_tail_pointer(skb); 2133 2133 __u32 seq = xfrm_get_acqseq(); 2134 2134 2135 2135 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_ACQUIRE, ··· 2153 2153 if (copy_to_user_policy_type(xp->type, skb) < 0) 2154 2154 goto nlmsg_failure; 2155 2155 2156 - nlh->nlmsg_len = skb->tail - b; 2156 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2157 2157 return skb->len; 2158 2158 2159 2159 nlmsg_failure: ··· 2249 2249 struct xfrm_user_polexpire *upe; 2250 2250 struct nlmsghdr *nlh; 2251 2251 int hard = c->data.hard; 2252 - unsigned char *b = skb->tail; 2252 + unsigned char *b = skb_tail_pointer(skb); 2253 2253 2254 2254 nlh = NLMSG_PUT(skb, c->pid, 0, 
XFRM_MSG_POLEXPIRE, sizeof(*upe)); 2255 2255 upe = NLMSG_DATA(nlh); ··· 2264 2264 goto nlmsg_failure; 2265 2265 upe->hard = !!hard; 2266 2266 2267 - nlh->nlmsg_len = skb->tail - b; 2267 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2268 2268 return skb->len; 2269 2269 2270 2270 nlmsg_failure: ··· 2300 2300 struct xfrm_userpolicy_id *id; 2301 2301 struct nlmsghdr *nlh; 2302 2302 struct sk_buff *skb; 2303 - unsigned char *b; 2303 + sk_buff_data_t b; 2304 2304 int len = RTA_SPACE(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr); 2305 2305 int headlen; 2306 2306 ··· 2357 2357 { 2358 2358 struct nlmsghdr *nlh; 2359 2359 struct sk_buff *skb; 2360 - unsigned char *b; 2360 + sk_buff_data_t b; 2361 2361 int len = 0; 2362 2362 #ifdef CONFIG_XFRM_SUB_POLICY 2363 2363 len += RTA_SPACE(sizeof(struct xfrm_userpolicy_type)); ··· 2410 2410 { 2411 2411 struct xfrm_user_report *ur; 2412 2412 struct nlmsghdr *nlh; 2413 - unsigned char *b = skb->tail; 2413 + unsigned char *b = skb_tail_pointer(skb); 2414 2414 2415 2415 nlh = NLMSG_PUT(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur)); 2416 2416 ur = NLMSG_DATA(nlh); ··· 2422 2422 if (addr) 2423 2423 RTA_PUT(skb, XFRMA_COADDR, sizeof(*addr), addr); 2424 2424 2425 - nlh->nlmsg_len = skb->tail - b; 2425 + nlh->nlmsg_len = skb_tail_pointer(skb) - b; 2426 2426 return skb->len; 2427 2427 2428 2428 nlmsg_failure:
+1 -1
security/selinux/netlink.c
··· 66 66 static void selnl_notify(int msgtype, void *data) 67 67 { 68 68 int len; 69 - unsigned char *tmp; 69 + sk_buff_data_t tmp; 70 70 struct sk_buff *skb; 71 71 struct nlmsghdr *nlh; 72 72