Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: add skb frag size accessors

To ease skb->truesize sanitization, it's better to be able to localize
all references to skb frags size.

Define accessors : skb_frag_size() to fetch frag size, and
skb_frag_size_{set|add|sub}() to manipulate it.

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Eric Dumazet and committed by
David S. Miller
9e903e08 dd767856

+387 -357
+1 -1
drivers/atm/eni.c
··· 1136 1136 put_dma(tx->index,eni_dev->dma,&j,(unsigned long) 1137 1137 skb_frag_page(&skb_shinfo(skb)->frags[i]) + 1138 1138 skb_shinfo(skb)->frags[i].page_offset, 1139 - skb_shinfo(skb)->frags[i].size); 1139 + skb_frag_size(&skb_shinfo(skb)->frags[i])); 1140 1140 } 1141 1141 if (skb->len & 3) 1142 1142 put_dma(tx->index,eni_dev->dma,&j,zeroes,4-(skb->len & 3));
+2 -2
drivers/infiniband/hw/amso1100/c2.c
··· 800 800 /* Loop thru additional data fragments and queue them */ 801 801 if (skb_shinfo(skb)->nr_frags) { 802 802 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 803 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 804 - maplen = frag->size; 803 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 804 + maplen = skb_frag_size(frag); 805 805 mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag, 806 806 0, maplen, DMA_TO_DEVICE); 807 807 elem = elem->next;
+5 -5
drivers/infiniband/hw/nes/nes_nic.c
··· 444 444 skb_frag_t *frag = 445 445 &skb_shinfo(skb)->frags[skb_fragment_index]; 446 446 bus_address = skb_frag_dma_map(&nesdev->pcidev->dev, 447 - frag, 0, frag->size, 447 + frag, 0, skb_frag_size(frag), 448 448 DMA_TO_DEVICE); 449 449 wqe_fragment_length[wqe_fragment_index] = 450 - cpu_to_le16(skb_shinfo(skb)->frags[skb_fragment_index].size); 450 + cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[skb_fragment_index])); 451 451 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), 452 452 bus_address); 453 453 wqe_fragment_index++; ··· 565 565 &skb_shinfo(skb)->frags[tso_frag_count]; 566 566 tso_bus_address[tso_frag_count] = 567 567 skb_frag_dma_map(&nesdev->pcidev->dev, 568 - frag, 0, frag->size, 568 + frag, 0, skb_frag_size(frag), 569 569 DMA_TO_DEVICE); 570 570 } 571 571 ··· 637 637 } 638 638 while (wqe_fragment_index < 5) { 639 639 wqe_fragment_length[wqe_fragment_index] = 640 - cpu_to_le16(skb_shinfo(skb)->frags[tso_frag_index].size); 640 + cpu_to_le16(skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index])); 641 641 set_wqe_64bit_value(nic_sqe->wqe_words, NES_NIC_SQ_WQE_FRAG0_LOW_IDX+(2*wqe_fragment_index), 642 642 (u64)tso_bus_address[tso_frag_index]); 643 643 wqe_fragment_index++; 644 - tso_wqe_length += skb_shinfo(skb)->frags[tso_frag_index++].size; 644 + tso_wqe_length += skb_frag_size(&skb_shinfo(skb)->frags[tso_frag_index++]); 645 645 if (wqe_fragment_index < 5) 646 646 wqe_fragment_length[wqe_fragment_index] = 0; 647 647 if (tso_frag_index == tso_frag_count)
+1 -1
drivers/infiniband/ulp/ipoib/ipoib_cm.c
··· 543 543 } else { 544 544 size = min(length, (unsigned) PAGE_SIZE); 545 545 546 - frag->size = size; 546 + skb_frag_size_set(frag, size); 547 547 skb->data_len += size; 548 548 skb->truesize += size; 549 549 skb->len += size;
+10 -8
drivers/infiniband/ulp/ipoib/ipoib_ib.c
··· 117 117 118 118 size = length - IPOIB_UD_HEAD_SIZE; 119 119 120 - frag->size = size; 120 + skb_frag_size_set(frag, size); 121 121 skb->data_len += size; 122 122 skb->truesize += size; 123 123 } else ··· 322 322 off = 0; 323 323 324 324 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { 325 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 325 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 326 326 mapping[i + off] = ib_dma_map_page(ca, 327 327 skb_frag_page(frag), 328 - frag->page_offset, frag->size, 328 + frag->page_offset, skb_frag_size(frag), 329 329 DMA_TO_DEVICE); 330 330 if (unlikely(ib_dma_mapping_error(ca, mapping[i + off]))) 331 331 goto partial_error; ··· 334 334 335 335 partial_error: 336 336 for (; i > 0; --i) { 337 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 338 - ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE); 337 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 338 + 339 + ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE); 339 340 } 340 341 341 342 if (off) ··· 360 359 off = 0; 361 360 362 361 for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { 363 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 364 - ib_dma_unmap_page(ca, mapping[i + off], frag->size, 362 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 363 + 364 + ib_dma_unmap_page(ca, mapping[i + off], skb_frag_size(frag), 365 365 DMA_TO_DEVICE); 366 366 } 367 367 } ··· 512 510 513 511 for (i = 0; i < nr_frags; ++i) { 514 512 priv->tx_sge[i + off].addr = mapping[i + off]; 515 - priv->tx_sge[i + off].length = frags[i].size; 513 + priv->tx_sge[i + off].length = skb_frag_size(&frags[i]); 516 514 } 517 515 priv->tx_wr.num_sge = nr_frags + off; 518 516 priv->tx_wr.wr_id = wr_id;
+3 -3
drivers/net/ethernet/3com/3c59x.c
··· 2182 2182 cpu_to_le32(pci_map_single( 2183 2183 VORTEX_PCI(vp), 2184 2184 (void *)skb_frag_address(frag), 2185 - frag->size, PCI_DMA_TODEVICE)); 2185 + skb_frag_size(frag), PCI_DMA_TODEVICE)); 2186 2186 2187 2187 if (i == skb_shinfo(skb)->nr_frags-1) 2188 - vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG); 2188 + vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG); 2189 2189 else 2190 - vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size); 2190 + vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)); 2191 2191 } 2192 2192 } 2193 2193 #else
+3 -3
drivers/net/ethernet/3com/typhoon.c
··· 810 810 txd->frag.addrHi = 0; 811 811 first_txd->numDesc++; 812 812 813 - for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 814 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 813 + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 814 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 815 815 void *frag_addr; 816 816 817 817 txd = (struct tx_desc *) (txRing->ringBase + 818 818 txRing->lastWrite); 819 819 typhoon_inc_tx_index(&txRing->lastWrite, 1); 820 820 821 - len = frag->size; 821 + len = skb_frag_size(frag); 822 822 frag_addr = skb_frag_address(frag); 823 823 skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len, 824 824 PCI_DMA_TODEVICE);
+4 -4
drivers/net/ethernet/adaptec/starfire.c
··· 1256 1256 np->tx_info[entry].mapping = 1257 1257 pci_map_single(np->pci_dev, skb->data, skb_first_frag_len(skb), PCI_DMA_TODEVICE); 1258 1258 } else { 1259 - skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1]; 1260 - status |= this_frag->size; 1259 + const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[i - 1]; 1260 + status |= skb_frag_size(this_frag); 1261 1261 np->tx_info[entry].mapping = 1262 1262 pci_map_single(np->pci_dev, 1263 1263 skb_frag_address(this_frag), 1264 - this_frag->size, 1264 + skb_frag_size(this_frag), 1265 1265 PCI_DMA_TODEVICE); 1266 1266 } 1267 1267 ··· 1378 1378 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1379 1379 pci_unmap_single(np->pci_dev, 1380 1380 np->tx_info[entry].mapping, 1381 - skb_shinfo(skb)->frags[i].size, 1381 + skb_frag_size(&skb_shinfo(skb)->frags[i]), 1382 1382 PCI_DMA_TODEVICE); 1383 1383 np->dirty_tx++; 1384 1384 entry++;
+4 -4
drivers/net/ethernet/aeroflex/greth.c
··· 198 198 199 199 dma_unmap_page(greth->dev, 200 200 greth_read_bd(&tx_bdp->addr), 201 - frag->size, 201 + skb_frag_size(frag), 202 202 DMA_TO_DEVICE); 203 203 204 204 greth->tx_last = NEXT_TX(greth->tx_last); ··· 517 517 status = GRETH_BD_EN; 518 518 if (skb->ip_summed == CHECKSUM_PARTIAL) 519 519 status |= GRETH_TXBD_CSALL; 520 - status |= frag->size & GRETH_BD_LEN; 520 + status |= skb_frag_size(frag) & GRETH_BD_LEN; 521 521 522 522 /* Wrap around descriptor ring */ 523 523 if (curr_tx == GRETH_TXBD_NUM_MASK) ··· 531 531 532 532 greth_write_bd(&bdp->stat, status); 533 533 534 - dma_addr = skb_frag_dma_map(greth->dev, frag, 0, frag->size, 534 + dma_addr = skb_frag_dma_map(greth->dev, frag, 0, skb_frag_size(frag), 535 535 DMA_TO_DEVICE); 536 536 537 537 if (unlikely(dma_mapping_error(greth->dev, dma_addr))) ··· 713 713 714 714 dma_unmap_page(greth->dev, 715 715 greth_read_bd(&bdp->addr), 716 - frag->size, 716 + skb_frag_size(frag), 717 717 DMA_TO_DEVICE); 718 718 719 719 greth->tx_last = NEXT_TX(greth->tx_last);
+5 -5
drivers/net/ethernet/alteon/acenic.c
··· 2478 2478 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); 2479 2479 2480 2480 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2481 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2481 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2482 2482 struct tx_ring_info *info; 2483 2483 2484 - len += frag->size; 2484 + len += skb_frag_size(frag); 2485 2485 info = ap->skb->tx_skbuff + idx; 2486 2486 desc = ap->tx_ring + idx; 2487 2487 2488 2488 mapping = skb_frag_dma_map(&ap->pdev->dev, frag, 0, 2489 - frag->size, 2489 + skb_frag_size(frag), 2490 2490 DMA_TO_DEVICE); 2491 2491 2492 - flagsize = (frag->size << 16); 2492 + flagsize = skb_frag_size(frag) << 16; 2493 2493 if (skb->ip_summed == CHECKSUM_PARTIAL) 2494 2494 flagsize |= BD_FLG_TCP_UDP_SUM; 2495 2495 idx = (idx + 1) % ACE_TX_RING_ENTRIES(ap); ··· 2508 2508 info->skb = NULL; 2509 2509 } 2510 2510 dma_unmap_addr_set(info, mapping, mapping); 2511 - dma_unmap_len_set(info, maplen, frag->size); 2511 + dma_unmap_len_set(info, maplen, skb_frag_size(frag)); 2512 2512 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); 2513 2513 } 2514 2514 }
+1 -1
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 2179 2179 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); 2180 2180 2181 2181 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); 2182 - buffer_info->length = frag->size; 2182 + buffer_info->length = skb_frag_size(frag); 2183 2183 buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, 2184 2184 frag, 0, 2185 2185 buffer_info->length,
+3 -3
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
··· 1593 1593 u16 proto_hdr_len = 0; 1594 1594 1595 1595 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1596 - fg_size = skb_shinfo(skb)->frags[i].size; 1596 + fg_size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 1597 1597 tpd_req += ((fg_size + MAX_TX_BUF_LEN - 1) >> MAX_TX_BUF_SHIFT); 1598 1598 } 1599 1599 ··· 1744 1744 } 1745 1745 1746 1746 for (f = 0; f < nr_frags; f++) { 1747 - struct skb_frag_struct *frag; 1747 + const struct skb_frag_struct *frag; 1748 1748 u16 i; 1749 1749 u16 seg_num; 1750 1750 1751 1751 frag = &skb_shinfo(skb)->frags[f]; 1752 - buf_len = frag->size; 1752 + buf_len = skb_frag_size(frag); 1753 1753 1754 1754 seg_num = (buf_len + MAX_TX_BUF_LEN - 1) / MAX_TX_BUF_LEN; 1755 1755 for (i = 0; i < seg_num; i++) {
+5 -7
drivers/net/ethernet/atheros/atlx/atl1.c
··· 2267 2267 } 2268 2268 2269 2269 for (f = 0; f < nr_frags; f++) { 2270 - struct skb_frag_struct *frag; 2270 + const struct skb_frag_struct *frag; 2271 2271 u16 i, nseg; 2272 2272 2273 2273 frag = &skb_shinfo(skb)->frags[f]; 2274 - buf_len = frag->size; 2274 + buf_len = skb_frag_size(frag); 2275 2275 2276 2276 nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / 2277 2277 ATL1_MAX_TX_BUF_LEN; ··· 2356 2356 int count = 1; 2357 2357 int ret_val; 2358 2358 struct tx_packet_desc *ptpd; 2359 - u16 frag_size; 2360 2359 u16 vlan_tag; 2361 2360 unsigned int nr_frags = 0; 2362 2361 unsigned int mss = 0; ··· 2371 2372 2372 2373 nr_frags = skb_shinfo(skb)->nr_frags; 2373 2374 for (f = 0; f < nr_frags; f++) { 2374 - frag_size = skb_shinfo(skb)->frags[f].size; 2375 - if (frag_size) 2376 - count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) / 2377 - ATL1_MAX_TX_BUF_LEN; 2375 + unsigned int f_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 2376 + count += (f_size + ATL1_MAX_TX_BUF_LEN - 1) / 2377 + ATL1_MAX_TX_BUF_LEN; 2378 2378 } 2379 2379 2380 2380 mss = skb_shinfo(skb)->gso_size;
+6 -6
drivers/net/ethernet/broadcom/bnx2.c
··· 2871 2871 dma_unmap_addr( 2872 2872 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], 2873 2873 mapping), 2874 - skb_shinfo(skb)->frags[i].size, 2874 + skb_frag_size(&skb_shinfo(skb)->frags[i]), 2875 2875 PCI_DMA_TODEVICE); 2876 2876 } 2877 2877 ··· 3049 3049 } else { 3050 3050 skb_frag_t *frag = 3051 3051 &skb_shinfo(skb)->frags[i - 1]; 3052 - frag->size -= tail; 3052 + skb_frag_size_sub(frag, tail); 3053 3053 skb->data_len -= tail; 3054 3054 } 3055 3055 return 0; ··· 5395 5395 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; 5396 5396 dma_unmap_page(&bp->pdev->dev, 5397 5397 dma_unmap_addr(tx_buf, mapping), 5398 - skb_shinfo(skb)->frags[k].size, 5398 + skb_frag_size(&skb_shinfo(skb)->frags[k]), 5399 5399 PCI_DMA_TODEVICE); 5400 5400 } 5401 5401 dev_kfree_skb(skb); ··· 6530 6530 tx_buf->is_gso = skb_is_gso(skb); 6531 6531 6532 6532 for (i = 0; i < last_frag; i++) { 6533 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6533 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6534 6534 6535 6535 prod = NEXT_TX_BD(prod); 6536 6536 ring_prod = TX_RING_IDX(prod); 6537 6537 txbd = &txr->tx_desc_ring[ring_prod]; 6538 6538 6539 - len = frag->size; 6539 + len = skb_frag_size(frag); 6540 6540 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len, 6541 6541 DMA_TO_DEVICE); 6542 6542 if (dma_mapping_error(&bp->pdev->dev, mapping)) ··· 6594 6594 ring_prod = TX_RING_IDX(prod); 6595 6595 tx_buf = &txr->tx_buf_ring[ring_prod]; 6596 6596 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), 6597 - skb_shinfo(skb)->frags[i].size, 6597 + skb_frag_size(&skb_shinfo(skb)->frags[i]), 6598 6598 PCI_DMA_TODEVICE); 6599 6599 } 6600 6600
+7 -7
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 2363 2363 /* Calculate the first sum - it's special */ 2364 2364 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++) 2365 2365 wnd_sum += 2366 - skb_shinfo(skb)->frags[frag_idx].size; 2366 + skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]); 2367 2367 2368 2368 /* If there was data on linear skb data - check it */ 2369 2369 if (first_bd_sz > 0) { ··· 2379 2379 check all windows */ 2380 2380 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) { 2381 2381 wnd_sum += 2382 - skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size; 2382 + skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]); 2383 2383 2384 2384 if (unlikely(wnd_sum < lso_mss)) { 2385 2385 to_copy = 1; 2386 2386 break; 2387 2387 } 2388 2388 wnd_sum -= 2389 - skb_shinfo(skb)->frags[wnd_idx].size; 2389 + skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]); 2390 2390 } 2391 2391 } else { 2392 2392 /* in non-LSO too fragmented packet should always ··· 2796 2796 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2797 2797 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2798 2798 2799 - mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, frag->size, 2800 - DMA_TO_DEVICE); 2799 + mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, 2800 + skb_frag_size(frag), DMA_TO_DEVICE); 2801 2801 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 2802 2802 2803 2803 DP(NETIF_MSG_TX_QUEUED, "Unable to map page - " ··· 2821 2821 2822 2822 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 2823 2823 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 2824 - tx_data_bd->nbytes = cpu_to_le16(frag->size); 2825 - le16_add_cpu(&pkt_size, frag->size); 2824 + tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag)); 2825 + le16_add_cpu(&pkt_size, skb_frag_size(frag)); 2826 2826 nbd++; 2827 2827 2828 2828 DP(NETIF_MSG_TX_QUEUED,
+4 -4
drivers/net/ethernet/broadcom/tg3.c
··· 5356 5356 5357 5357 pci_unmap_page(tp->pdev, 5358 5358 dma_unmap_addr(ri, mapping), 5359 - skb_shinfo(skb)->frags[i].size, 5359 + skb_frag_size(&skb_shinfo(skb)->frags[i]), 5360 5360 PCI_DMA_TODEVICE); 5361 5361 5362 5362 while (ri->fragmented) { ··· 6510 6510 } 6511 6511 6512 6512 for (i = 0; i < last; i++) { 6513 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6513 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6514 6514 6515 6515 entry = NEXT_TX(entry); 6516 6516 txb = &tnapi->tx_buffers[entry]; 6517 6517 6518 6518 pci_unmap_page(tnapi->tp->pdev, 6519 6519 dma_unmap_addr(txb, mapping), 6520 - frag->size, PCI_DMA_TODEVICE); 6520 + skb_frag_size(frag), PCI_DMA_TODEVICE); 6521 6521 6522 6522 while (txb->fragmented) { 6523 6523 txb->fragmented = false; ··· 6777 6777 for (i = 0; i <= last; i++) { 6778 6778 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6779 6779 6780 - len = frag->size; 6780 + len = skb_frag_size(frag); 6781 6781 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0, 6782 6782 len, DMA_TO_DEVICE); 6783 6783
+3 -3
drivers/net/ethernet/brocade/bna/bnad.c
··· 116 116 117 117 for (j = 0; j < frag; j++) { 118 118 dma_unmap_page(pdev, dma_unmap_addr(&array[index], dma_addr), 119 - skb_shinfo(skb)->frags[j].size, DMA_TO_DEVICE); 119 + skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE); 120 120 dma_unmap_addr_set(&array[index], dma_addr, 0); 121 121 BNA_QE_INDX_ADD(index, 1, depth); 122 122 } ··· 2741 2741 wis_used = 1; 2742 2742 2743 2743 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2744 - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 2745 - u16 size = frag->size; 2744 + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 2745 + u16 size = skb_frag_size(frag); 2746 2746 2747 2747 if (unlikely(size == 0)) { 2748 2748 unmap_prod = unmap_q->producer_index;
+5 -5
drivers/net/ethernet/chelsio/cxgb/sge.c
··· 1135 1135 len -= SGE_TX_DESC_MAX_PLEN; 1136 1136 } 1137 1137 for (i = 0; nfrags--; i++) { 1138 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1139 - len = frag->size; 1138 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1139 + len = skb_frag_size(frag); 1140 1140 while (len > SGE_TX_DESC_MAX_PLEN) { 1141 1141 count++; 1142 1142 len -= SGE_TX_DESC_MAX_PLEN; ··· 1278 1278 } 1279 1279 1280 1280 mapping = skb_frag_dma_map(&adapter->pdev->dev, frag, 0, 1281 - frag->size, DMA_TO_DEVICE); 1281 + skb_frag_size(frag), DMA_TO_DEVICE); 1282 1282 desc_mapping = mapping; 1283 - desc_len = frag->size; 1283 + desc_len = skb_frag_size(frag); 1284 1284 1285 1285 pidx = write_large_page_tx_descs(pidx, &e1, &ce, &gen, 1286 1286 &desc_mapping, &desc_len, ··· 1290 1290 nfrags == 0); 1291 1291 ce->skb = NULL; 1292 1292 dma_unmap_addr_set(ce, dma_addr, mapping); 1293 - dma_unmap_len_set(ce, dma_len, frag->size); 1293 + dma_unmap_len_set(ce, dma_len, skb_frag_size(frag)); 1294 1294 } 1295 1295 ce->skb = skb; 1296 1296 wmb();
+6 -6
drivers/net/ethernet/chelsio/cxgb3/sge.c
··· 254 254 255 255 while (frag_idx < nfrags && curflit < WR_FLITS) { 256 256 pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), 257 - skb_shinfo(skb)->frags[frag_idx].size, 257 + skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]), 258 258 PCI_DMA_TODEVICE); 259 259 j ^= 1; 260 260 if (j == 0) { ··· 977 977 978 978 nfrags = skb_shinfo(skb)->nr_frags; 979 979 for (i = 0; i < nfrags; i++) { 980 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 980 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 981 981 982 - mapping = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size, 982 + mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), 983 983 DMA_TO_DEVICE); 984 - sgp->len[j] = cpu_to_be32(frag->size); 984 + sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); 985 985 sgp->addr[j] = cpu_to_be64(mapping); 986 986 j ^= 1; 987 987 if (j == 0) ··· 1544 1544 1545 1545 si = skb_shinfo(skb); 1546 1546 for (i = 0; i < si->nr_frags; i++) 1547 - pci_unmap_page(dui->pdev, *p++, si->frags[i].size, 1547 + pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]), 1548 1548 PCI_DMA_TODEVICE); 1549 1549 } 1550 1550 ··· 2118 2118 rx_frag += nr_frags; 2119 2119 __skb_frag_set_page(rx_frag, sd->pg_chunk.page); 2120 2120 rx_frag->page_offset = sd->pg_chunk.offset + offset; 2121 - rx_frag->size = len; 2121 + skb_frag_size_set(rx_frag, len); 2122 2122 2123 2123 skb->len += len; 2124 2124 skb->data_len += len;
+13 -13
drivers/net/ethernet/chelsio/cxgb4/sge.c
··· 215 215 end = &si->frags[si->nr_frags]; 216 216 217 217 for (fp = si->frags; fp < end; fp++) { 218 - *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, 219 - DMA_TO_DEVICE); 218 + *++addr = dma_map_page(dev, fp->page, fp->page_offset, 219 + skb_frag_size(fp), DMA_TO_DEVICE); 220 220 if (dma_mapping_error(dev, *addr)) 221 221 goto unwind; 222 222 } ··· 224 224 225 225 unwind: 226 226 while (fp-- > si->frags) 227 - dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); 227 + dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); 228 228 229 229 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); 230 230 out_err: ··· 243 243 si = skb_shinfo(skb); 244 244 end = &si->frags[si->nr_frags]; 245 245 for (fp = si->frags; fp < end; fp++) 246 - dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE); 246 + dma_unmap_page(dev, *addr++, skb_frag_size(fp), DMA_TO_DEVICE); 247 247 } 248 248 249 249 /** ··· 717 717 sgl->addr0 = cpu_to_be64(addr[0] + start); 718 718 nfrags++; 719 719 } else { 720 - sgl->len0 = htonl(si->frags[0].size); 720 + sgl->len0 = htonl(skb_frag_size(&si->frags[0])); 721 721 sgl->addr0 = cpu_to_be64(addr[1]); 722 722 } 723 723 ··· 732 732 to = (u8 *)end > (u8 *)q->stat ? 
buf : sgl->sge; 733 733 734 734 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { 735 - to->len[0] = cpu_to_be32(si->frags[i].size); 736 - to->len[1] = cpu_to_be32(si->frags[++i].size); 735 + to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); 736 + to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); 737 737 to->addr[0] = cpu_to_be64(addr[i]); 738 738 to->addr[1] = cpu_to_be64(addr[++i]); 739 739 } 740 740 if (nfrags) { 741 - to->len[0] = cpu_to_be32(si->frags[i].size); 741 + to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); 742 742 to->len[1] = cpu_to_be32(0); 743 743 to->addr[0] = cpu_to_be64(addr[i + 1]); 744 744 } ··· 1417 1417 /* usually there's just one frag */ 1418 1418 ssi->frags[0].page = gl->frags[0].page; 1419 1419 ssi->frags[0].page_offset = gl->frags[0].page_offset + offset; 1420 - ssi->frags[0].size = gl->frags[0].size - offset; 1420 + skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - offset); 1421 1421 ssi->nr_frags = gl->nfrags; 1422 1422 n = gl->nfrags - 1; 1423 1423 if (n) ··· 1718 1718 bufsz = get_buf_size(rsd); 1719 1719 fp->page = rsd->page; 1720 1720 fp->page_offset = q->offset; 1721 - fp->size = min(bufsz, len); 1722 - len -= fp->size; 1721 + skb_frag_size_set(fp, min(bufsz, len)); 1722 + len -= skb_frag_size(fp); 1723 1723 if (!len) 1724 1724 break; 1725 1725 unmap_rx_buf(q->adap, &rxq->fl); ··· 1731 1731 */ 1732 1732 dma_sync_single_for_cpu(q->adap->pdev_dev, 1733 1733 get_buf_addr(rsd), 1734 - fp->size, DMA_FROM_DEVICE); 1734 + skb_frag_size(fp), DMA_FROM_DEVICE); 1735 1735 1736 1736 si.va = page_address(si.frags[0].page) + 1737 1737 si.frags[0].page_offset; ··· 1740 1740 si.nfrags = frags + 1; 1741 1741 ret = q->handler(q, q->cur_desc, &si); 1742 1742 if (likely(ret == 0)) 1743 - q->offset += ALIGN(fp->size, FL_ALIGN); 1743 + q->offset += ALIGN(skb_frag_size(fp), FL_ALIGN); 1744 1744 else 1745 1745 restore_rx_bufs(&si, &rxq->fl, frags); 1746 1746 } else if (likely(rsp_type == 
RSP_TYPE_CPL)) {
+13 -13
drivers/net/ethernet/chelsio/cxgb4vf/sge.c
··· 296 296 si = skb_shinfo(skb); 297 297 end = &si->frags[si->nr_frags]; 298 298 for (fp = si->frags; fp < end; fp++) { 299 - *++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size, 300 - DMA_TO_DEVICE); 299 + *++addr = dma_map_page(dev, fp->page, fp->page_offset, 300 + skb_frag_size(fp), DMA_TO_DEVICE); 301 301 if (dma_mapping_error(dev, *addr)) 302 302 goto unwind; 303 303 } ··· 305 305 306 306 unwind: 307 307 while (fp-- > si->frags) 308 - dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE); 308 + dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); 309 309 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); 310 310 311 311 out_err: ··· 899 899 sgl->addr0 = cpu_to_be64(addr[0] + start); 900 900 nfrags++; 901 901 } else { 902 - sgl->len0 = htonl(si->frags[0].size); 902 + sgl->len0 = htonl(skb_frag_size(&si->frags[0])); 903 903 sgl->addr0 = cpu_to_be64(addr[1]); 904 904 } 905 905 ··· 915 915 to = (u8 *)end > (u8 *)tq->stat ? buf : sgl->sge; 916 916 917 917 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { 918 - to->len[0] = cpu_to_be32(si->frags[i].size); 919 - to->len[1] = cpu_to_be32(si->frags[++i].size); 918 + to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); 919 + to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); 920 920 to->addr[0] = cpu_to_be64(addr[i]); 921 921 to->addr[1] = cpu_to_be64(addr[++i]); 922 922 } 923 923 if (nfrags) { 924 - to->len[0] = cpu_to_be32(si->frags[i].size); 924 + to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); 925 925 to->len[1] = cpu_to_be32(0); 926 926 to->addr[0] = cpu_to_be64(addr[i + 1]); 927 927 } ··· 1399 1399 ssi = skb_shinfo(skb); 1400 1400 ssi->frags[0].page = gl->frags[0].page; 1401 1401 ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len; 1402 - ssi->frags[0].size = gl->frags[0].size - pull_len; 1402 + skb_frag_size_set(&ssi->frags[0], skb_frag_size(&gl->frags[0]) - pull_len); 1403 1403 if (gl->nfrags > 1) 1404 1404 
memcpy(&ssi->frags[1], &gl->frags[1], 1405 1405 (gl->nfrags-1) * sizeof(skb_frag_t)); ··· 1451 1451 /* usually there's just one frag */ 1452 1452 si->frags[0].page = gl->frags[0].page; 1453 1453 si->frags[0].page_offset = gl->frags[0].page_offset + offset; 1454 - si->frags[0].size = gl->frags[0].size - offset; 1454 + skb_frag_size_set(&si->frags[0], skb_frag_size(&gl->frags[0]) - offset); 1455 1455 si->nr_frags = gl->nfrags; 1456 1456 1457 1457 n = gl->nfrags - 1; ··· 1702 1702 bufsz = get_buf_size(sdesc); 1703 1703 fp->page = sdesc->page; 1704 1704 fp->page_offset = rspq->offset; 1705 - fp->size = min(bufsz, len); 1706 - len -= fp->size; 1705 + skb_frag_size_set(fp, min(bufsz, len)); 1706 + len -= skb_frag_size(fp); 1707 1707 if (!len) 1708 1708 break; 1709 1709 unmap_rx_buf(rspq->adapter, &rxq->fl); ··· 1717 1717 */ 1718 1718 dma_sync_single_for_cpu(rspq->adapter->pdev_dev, 1719 1719 get_buf_addr(sdesc), 1720 - fp->size, DMA_FROM_DEVICE); 1720 + skb_frag_size(fp), DMA_FROM_DEVICE); 1721 1721 gl.va = (page_address(gl.frags[0].page) + 1722 1722 gl.frags[0].page_offset); 1723 1723 prefetch(gl.va); ··· 1728 1728 */ 1729 1729 ret = rspq->handler(rspq, rspq->cur_desc, &gl); 1730 1730 if (likely(ret == 0)) 1731 - rspq->offset += ALIGN(fp->size, FL_ALIGN); 1731 + rspq->offset += ALIGN(skb_frag_size(fp), FL_ALIGN); 1732 1732 else 1733 1733 restore_rx_bufs(&gl, &rxq->fl, frag); 1734 1734 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
+6 -6
drivers/net/ethernet/cisco/enic/enic_main.c
··· 599 599 struct vnic_wq *wq, struct sk_buff *skb, 600 600 unsigned int len_left, int loopback) 601 601 { 602 - skb_frag_t *frag; 602 + const skb_frag_t *frag; 603 603 604 604 /* Queue additional data fragments */ 605 605 for (frag = skb_shinfo(skb)->frags; len_left; frag++) { 606 - len_left -= frag->size; 606 + len_left -= skb_frag_size(frag); 607 607 enic_queue_wq_desc_cont(wq, skb, 608 608 skb_frag_dma_map(&enic->pdev->dev, 609 - frag, 0, frag->size, 609 + frag, 0, skb_frag_size(frag), 610 610 DMA_TO_DEVICE), 611 - frag->size, 611 + skb_frag_size(frag), 612 612 (len_left == 0), /* EOP? */ 613 613 loopback); 614 614 } ··· 717 717 * for additional data fragments 718 718 */ 719 719 for (frag = skb_shinfo(skb)->frags; len_left; frag++) { 720 - len_left -= frag->size; 721 - frag_len_left = frag->size; 720 + len_left -= skb_frag_size(frag); 721 + frag_len_left = skb_frag_size(frag); 722 722 offset = 0; 723 723 724 724 while (frag_len_left) {
+9 -9
drivers/net/ethernet/emulex/benet/be_main.c
··· 636 636 } 637 637 638 638 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 639 - struct skb_frag_struct *frag = 639 + const struct skb_frag_struct *frag = 640 640 &skb_shinfo(skb)->frags[i]; 641 641 busaddr = skb_frag_dma_map(dev, frag, 0, 642 - frag->size, DMA_TO_DEVICE); 642 + skb_frag_size(frag), DMA_TO_DEVICE); 643 643 if (dma_mapping_error(dev, busaddr)) 644 644 goto dma_err; 645 645 wrb = queue_head_node(txq); 646 - wrb_fill(wrb, busaddr, frag->size); 646 + wrb_fill(wrb, busaddr, skb_frag_size(frag)); 647 647 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 648 648 queue_head_inc(txq); 649 - copied += frag->size; 649 + copied += skb_frag_size(frag); 650 650 } 651 651 652 652 if (dummy_wrb) { ··· 1069 1069 skb_frag_set_page(skb, 0, page_info->page); 1070 1070 skb_shinfo(skb)->frags[0].page_offset = 1071 1071 page_info->page_offset + hdr_len; 1072 - skb_shinfo(skb)->frags[0].size = curr_frag_len - hdr_len; 1072 + skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len); 1073 1073 skb->data_len = curr_frag_len - hdr_len; 1074 1074 skb->truesize += rx_frag_size; 1075 1075 skb->tail += hdr_len; ··· 1095 1095 skb_frag_set_page(skb, j, page_info->page); 1096 1096 skb_shinfo(skb)->frags[j].page_offset = 1097 1097 page_info->page_offset; 1098 - skb_shinfo(skb)->frags[j].size = 0; 1098 + skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); 1099 1099 skb_shinfo(skb)->nr_frags++; 1100 1100 } else { 1101 1101 put_page(page_info->page); 1102 1102 } 1103 1103 1104 - skb_shinfo(skb)->frags[j].size += curr_frag_len; 1104 + skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); 1105 1105 skb->len += curr_frag_len; 1106 1106 skb->data_len += curr_frag_len; 1107 1107 skb->truesize += rx_frag_size; ··· 1176 1176 skb_frag_set_page(skb, j, page_info->page); 1177 1177 skb_shinfo(skb)->frags[j].page_offset = 1178 1178 page_info->page_offset; 1179 - skb_shinfo(skb)->frags[j].size = 0; 1179 + skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0); 1180 1180 } else { 1181 1181 
put_page(page_info->page); 1182 1182 } 1183 - skb_shinfo(skb)->frags[j].size += curr_frag_len; 1183 + skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len); 1184 1184 skb->truesize += rx_frag_size; 1185 1185 remaining -= curr_frag_len; 1186 1186 index_inc(&rxcp->rxq_idx, rxq->len);
+2 -2
drivers/net/ethernet/ibm/ehea/ehea_main.c
··· 1676 1676 1677 1677 /* copy sg1entry data */ 1678 1678 sg1entry->l_key = lkey; 1679 - sg1entry->len = frag->size; 1679 + sg1entry->len = skb_frag_size(frag); 1680 1680 sg1entry->vaddr = 1681 1681 ehea_map_vaddr(skb_frag_address(frag)); 1682 1682 swqe->descriptors++; ··· 1689 1689 sgentry = &sg_list[i - sg1entry_contains_frag_data]; 1690 1690 1691 1691 sgentry->l_key = lkey; 1692 - sgentry->len = frag->size; 1692 + sgentry->len = skb_frag_size(frag); 1693 1693 sgentry->vaddr = ehea_map_vaddr(skb_frag_address(frag)); 1694 1694 swqe->descriptors++; 1695 1695 }
+1 -1
drivers/net/ethernet/ibm/emac/core.c
··· 1453 1453 /* skb fragments */ 1454 1454 for (i = 0; i < nr_frags; ++i) { 1455 1455 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 1456 - len = frag->size; 1456 + len = skb_frag_size(frag); 1457 1457 1458 1458 if (unlikely(dev->tx_cnt + mal_tx_chunks(len) >= NUM_TX_BUFF)) 1459 1459 goto undo_frame;
+3 -3
drivers/net/ethernet/ibm/ibmveth.c
··· 1014 1014 1015 1015 /* Map the frags */ 1016 1016 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1017 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1017 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1018 1018 1019 1019 dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0, 1020 - frag->size, DMA_TO_DEVICE); 1020 + skb_frag_size(frag), DMA_TO_DEVICE); 1021 1021 1022 1022 if (dma_mapping_error(&adapter->vdev->dev, dma_addr)) 1023 1023 goto map_failed_frags; 1024 1024 1025 - descs[i+1].fields.flags_len = desc_flags | frag->size; 1025 + descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag); 1026 1026 descs[i+1].fields.address = dma_addr; 1027 1027 } 1028 1028
+3 -3
drivers/net/ethernet/intel/e1000/e1000_main.c
··· 2894 2894 } 2895 2895 2896 2896 for (f = 0; f < nr_frags; f++) { 2897 - struct skb_frag_struct *frag; 2897 + const struct skb_frag_struct *frag; 2898 2898 2899 2899 frag = &skb_shinfo(skb)->frags[f]; 2900 - len = frag->size; 2900 + len = skb_frag_size(frag); 2901 2901 offset = 0; 2902 2902 2903 2903 while (len) { ··· 3183 3183 3184 3184 nr_frags = skb_shinfo(skb)->nr_frags; 3185 3185 for (f = 0; f < nr_frags; f++) 3186 - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 3186 + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 3187 3187 max_txd_pwr); 3188 3188 if (adapter->pcix_82544) 3189 3189 count += nr_frags;
+3 -3
drivers/net/ethernet/intel/e1000e/netdev.c
··· 4673 4673 } 4674 4674 4675 4675 for (f = 0; f < nr_frags; f++) { 4676 - struct skb_frag_struct *frag; 4676 + const struct skb_frag_struct *frag; 4677 4677 4678 4678 frag = &skb_shinfo(skb)->frags[f]; 4679 - len = frag->size; 4679 + len = skb_frag_size(frag); 4680 4680 offset = 0; 4681 4681 4682 4682 while (len) { ··· 4943 4943 4944 4944 nr_frags = skb_shinfo(skb)->nr_frags; 4945 4945 for (f = 0; f < nr_frags; f++) 4946 - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size, 4946 + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), 4947 4947 max_txd_pwr); 4948 4948 4949 4949 if (adapter->hw.mac.tx_pkt_filtering)
+1 -1
drivers/net/ethernet/intel/igb/igb_main.c
··· 4268 4268 i = 0; 4269 4269 } 4270 4270 4271 - size = frag->size; 4271 + size = skb_frag_size(frag); 4272 4272 data_len -= size; 4273 4273 4274 4274 dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
+2 -2
drivers/net/ethernet/intel/igbvf/netdev.c
··· 2045 2045 2046 2046 2047 2047 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) { 2048 - struct skb_frag_struct *frag; 2048 + const struct skb_frag_struct *frag; 2049 2049 2050 2050 count++; 2051 2051 i++; ··· 2053 2053 i = 0; 2054 2054 2055 2055 frag = &skb_shinfo(skb)->frags[f]; 2056 - len = frag->size; 2056 + len = skb_frag_size(frag); 2057 2057 2058 2058 buffer_info = &tx_ring->buffer_info[i]; 2059 2059 BUG_ON(len >= IGBVF_MAX_DATA_PER_TXD);
+2 -2
drivers/net/ethernet/intel/ixgb/ixgb_main.c
··· 1383 1383 } 1384 1384 1385 1385 for (f = 0; f < nr_frags; f++) { 1386 - struct skb_frag_struct *frag; 1386 + const struct skb_frag_struct *frag; 1387 1387 1388 1388 frag = &skb_shinfo(skb)->frags[f]; 1389 - len = frag->size; 1389 + len = skb_frag_size(frag); 1390 1390 offset = 0; 1391 1391 1392 1392 while (len) {
+2 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6545 6545 6546 6546 frag = &skb_shinfo(skb)->frags[f]; 6547 6547 #ifdef IXGBE_FCOE 6548 - size = min_t(unsigned int, data_len, frag->size); 6548 + size = min_t(unsigned int, data_len, skb_frag_size(frag)); 6549 6549 #else 6550 - size = frag->size; 6550 + size = skb_frag_size(frag); 6551 6551 #endif 6552 6552 data_len -= size; 6553 6553 f++;
+3 -3
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 2912 2912 } 2913 2913 2914 2914 for (f = 0; f < nr_frags; f++) { 2915 - struct skb_frag_struct *frag; 2915 + const struct skb_frag_struct *frag; 2916 2916 2917 2917 frag = &skb_shinfo(skb)->frags[f]; 2918 - len = min((unsigned int)frag->size, total); 2918 + len = min((unsigned int)skb_frag_size(frag), total); 2919 2919 offset = 0; 2920 2920 2921 2921 while (len) { ··· 3096 3096 3097 3097 count += TXD_USE_COUNT(skb_headlen(skb)); 3098 3098 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3099 - count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3099 + count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f])); 3100 3100 3101 3101 if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) { 3102 3102 adapter->tx_busy++;
+2 -2
drivers/net/ethernet/jme.c
··· 1920 1920 u8 hidma = jme->dev->features & NETIF_F_HIGHDMA; 1921 1921 int i, nr_frags = skb_shinfo(skb)->nr_frags; 1922 1922 int mask = jme->tx_ring_mask; 1923 - struct skb_frag_struct *frag; 1923 + const struct skb_frag_struct *frag; 1924 1924 u32 len; 1925 1925 1926 1926 for (i = 0 ; i < nr_frags ; ++i) { ··· 1930 1930 1931 1931 jme_fill_tx_map(jme->pdev, ctxdesc, ctxbi, 1932 1932 skb_frag_page(frag), 1933 - frag->page_offset, frag->size, hidma); 1933 + frag->page_offset, skb_frag_size(frag), hidma); 1934 1934 } 1935 1935 1936 1936 len = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+5 -4
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 713 713 int frag; 714 714 715 715 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 716 - skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 717 - if (fragp->size <= 8 && fragp->page_offset & 7) 716 + const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 717 + 718 + if (skb_frag_size(fragp) <= 8 && fragp->page_offset & 7) 718 719 return 1; 719 720 } 720 721 ··· 752 751 } 753 752 754 753 desc->l4i_chk = 0; 755 - desc->byte_cnt = this_frag->size; 754 + desc->byte_cnt = skb_frag_size(this_frag); 756 755 desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent, 757 756 this_frag, 0, 758 - this_frag->size, 757 + skb_frag_size(this_frag), 759 758 DMA_TO_DEVICE); 760 759 } 761 760 }
+4 -4
drivers/net/ethernet/marvell/skge.c
··· 2770 2770 2771 2771 control |= BMU_STFWD; 2772 2772 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2773 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2773 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2774 2774 2775 2775 map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, 2776 - frag->size, DMA_TO_DEVICE); 2776 + skb_frag_size(frag), DMA_TO_DEVICE); 2777 2777 2778 2778 e = e->next; 2779 2779 e->skb = skb; ··· 2783 2783 tf->dma_lo = map; 2784 2784 tf->dma_hi = (u64) map >> 32; 2785 2785 dma_unmap_addr_set(e, mapaddr, map); 2786 - dma_unmap_len_set(e, maplen, frag->size); 2786 + dma_unmap_len_set(e, maplen, skb_frag_size(frag)); 2787 2787 2788 - tf->control = BMU_OWN | BMU_SW | control | frag->size; 2788 + tf->control = BMU_OWN | BMU_SW | control | skb_frag_size(frag); 2789 2789 } 2790 2790 tf->control |= BMU_EOF | BMU_IRQ_EOF; 2791 2791 }
+8 -8
drivers/net/ethernet/marvell/sky2.c
··· 1225 1225 dma_unmap_len_set(re, data_size, size); 1226 1226 1227 1227 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1228 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1228 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1229 1229 1230 1230 re->frag_addr[i] = skb_frag_dma_map(&pdev->dev, frag, 0, 1231 - frag->size, 1231 + skb_frag_size(frag), 1232 1232 DMA_FROM_DEVICE); 1233 1233 1234 1234 if (dma_mapping_error(&pdev->dev, re->frag_addr[i])) ··· 1239 1239 map_page_error: 1240 1240 while (--i >= 0) { 1241 1241 pci_unmap_page(pdev, re->frag_addr[i], 1242 - skb_shinfo(skb)->frags[i].size, 1242 + skb_frag_size(&skb_shinfo(skb)->frags[i]), 1243 1243 PCI_DMA_FROMDEVICE); 1244 1244 } 1245 1245 ··· 1263 1263 1264 1264 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) 1265 1265 pci_unmap_page(pdev, re->frag_addr[i], 1266 - skb_shinfo(skb)->frags[i].size, 1266 + skb_frag_size(&skb_shinfo(skb)->frags[i]), 1267 1267 PCI_DMA_FROMDEVICE); 1268 1268 } 1269 1269 ··· 1936 1936 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1937 1937 1938 1938 mapping = skb_frag_dma_map(&hw->pdev->dev, frag, 0, 1939 - frag->size, DMA_TO_DEVICE); 1939 + skb_frag_size(frag), DMA_TO_DEVICE); 1940 1940 1941 1941 if (dma_mapping_error(&hw->pdev->dev, mapping)) 1942 1942 goto mapping_unwind; ··· 1952 1952 re = sky2->tx_ring + slot; 1953 1953 re->flags = TX_MAP_PAGE; 1954 1954 dma_unmap_addr_set(re, mapaddr, mapping); 1955 - dma_unmap_len_set(re, maplen, frag->size); 1955 + dma_unmap_len_set(re, maplen, skb_frag_size(frag)); 1956 1956 1957 1957 le = get_tx_le(sky2, &slot); 1958 1958 le->addr = cpu_to_le32(lower_32_bits(mapping)); 1959 - le->length = cpu_to_le16(frag->size); 1959 + le->length = cpu_to_le16(skb_frag_size(frag)); 1960 1960 le->ctrl = ctrl; 1961 1961 le->opcode = OP_BUFFER | HW_OWNER; 1962 1962 } ··· 2484 2484 } else { 2485 2485 size = min(length, (unsigned) PAGE_SIZE); 2486 2486 2487 - frag->size = size; 2487 + skb_frag_size_set(frag, size); 2488 2488 skb->data_len += 
size; 2489 2489 skb->truesize += PAGE_SIZE; 2490 2490 skb->len += size;
+7 -7
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 135 135 136 136 /* Set size and memtype fields */ 137 137 for (i = 0; i < priv->num_frags; i++) { 138 - skb_frags[i].size = priv->frag_info[i].frag_size; 138 + skb_frag_size_set(&skb_frags[i], priv->frag_info[i].frag_size); 139 139 rx_desc->data[i].byte_count = 140 140 cpu_to_be32(priv->frag_info[i].frag_size); 141 141 rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key); ··· 194 194 dma = be64_to_cpu(rx_desc->data[nr].addr); 195 195 196 196 en_dbg(DRV, priv, "Unmapping buffer at dma:0x%llx\n", (u64) dma); 197 - pci_unmap_single(mdev->pdev, dma, skb_frags[nr].size, 197 + pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags[nr]), 198 198 PCI_DMA_FROMDEVICE); 199 199 put_page(skb_frags[nr].page); 200 200 } ··· 421 421 422 422 /* Save page reference in skb */ 423 423 skb_frags_rx[nr].page = skb_frags[nr].page; 424 - skb_frags_rx[nr].size = skb_frags[nr].size; 424 + skb_frag_size_set(&skb_frags_rx[nr], skb_frag_size(&skb_frags[nr])); 425 425 skb_frags_rx[nr].page_offset = skb_frags[nr].page_offset; 426 426 dma = be64_to_cpu(rx_desc->data[nr].addr); 427 427 ··· 430 430 goto fail; 431 431 432 432 /* Unmap buffer */ 433 - pci_unmap_single(mdev->pdev, dma, skb_frags_rx[nr].size, 433 + pci_unmap_single(mdev->pdev, dma, skb_frag_size(&skb_frags_rx[nr]), 434 434 PCI_DMA_FROMDEVICE); 435 435 } 436 436 /* Adjust size of last fragment to match actual length */ 437 437 if (nr > 0) 438 - skb_frags_rx[nr - 1].size = length - 439 - priv->frag_info[nr - 1].frag_prefix_size; 438 + skb_frag_size_set(&skb_frags_rx[nr - 1], 439 + length - priv->frag_info[nr - 1].frag_prefix_size); 440 440 return nr; 441 441 442 442 fail: ··· 506 506 skb_shinfo(skb)->frags[0].page_offset += HEADER_COPY_SIZE; 507 507 508 508 /* Adjust size of first fragment */ 509 - skb_shinfo(skb)->frags[0].size -= HEADER_COPY_SIZE; 509 + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], HEADER_COPY_SIZE); 510 510 skb->data_len = length - HEADER_COPY_SIZE; 511 511 } 512 512 return skb;
+6 -6
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 226 226 frag = &skb_shinfo(skb)->frags[i]; 227 227 pci_unmap_page(mdev->pdev, 228 228 (dma_addr_t) be64_to_cpu(data[i].addr), 229 - frag->size, PCI_DMA_TODEVICE); 229 + skb_frag_size(frag), PCI_DMA_TODEVICE); 230 230 } 231 231 } 232 232 /* Stamp the freed descriptor */ ··· 256 256 frag = &skb_shinfo(skb)->frags[i]; 257 257 pci_unmap_page(mdev->pdev, 258 258 (dma_addr_t) be64_to_cpu(data->addr), 259 - frag->size, PCI_DMA_TODEVICE); 259 + skb_frag_size(frag), PCI_DMA_TODEVICE); 260 260 ++data; 261 261 } 262 262 } ··· 550 550 skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb)); 551 551 if (skb_shinfo(skb)->nr_frags) 552 552 memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr, 553 - skb_shinfo(skb)->frags[0].size); 553 + skb_frag_size(&skb_shinfo(skb)->frags[0])); 554 554 555 555 } else { 556 556 inl->byte_count = cpu_to_be32(1 << 31 | spc); ··· 570 570 skb_headlen(skb) - spc); 571 571 if (skb_shinfo(skb)->nr_frags) 572 572 memcpy(((void *)(inl + 1)) + skb_headlen(skb) - spc, 573 - fragptr, skb_shinfo(skb)->frags[0].size); 573 + fragptr, skb_frag_size(&skb_shinfo(skb)->frags[0])); 574 574 } 575 575 576 576 wmb(); ··· 757 757 for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { 758 758 frag = &skb_shinfo(skb)->frags[i]; 759 759 dma = pci_map_page(mdev->dev->pdev, frag->page, frag->page_offset, 760 - frag->size, PCI_DMA_TODEVICE); 760 + skb_frag_size(frag), PCI_DMA_TODEVICE); 761 761 data->addr = cpu_to_be64(dma); 762 762 data->lkey = cpu_to_be32(mdev->mr.key); 763 763 wmb(); 764 - data->byte_count = cpu_to_be32(frag->size); 764 + data->byte_count = cpu_to_be32(skb_frag_size(frag)); 765 765 --data; 766 766 } 767 767
+1 -1
drivers/net/ethernet/micrel/ksz884x.c
··· 4700 4700 ++hw->tx_int_cnt; 4701 4701 4702 4702 dma_buf = DMA_BUFFER(desc); 4703 - dma_buf->len = this_frag->size; 4703 + dma_buf->len = skb_frag_size(this_frag); 4704 4704 4705 4705 dma_buf->dma = pci_map_single( 4706 4706 hw_priv->pdev,
+7 -7
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
··· 1216 1216 skb_frags = skb_shinfo(skb)->frags; 1217 1217 while (len > 0) { 1218 1218 memcpy(skb_frags, rx_frags, sizeof(*skb_frags)); 1219 - len -= rx_frags->size; 1219 + len -= skb_frag_size(rx_frags); 1220 1220 skb_frags++; 1221 1221 rx_frags++; 1222 1222 skb_shinfo(skb)->nr_frags++; ··· 1228 1228 * manually */ 1229 1229 skb_copy_to_linear_data(skb, va, hlen); 1230 1230 skb_shinfo(skb)->frags[0].page_offset += hlen; 1231 - skb_shinfo(skb)->frags[0].size -= hlen; 1231 + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hlen); 1232 1232 skb->data_len -= hlen; 1233 1233 skb->tail += hlen; 1234 1234 skb_pull(skb, MXGEFW_PAD); ··· 1345 1345 __skb_frag_set_page(&rx_frags[i], rx->info[idx].page); 1346 1346 rx_frags[i].page_offset = rx->info[idx].page_offset; 1347 1347 if (remainder < MYRI10GE_ALLOC_SIZE) 1348 - rx_frags[i].size = remainder; 1348 + skb_frag_size_set(&rx_frags[i], remainder); 1349 1349 else 1350 - rx_frags[i].size = MYRI10GE_ALLOC_SIZE; 1350 + skb_frag_size_set(&rx_frags[i], MYRI10GE_ALLOC_SIZE); 1351 1351 rx->cnt++; 1352 1352 idx = rx->cnt & rx->mask; 1353 1353 remainder -= MYRI10GE_ALLOC_SIZE; ··· 1355 1355 1356 1356 if (lro_enabled) { 1357 1357 rx_frags[0].page_offset += MXGEFW_PAD; 1358 - rx_frags[0].size -= MXGEFW_PAD; 1358 + skb_frag_size_sub(&rx_frags[0], MXGEFW_PAD); 1359 1359 len -= MXGEFW_PAD; 1360 1360 lro_receive_frags(&ss->rx_done.lro_mgr, rx_frags, 1361 1361 /* opaque, will come back in get_frag_header */ ··· 1382 1382 1383 1383 /* Attach the pages to the skb, and trim off any padding */ 1384 1384 myri10ge_rx_skb_build(skb, va, rx_frags, len, hlen); 1385 - if (skb_shinfo(skb)->frags[0].size <= 0) { 1385 + if (skb_frag_size(&skb_shinfo(skb)->frags[0]) <= 0) { 1386 1386 skb_frag_unref(skb, 0); 1387 1387 skb_shinfo(skb)->nr_frags = 0; 1388 1388 } ··· 2926 2926 idx = (count + tx->req) & tx->mask; 2927 2927 frag = &skb_shinfo(skb)->frags[frag_idx]; 2928 2928 frag_idx++; 2929 - len = frag->size; 2929 + len = skb_frag_size(frag); 2930 2930 bus = 
skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, 2931 2931 DMA_TO_DEVICE); 2932 2932 dma_unmap_addr_set(&tx->info[idx], bus, bus);
+2 -2
drivers/net/ethernet/natsemi/ns83820.c
··· 1161 1161 break; 1162 1162 1163 1163 buf = skb_frag_dma_map(&dev->pci_dev->dev, frag, 0, 1164 - frag->size, DMA_TO_DEVICE); 1164 + skb_frag_size(frag), DMA_TO_DEVICE); 1165 1165 dprintk("frag: buf=%08Lx page=%08lx offset=%08lx\n", 1166 1166 (long long)buf, (long) page_to_pfn(frag->page), 1167 1167 frag->page_offset); 1168 - len = frag->size; 1168 + len = skb_frag_size(frag); 1169 1169 frag++; 1170 1170 nr_frags--; 1171 1171 }
+6 -6
drivers/net/ethernet/neterion/s2io.c
··· 2350 2350 if (frg_cnt) { 2351 2351 txds++; 2352 2352 for (j = 0; j < frg_cnt; j++, txds++) { 2353 - skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 2353 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[j]; 2354 2354 if (!txds->Buffer_Pointer) 2355 2355 break; 2356 2356 pci_unmap_page(nic->pdev, 2357 2357 (dma_addr_t)txds->Buffer_Pointer, 2358 - frag->size, PCI_DMA_TODEVICE); 2358 + skb_frag_size(frag), PCI_DMA_TODEVICE); 2359 2359 } 2360 2360 } 2361 2361 memset(txdlp, 0, (sizeof(struct TxD) * fifo_data->max_txds)); ··· 4185 4185 frg_cnt = skb_shinfo(skb)->nr_frags; 4186 4186 /* For fragmented SKB. */ 4187 4187 for (i = 0; i < frg_cnt; i++) { 4188 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4188 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 4189 4189 /* A '0' length fragment will be ignored */ 4190 - if (!frag->size) 4190 + if (!skb_frag_size(frag)) 4191 4191 continue; 4192 4192 txdp++; 4193 4193 txdp->Buffer_Pointer = (u64)skb_frag_dma_map(&sp->pdev->dev, 4194 4194 frag, 0, 4195 - frag->size, 4195 + skb_frag_size(frag), 4196 4196 DMA_TO_DEVICE); 4197 - txdp->Control_1 = TXD_BUFFER0_SIZE(frag->size); 4197 + txdp->Control_1 = TXD_BUFFER0_SIZE(skb_frag_size(frag)); 4198 4198 if (offload_type == SKB_GSO_UDP) 4199 4199 txdp->Control_1 |= TXD_UFO_EN; 4200 4200 }
+6 -6
drivers/net/ethernet/neterion/vxge/vxge-main.c
··· 585 585 for (j = 0; j < frg_cnt; j++) { 586 586 pci_unmap_page(fifo->pdev, 587 587 txd_priv->dma_buffers[i++], 588 - frag->size, PCI_DMA_TODEVICE); 588 + skb_frag_size(frag), PCI_DMA_TODEVICE); 589 589 frag += 1; 590 590 } 591 591 ··· 920 920 frag = &skb_shinfo(skb)->frags[0]; 921 921 for (i = 0; i < frg_cnt; i++) { 922 922 /* ignore 0 length fragment */ 923 - if (!frag->size) 923 + if (!skb_frag_size(frag)) 924 924 continue; 925 925 926 926 dma_pointer = (u64)skb_frag_dma_map(&fifo->pdev->dev, frag, 927 - 0, frag->size, 927 + 0, skb_frag_size(frag), 928 928 DMA_TO_DEVICE); 929 929 930 930 if (unlikely(dma_mapping_error(&fifo->pdev->dev, dma_pointer))) ··· 936 936 937 937 txdl_priv->dma_buffers[j] = dma_pointer; 938 938 vxge_hw_fifo_txdl_buffer_set(fifo_hw, dtr, j++, dma_pointer, 939 - frag->size); 939 + skb_frag_size(frag)); 940 940 frag += 1; 941 941 } 942 942 ··· 979 979 980 980 for (; j < i; j++) { 981 981 pci_unmap_page(fifo->pdev, txdl_priv->dma_buffers[j], 982 - frag->size, PCI_DMA_TODEVICE); 982 + skb_frag_size(frag), PCI_DMA_TODEVICE); 983 983 frag += 1; 984 984 } 985 985 ··· 1050 1050 1051 1051 for (j = 0; j < frg_cnt; j++) { 1052 1052 pci_unmap_page(fifo->pdev, txd_priv->dma_buffers[i++], 1053 - frag->size, PCI_DMA_TODEVICE); 1053 + skb_frag_size(frag), PCI_DMA_TODEVICE); 1054 1054 frag += 1; 1055 1055 } 1056 1056
+11 -7
drivers/net/ethernet/nvidia/forcedeth.c
··· 2099 2099 2100 2100 /* add fragments to entries count */ 2101 2101 for (i = 0; i < fragments; i++) { 2102 - entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2103 - ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2102 + u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2103 + 2104 + entries += (size >> NV_TX2_TSO_MAX_SHIFT) + 2105 + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2104 2106 } 2105 2107 2106 2108 spin_lock_irqsave(&np->lock, flags); ··· 2140 2138 2141 2139 /* setup the fragments */ 2142 2140 for (i = 0; i < fragments; i++) { 2143 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2144 - u32 size = frag->size; 2141 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2142 + u32 size = skb_frag_size(frag); 2145 2143 offset = 0; 2146 2144 2147 2145 do { ··· 2213 2211 2214 2212 /* add fragments to entries count */ 2215 2213 for (i = 0; i < fragments; i++) { 2216 - entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 2217 - ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2214 + u32 size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2215 + 2216 + entries += (size >> NV_TX2_TSO_MAX_SHIFT) + 2217 + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2218 2218 } 2219 2219 2220 2220 spin_lock_irqsave(&np->lock, flags); ··· 2257 2253 /* setup the fragments */ 2258 2254 for (i = 0; i < fragments; i++) { 2259 2255 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2260 - u32 size = frag->size; 2256 + u32 size = skb_frag_size(frag); 2261 2257 offset = 0; 2262 2258 2263 2259 do {
+4 -4
drivers/net/ethernet/pasemi/pasemi_mac.c
··· 300 300 pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE); 301 301 302 302 for (f = 0; f < nfrags; f++) { 303 - skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 303 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; 304 304 305 - pci_unmap_page(pdev, dmas[f+1], frag->size, PCI_DMA_TODEVICE); 305 + pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag), PCI_DMA_TODEVICE); 306 306 } 307 307 dev_kfree_skb_irq(skb); 308 308 ··· 1506 1506 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1507 1507 1508 1508 map[i + 1] = skb_frag_dma_map(&mac->dma_pdev->dev, frag, 0, 1509 - frag->size, DMA_TO_DEVICE); 1510 - map_size[i+1] = frag->size; 1509 + skb_frag_size(frag), DMA_TO_DEVICE); 1510 + map_size[i+1] = skb_frag_size(frag); 1511 1511 if (dma_mapping_error(&mac->dma_pdev->dev, map[i + 1])) { 1512 1512 nfrags = i; 1513 1513 goto out_err_nolock;
+3 -3
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
··· 1905 1905 frag = &skb_shinfo(skb)->frags[i]; 1906 1906 nf = &pbuf->frag_array[i+1]; 1907 1907 1908 - map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size, 1908 + map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), 1909 1909 DMA_TO_DEVICE); 1910 1910 if (dma_mapping_error(&pdev->dev, map)) 1911 1911 goto unwind; 1912 1912 1913 1913 nf->dma = map; 1914 - nf->length = frag->size; 1914 + nf->length = skb_frag_size(frag); 1915 1915 } 1916 1916 1917 1917 return 0; ··· 1962 1962 1963 1963 for (i = 0; i < (frag_count - NETXEN_MAX_FRAGS_PER_TX); i++) { 1964 1964 frag = &skb_shinfo(skb)->frags[i]; 1965 - delta += frag->size; 1965 + delta += skb_frag_size(frag); 1966 1966 } 1967 1967 1968 1968 if (!__pskb_pull_tail(skb, delta))
+3 -3
drivers/net/ethernet/qlogic/qla3xxx.c
··· 2388 2388 seg++; 2389 2389 } 2390 2390 2391 - map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size, 2391 + map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), 2392 2392 DMA_TO_DEVICE); 2393 2393 2394 2394 err = dma_mapping_error(&qdev->pdev->dev, map); ··· 2401 2401 2402 2402 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2403 2403 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2404 - oal_entry->len = cpu_to_le32(frag->size); 2404 + oal_entry->len = cpu_to_le32(skb_frag_size(frag)); 2405 2405 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2406 - dma_unmap_len_set(&tx_cb->map[seg], maplen, frag->size); 2406 + dma_unmap_len_set(&tx_cb->map[seg], maplen, skb_frag_size(frag)); 2407 2407 } 2408 2408 /* Terminate the last segment. */ 2409 2409 oal_entry->len |= cpu_to_le32(OAL_LAST_ENTRY);
+3 -3
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
··· 2135 2135 frag = &skb_shinfo(skb)->frags[i]; 2136 2136 nf = &pbuf->frag_array[i+1]; 2137 2137 2138 - map = skb_frag_dma_map(&pdev->dev, frag, 0, frag->size, 2138 + map = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), 2139 2139 DMA_TO_DEVICE); 2140 2140 if (dma_mapping_error(&pdev->dev, map)) 2141 2141 goto unwind; 2142 2142 2143 2143 nf->dma = map; 2144 - nf->length = frag->size; 2144 + nf->length = skb_frag_size(frag); 2145 2145 } 2146 2146 2147 2147 return 0; ··· 2221 2221 if (!skb_is_gso(skb) && frag_count > QLCNIC_MAX_FRAGS_PER_TX) { 2222 2222 2223 2223 for (i = 0; i < (frag_count - QLCNIC_MAX_FRAGS_PER_TX); i++) 2224 - delta += skb_shinfo(skb)->frags[i].size; 2224 + delta += skb_frag_size(&skb_shinfo(skb)->frags[i]); 2225 2225 2226 2226 if (!__pskb_pull_tail(skb, delta)) 2227 2227 goto drop_packet;
+3 -3
drivers/net/ethernet/qlogic/qlge/qlge_main.c
··· 1431 1431 map_idx++; 1432 1432 } 1433 1433 1434 - map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, frag->size, 1434 + map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag), 1435 1435 DMA_TO_DEVICE); 1436 1436 1437 1437 err = dma_mapping_error(&qdev->pdev->dev, map); ··· 1443 1443 } 1444 1444 1445 1445 tbd->addr = cpu_to_le64(map); 1446 - tbd->len = cpu_to_le32(frag->size); 1446 + tbd->len = cpu_to_le32(skb_frag_size(frag)); 1447 1447 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); 1448 1448 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, 1449 - frag->size); 1449 + skb_frag_size(frag)); 1450 1450 1451 1451 } 1452 1452 /* Save the number of segments we've mapped. */
+2 -2
drivers/net/ethernet/realtek/8139cp.c
··· 777 777 entry = NEXT_TX(entry); 778 778 779 779 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 780 - skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 780 + const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 781 781 u32 len; 782 782 u32 ctrl; 783 783 dma_addr_t mapping; 784 784 785 - len = this_frag->size; 785 + len = skb_frag_size(this_frag); 786 786 mapping = dma_map_single(&cp->pdev->dev, 787 787 skb_frag_address(this_frag), 788 788 len, PCI_DMA_TODEVICE);
+2 -2
drivers/net/ethernet/realtek/r8169.c
··· 5413 5413 5414 5414 entry = tp->cur_tx; 5415 5415 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { 5416 - skb_frag_t *frag = info->frags + cur_frag; 5416 + const skb_frag_t *frag = info->frags + cur_frag; 5417 5417 dma_addr_t mapping; 5418 5418 u32 status, len; 5419 5419 void *addr; ··· 5421 5421 entry = (entry + 1) % NUM_TX_DESC; 5422 5422 5423 5423 txd = tp->TxDescArray + entry; 5424 - len = frag->size; 5424 + len = skb_frag_size(frag); 5425 5425 addr = skb_frag_address(frag); 5426 5426 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE); 5427 5427 if (unlikely(dma_mapping_error(d, mapping))) {
+1 -1
drivers/net/ethernet/sfc/rx.c
··· 481 481 skb_frag_set_page(skb, 0, page); 482 482 skb_shinfo(skb)->frags[0].page_offset = 483 483 efx_rx_buf_offset(efx, rx_buf); 484 - skb_shinfo(skb)->frags[0].size = rx_buf->len; 484 + skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len); 485 485 skb_shinfo(skb)->nr_frags = 1; 486 486 487 487 skb->len = rx_buf->len;
+4 -4
drivers/net/ethernet/sfc/tx.c
··· 238 238 if (i >= skb_shinfo(skb)->nr_frags) 239 239 break; 240 240 fragment = &skb_shinfo(skb)->frags[i]; 241 - len = fragment->size; 241 + len = skb_frag_size(fragment); 242 242 i++; 243 243 /* Map for DMA */ 244 244 unmap_single = false; ··· 926 926 skb_frag_t *frag) 927 927 { 928 928 st->unmap_addr = skb_frag_dma_map(&efx->pci_dev->dev, frag, 0, 929 - frag->size, DMA_TO_DEVICE); 929 + skb_frag_size(frag), DMA_TO_DEVICE); 930 930 if (likely(!dma_mapping_error(&efx->pci_dev->dev, st->unmap_addr))) { 931 931 st->unmap_single = false; 932 - st->unmap_len = frag->size; 933 - st->in_len = frag->size; 932 + st->unmap_len = skb_frag_size(frag); 933 + st->in_len = skb_frag_size(frag); 934 934 st->dma_addr = st->unmap_addr; 935 935 return 0; 936 936 }
+2 -2
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1106 1106 } 1107 1107 1108 1108 for (i = 0; i < nfrags; i++) { 1109 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1110 - int len = frag->size; 1109 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1110 + int len = skb_frag_size(frag); 1111 1111 1112 1112 entry = (++priv->cur_tx) % txsize; 1113 1113 desc = priv->dma_tx + entry;
+4 -4
drivers/net/ethernet/sun/cassini.c
··· 2051 2051 __skb_frag_set_page(frag, page->buffer); 2052 2052 __skb_frag_ref(frag); 2053 2053 frag->page_offset = off; 2054 - frag->size = hlen - swivel; 2054 + skb_frag_size_set(frag, hlen - swivel); 2055 2055 2056 2056 /* any more data? */ 2057 2057 if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) { ··· 2075 2075 __skb_frag_set_page(frag, page->buffer); 2076 2076 __skb_frag_ref(frag); 2077 2077 frag->page_offset = 0; 2078 - frag->size = hlen; 2078 + skb_frag_size_set(frag, hlen); 2079 2079 RX_USED_ADD(page, hlen + cp->crc_size); 2080 2080 } 2081 2081 ··· 2826 2826 entry = TX_DESC_NEXT(ring, entry); 2827 2827 2828 2828 for (frag = 0; frag < nr_frags; frag++) { 2829 - skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 2829 + const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag]; 2830 2830 2831 - len = fragp->size; 2831 + len = skb_frag_size(fragp); 2832 2832 mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len, 2833 2833 DMA_TO_DEVICE); 2834 2834
+3 -3
drivers/net/ethernet/sun/niu.c
··· 3594 3594 tb = &rp->tx_buffs[idx]; 3595 3595 BUG_ON(tb->skb != NULL); 3596 3596 np->ops->unmap_page(np->device, tb->mapping, 3597 - skb_shinfo(skb)->frags[i].size, 3597 + skb_frag_size(&skb_shinfo(skb)->frags[i]), 3598 3598 DMA_TO_DEVICE); 3599 3599 idx = NEXT_TX(rp, idx); 3600 3600 } ··· 6727 6727 } 6728 6728 6729 6729 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 6730 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6730 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 6731 6731 6732 - len = frag->size; 6732 + len = skb_frag_size(frag); 6733 6733 mapping = np->ops->map_page(np->device, skb_frag_page(frag), 6734 6734 frag->page_offset, len, 6735 6735 DMA_TO_DEVICE);
+2 -2
drivers/net/ethernet/sun/sungem.c
··· 1065 1065 entry = NEXT_TX(entry); 1066 1066 1067 1067 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 1068 - skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 1068 + const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 1069 1069 u32 len; 1070 1070 dma_addr_t mapping; 1071 1071 u64 this_ctrl; 1072 1072 1073 - len = this_frag->size; 1073 + len = skb_frag_size(this_frag); 1074 1074 mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag, 1075 1075 0, len, DMA_TO_DEVICE); 1076 1076 this_ctrl = ctrl;
+2 -2
drivers/net/ethernet/sun/sunhme.c
··· 2305 2305 entry = NEXT_TX(entry); 2306 2306 2307 2307 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 2308 - skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 2308 + const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; 2309 2309 u32 len, mapping, this_txflags; 2310 2310 2311 - len = this_frag->size; 2311 + len = skb_frag_size(this_frag); 2312 2312 mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 2313 2313 0, len, DMA_TO_DEVICE); 2314 2314 this_txflags = tx_flags;
+3 -3
drivers/net/ethernet/tehuti/tehuti.c
··· 1493 1493 bdx_tx_db_inc_wptr(db); 1494 1494 1495 1495 for (i = 0; i < nr_frags; i++) { 1496 - struct skb_frag_struct *frag; 1496 + const struct skb_frag_struct *frag; 1497 1497 1498 1498 frag = &skb_shinfo(skb)->frags[i]; 1499 - db->wptr->len = frag->size; 1499 + db->wptr->len = skb_frag_size(frag); 1500 1500 db->wptr->addr.dma = skb_frag_dma_map(&priv->pdev->dev, frag, 1501 - 0, frag->size, 1501 + 0, skb_frag_size(frag), 1502 1502 DMA_TO_DEVICE); 1503 1503 1504 1504 pbl++;
+1 -1
drivers/net/ethernet/tile/tilepro.c
··· 1713 1713 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset; 1714 1714 frags[n].cpa_lo = cpa; 1715 1715 frags[n].cpa_hi = cpa >> 32; 1716 - frags[n].length = f->size; 1716 + frags[n].length = skb_frag_size(f); 1717 1717 frags[n].hash_for_home = hash_for_home; 1718 1718 n++; 1719 1719 }
+3 -3
drivers/net/ethernet/tundra/tsi108_eth.c
··· 709 709 data->txring[tx].len = skb_headlen(skb); 710 710 misc |= TSI108_TX_SOF; 711 711 } else { 712 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 712 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 713 713 714 714 data->txring[tx].buf0 = skb_frag_dma_map(NULL, frag, 715 715 0, 716 - frag->size, 716 + skb_frag_size(frag), 717 717 DMA_TO_DEVICE); 718 - data->txring[tx].len = frag->size; 718 + data->txring[tx].len = skb_frag_size(frag); 719 719 } 720 720 721 721 if (i == frags - 1)
+3 -3
drivers/net/ethernet/via/via-velocity.c
··· 2554 2554 2555 2555 /* Handle fragments */ 2556 2556 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2557 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2557 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2558 2558 2559 2559 tdinfo->skb_dma[i + 1] = skb_frag_dma_map(&vptr->pdev->dev, 2560 2560 frag, 0, 2561 - frag->size, 2561 + skb_frag_size(frag), 2562 2562 DMA_TO_DEVICE); 2563 2563 2564 2564 td_ptr->td_buf[i + 1].pa_low = cpu_to_le32(tdinfo->skb_dma[i + 1]); 2565 2565 td_ptr->td_buf[i + 1].pa_high = 0; 2566 - td_ptr->td_buf[i + 1].size = cpu_to_le16(frag->size); 2566 + td_ptr->td_buf[i + 1].size = cpu_to_le16(skb_frag_size(frag)); 2567 2567 } 2568 2568 tdinfo->nskb_dma = i + 1; 2569 2569
+2 -2
drivers/net/ethernet/xilinx/ll_temac_main.c
··· 716 716 cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 717 717 cur_p->phys = dma_map_single(ndev->dev.parent, 718 718 skb_frag_address(frag), 719 - frag->size, DMA_TO_DEVICE); 720 - cur_p->len = frag->size; 719 + skb_frag_size(frag), DMA_TO_DEVICE); 720 + cur_p->len = skb_frag_size(frag); 721 721 cur_p->app0 = 0; 722 722 frag++; 723 723 }
+4 -4
drivers/net/virtio_net.c
··· 147 147 skb_frag_t *f; 148 148 149 149 f = &skb_shinfo(skb)->frags[i]; 150 - f->size = min((unsigned)PAGE_SIZE - offset, *len); 150 + skb_frag_size_set(f, min((unsigned)PAGE_SIZE - offset, *len)); 151 151 f->page_offset = offset; 152 152 __skb_frag_set_page(f, page); 153 153 154 - skb->data_len += f->size; 155 - skb->len += f->size; 154 + skb->data_len += skb_frag_size(f); 155 + skb->len += skb_frag_size(f); 156 156 skb_shinfo(skb)->nr_frags++; 157 - *len -= f->size; 157 + *len -= skb_frag_size(f); 158 158 } 159 159 160 160 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+6 -6
drivers/net/vmxnet3/vmxnet3_drv.c
··· 656 656 657 657 __skb_frag_set_page(frag, rbi->page); 658 658 frag->page_offset = 0; 659 - frag->size = rcd->len; 660 - skb->data_len += frag->size; 659 + skb_frag_size_set(frag, rcd->len); 660 + skb->data_len += rcd->len; 661 661 skb->truesize += PAGE_SIZE; 662 662 skb_shinfo(skb)->nr_frags++; 663 663 } ··· 745 745 } 746 746 747 747 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 748 - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 748 + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i]; 749 749 750 750 tbi = tq->buf_info + tq->tx_ring.next2fill; 751 751 tbi->map_type = VMXNET3_MAP_PAGE; 752 752 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag, 753 - 0, frag->size, 753 + 0, skb_frag_size(frag), 754 754 DMA_TO_DEVICE); 755 755 756 - tbi->len = frag->size; 756 + tbi->len = skb_frag_size(frag); 757 757 758 758 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; 759 759 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); 760 760 761 761 gdesc->txd.addr = cpu_to_le64(tbi->dma_addr); 762 - gdesc->dword[2] = cpu_to_le32(dw2 | frag->size); 762 + gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag)); 763 763 gdesc->dword[3] = 0; 764 764 765 765 dev_dbg(&adapter->netdev->dev,
+2 -2
drivers/net/xen-netback/netback.c
··· 334 334 count++; 335 335 336 336 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 337 - unsigned long size = skb_shinfo(skb)->frags[i].size; 337 + unsigned long size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 338 338 unsigned long bytes; 339 339 while (size > 0) { 340 340 BUG_ON(copy_off > MAX_BUFFER_OFFSET); ··· 526 526 for (i = 0; i < nr_frags; i++) { 527 527 netbk_gop_frag_copy(vif, skb, npo, 528 528 skb_frag_page(&skb_shinfo(skb)->frags[i]), 529 - skb_shinfo(skb)->frags[i].size, 529 + skb_frag_size(&skb_shinfo(skb)->frags[i]), 530 530 skb_shinfo(skb)->frags[i].page_offset, 531 531 &head); 532 532 }
+2 -2
drivers/net/xen-netfront.c
··· 467 467 468 468 tx->gref = np->grant_tx_ref[id] = ref; 469 469 tx->offset = frag->page_offset; 470 - tx->size = frag->size; 470 + tx->size = skb_frag_size(frag); 471 471 tx->flags = 0; 472 472 } 473 473 ··· 965 965 if (rx->status > len) { 966 966 skb_shinfo(skb)->frags[0].page_offset = 967 967 rx->offset + len; 968 - skb_shinfo(skb)->frags[0].size = rx->status - len; 968 + skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len); 969 969 skb->data_len = rx->status - len; 970 970 } else { 971 971 __skb_fill_page_desc(skb, 0, NULL, 0, 0);
+5 -5
drivers/scsi/cxgbi/libcxgbi.c
··· 1814 1814 copy = min(datalen, sglen); 1815 1815 if (i && page == frags[i - 1].page && 1816 1816 sgoffset + sg->offset == 1817 - frags[i - 1].page_offset + frags[i - 1].size) { 1818 - frags[i - 1].size += copy; 1817 + frags[i - 1].page_offset + skb_frag_size(&frags[i - 1])) { 1818 + skb_frag_size_add(&frags[i - 1], copy); 1819 1819 } else { 1820 1820 if (i >= frag_max) { 1821 1821 pr_warn("too many pages %u, dlen %u.\n", ··· 1825 1825 1826 1826 frags[i].page = page; 1827 1827 frags[i].page_offset = sg->offset + sgoffset; 1828 - frags[i].size = copy; 1828 + skb_frag_size_set(&frags[i], copy); 1829 1829 i++; 1830 1830 } 1831 1831 datalen -= copy; ··· 1951 1951 char *src = kmap_atomic(frag->page, 1952 1952 KM_SOFTIRQ0); 1953 1953 1954 - memcpy(dst, src+frag->page_offset, frag->size); 1955 - dst += frag->size; 1954 + memcpy(dst, src+frag->page_offset, skb_frag_size(frag)); 1955 + dst += skb_frag_size(frag); 1956 1956 kunmap_atomic(src, KM_SOFTIRQ0); 1957 1957 } 1958 1958 if (padlen) {
+1 -1
drivers/scsi/fcoe/fcoe_transport.c
··· 105 105 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 106 106 frag = &skb_shinfo(skb)->frags[i]; 107 107 off = frag->page_offset; 108 - len = frag->size; 108 + len = skb_frag_size(frag); 109 109 while (len > 0) { 110 110 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); 111 111 data = kmap_atomic(
+2 -2
drivers/staging/hv/netvsc_drv.c
··· 169 169 170 170 /* Additional fragments are after SKB data */ 171 171 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 172 - skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 172 + const skb_frag_t *f = &skb_shinfo(skb)->frags[i]; 173 173 174 174 packet->page_buf[i+2].pfn = page_to_pfn(skb_frag_page(f)); 175 175 packet->page_buf[i+2].offset = f->page_offset; 176 - packet->page_buf[i+2].len = f->size; 176 + packet->page_buf[i+2].len = skb_frag_size(f); 177 177 } 178 178 179 179 /* Set the completion routine */
+24 -4
include/linux/skbuff.h
··· 150 150 #endif 151 151 }; 152 152 153 + static inline unsigned int skb_frag_size(const skb_frag_t *frag) 154 + { 155 + return frag->size; 156 + } 157 + 158 + static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) 159 + { 160 + frag->size = size; 161 + } 162 + 163 + static inline void skb_frag_size_add(skb_frag_t *frag, int delta) 164 + { 165 + frag->size += delta; 166 + } 167 + 168 + static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) 169 + { 170 + frag->size -= delta; 171 + } 172 + 153 173 #define HAVE_HW_TIME_STAMP 154 174 155 175 /** ··· 1152 1132 int i, len = 0; 1153 1133 1154 1134 for (i = (int)skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) 1155 - len += skb_shinfo(skb)->frags[i].size; 1135 + len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 1156 1136 return len + skb_headlen(skb); 1157 1137 } 1158 1138 ··· 1176 1156 1177 1157 frag->page = page; 1178 1158 frag->page_offset = off; 1179 - frag->size = size; 1159 + skb_frag_size_set(frag, size); 1180 1160 } 1181 1161 1182 1162 /** ··· 1927 1907 const struct page *page, int off) 1928 1908 { 1929 1909 if (i) { 1930 - struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1910 + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1]; 1931 1911 1932 1912 return page == skb_frag_page(frag) && 1933 - off == frag->page_offset + frag->size; 1913 + off == frag->page_offset + skb_frag_size(frag); 1934 1914 } 1935 1915 return 0; 1936 1916 }
+2 -3
net/appletalk/ddp.c
··· 951 951 /* checksum stuff in frags */ 952 952 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 953 953 int end; 954 - 954 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 955 955 WARN_ON(start > offset + len); 956 956 957 - end = start + skb_shinfo(skb)->frags[i].size; 957 + end = start + skb_frag_size(frag); 958 958 if ((copy = end - offset) > 0) { 959 959 u8 *vaddr; 960 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 961 960 962 961 if (copy > len) 963 962 copy = len;
+8 -8
net/core/datagram.c
··· 324 324 /* Copy paged appendix. Hmm... why does this look so complicated? */ 325 325 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 326 326 int end; 327 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 327 328 328 329 WARN_ON(start > offset + len); 329 330 330 - end = start + skb_shinfo(skb)->frags[i].size; 331 + end = start + skb_frag_size(frag); 331 332 if ((copy = end - offset) > 0) { 332 333 int err; 333 334 u8 *vaddr; 334 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 335 335 struct page *page = skb_frag_page(frag); 336 336 337 337 if (copy > len) ··· 410 410 /* Copy paged appendix. Hmm... why does this look so complicated? */ 411 411 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 412 412 int end; 413 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 413 414 414 415 WARN_ON(start > offset + len); 415 416 416 - end = start + skb_shinfo(skb)->frags[i].size; 417 + end = start + skb_frag_size(frag); 417 418 if ((copy = end - offset) > 0) { 418 419 int err; 419 420 u8 *vaddr; 420 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 421 421 struct page *page = skb_frag_page(frag); 422 422 423 423 if (copy > len) ··· 500 500 /* Copy paged appendix. Hmm... why does this look so complicated? 
*/ 501 501 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 502 502 int end; 503 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 503 504 504 505 WARN_ON(start > offset + len); 505 506 506 - end = start + skb_shinfo(skb)->frags[i].size; 507 + end = start + skb_frag_size(frag); 507 508 if ((copy = end - offset) > 0) { 508 509 int err; 509 510 u8 *vaddr; 510 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 511 511 struct page *page = skb_frag_page(frag); 512 512 513 513 if (copy > len) ··· 585 585 586 586 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 587 587 int end; 588 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 588 589 589 590 WARN_ON(start > offset + len); 590 591 591 - end = start + skb_shinfo(skb)->frags[i].size; 592 + end = start + skb_frag_size(frag); 592 593 if ((copy = end - offset) > 0) { 593 594 __wsum csum2; 594 595 int err = 0; 595 596 u8 *vaddr; 596 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 597 597 struct page *page = skb_frag_page(frag); 598 598 599 599 if (copy > len)
+3 -3
net/core/dev.c
··· 3489 3489 skb->data_len -= grow; 3490 3490 3491 3491 skb_shinfo(skb)->frags[0].page_offset += grow; 3492 - skb_shinfo(skb)->frags[0].size -= grow; 3492 + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], grow); 3493 3493 3494 - if (unlikely(!skb_shinfo(skb)->frags[0].size)) { 3494 + if (unlikely(!skb_frag_size(&skb_shinfo(skb)->frags[0]))) { 3495 3495 skb_frag_unref(skb, 0); 3496 3496 memmove(skb_shinfo(skb)->frags, 3497 3497 skb_shinfo(skb)->frags + 1, ··· 3559 3559 !PageHighMem(skb_frag_page(&skb_shinfo(skb)->frags[0]))) { 3560 3560 NAPI_GRO_CB(skb)->frag0 = 3561 3561 skb_frag_address(&skb_shinfo(skb)->frags[0]); 3562 - NAPI_GRO_CB(skb)->frag0_len = skb_shinfo(skb)->frags[0].size; 3562 + NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(&skb_shinfo(skb)->frags[0]); 3563 3563 } 3564 3564 } 3565 3565 EXPORT_SYMBOL(skb_gro_reset_offset);
+6 -6
net/core/pktgen.c
··· 2606 2606 skb_shinfo(skb)->frags[i].page_offset = 0; 2607 2607 /*last fragment, fill rest of data*/ 2608 2608 if (i == (frags - 1)) 2609 - skb_shinfo(skb)->frags[i].size = 2610 - (datalen < PAGE_SIZE ? datalen : PAGE_SIZE); 2609 + skb_frag_size_set(&skb_shinfo(skb)->frags[i], 2610 + (datalen < PAGE_SIZE ? datalen : PAGE_SIZE)); 2611 2611 else 2612 - skb_shinfo(skb)->frags[i].size = frag_len; 2613 - datalen -= skb_shinfo(skb)->frags[i].size; 2614 - skb->len += skb_shinfo(skb)->frags[i].size; 2615 - skb->data_len += skb_shinfo(skb)->frags[i].size; 2612 + skb_frag_size_set(&skb_shinfo(skb)->frags[i], frag_len); 2613 + datalen -= skb_frag_size(&skb_shinfo(skb)->frags[i]); 2614 + skb->len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 2615 + skb->data_len += skb_frag_size(&skb_shinfo(skb)->frags[i]); 2616 2616 i++; 2617 2617 skb_shinfo(skb)->nr_frags = i; 2618 2618 }
+38 -34
net/core/skbuff.c
··· 659 659 } 660 660 vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); 661 661 memcpy(page_address(page), 662 - vaddr + f->page_offset, f->size); 662 + vaddr + f->page_offset, skb_frag_size(f)); 663 663 kunmap_skb_frag(vaddr); 664 664 page->private = (unsigned long)head; 665 665 head = page; ··· 1190 1190 goto drop_pages; 1191 1191 1192 1192 for (; i < nfrags; i++) { 1193 - int end = offset + skb_shinfo(skb)->frags[i].size; 1193 + int end = offset + skb_frag_size(&skb_shinfo(skb)->frags[i]); 1194 1194 1195 1195 if (end < len) { 1196 1196 offset = end; 1197 1197 continue; 1198 1198 } 1199 1199 1200 - skb_shinfo(skb)->frags[i++].size = len - offset; 1200 + skb_frag_size_set(&skb_shinfo(skb)->frags[i++], len - offset); 1201 1201 1202 1202 drop_pages: 1203 1203 skb_shinfo(skb)->nr_frags = i; ··· 1306 1306 /* Estimate size of pulled pages. */ 1307 1307 eat = delta; 1308 1308 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1309 - if (skb_shinfo(skb)->frags[i].size >= eat) 1309 + int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 1310 + 1311 + if (size >= eat) 1310 1312 goto pull_pages; 1311 - eat -= skb_shinfo(skb)->frags[i].size; 1313 + eat -= size; 1312 1314 } 1313 1315 1314 1316 /* If we need update frag list, we are in troubles. 
··· 1373 1371 eat = delta; 1374 1372 k = 0; 1375 1373 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1376 - if (skb_shinfo(skb)->frags[i].size <= eat) { 1374 + int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 1375 + 1376 + if (size <= eat) { 1377 1377 skb_frag_unref(skb, i); 1378 - eat -= skb_shinfo(skb)->frags[i].size; 1378 + eat -= size; 1379 1379 } else { 1380 1380 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1381 1381 if (eat) { 1382 1382 skb_shinfo(skb)->frags[k].page_offset += eat; 1383 - skb_shinfo(skb)->frags[k].size -= eat; 1383 + skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 1384 1384 eat = 0; 1385 1385 } 1386 1386 k++; ··· 1437 1433 1438 1434 WARN_ON(start > offset + len); 1439 1435 1440 - end = start + skb_shinfo(skb)->frags[i].size; 1436 + end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 1441 1437 if ((copy = end - offset) > 0) { 1442 1438 u8 *vaddr; 1443 1439 ··· 1636 1632 const skb_frag_t *f = &skb_shinfo(skb)->frags[seg]; 1637 1633 1638 1634 if (__splice_segment(skb_frag_page(f), 1639 - f->page_offset, f->size, 1635 + f->page_offset, skb_frag_size(f), 1640 1636 offset, len, skb, spd, 0, sk, pipe)) 1641 1637 return 1; 1642 1638 } ··· 1746 1742 1747 1743 WARN_ON(start > offset + len); 1748 1744 1749 - end = start + frag->size; 1745 + end = start + skb_frag_size(frag); 1750 1746 if ((copy = end - offset) > 0) { 1751 1747 u8 *vaddr; 1752 1748 ··· 1819 1815 1820 1816 WARN_ON(start > offset + len); 1821 1817 1822 - end = start + skb_shinfo(skb)->frags[i].size; 1818 + end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 1823 1819 if ((copy = end - offset) > 0) { 1824 1820 __wsum csum2; 1825 1821 u8 *vaddr; ··· 1894 1890 1895 1891 WARN_ON(start > offset + len); 1896 1892 1897 - end = start + skb_shinfo(skb)->frags[i].size; 1893 + end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 1898 1894 if ((copy = end - offset) > 0) { 1899 1895 __wsum csum2; 1900 1896 u8 *vaddr; ··· 2167 2163 skb->data_len = len - pos; 2168 2164 
2169 2165 for (i = 0; i < nfrags; i++) { 2170 - int size = skb_shinfo(skb)->frags[i].size; 2166 + int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 2171 2167 2172 2168 if (pos + size > len) { 2173 2169 skb_shinfo(skb1)->frags[k] = skb_shinfo(skb)->frags[i]; ··· 2183 2179 */ 2184 2180 skb_frag_ref(skb, i); 2185 2181 skb_shinfo(skb1)->frags[0].page_offset += len - pos; 2186 - skb_shinfo(skb1)->frags[0].size -= len - pos; 2187 - skb_shinfo(skb)->frags[i].size = len - pos; 2182 + skb_frag_size_sub(&skb_shinfo(skb1)->frags[0], len - pos); 2183 + skb_frag_size_set(&skb_shinfo(skb)->frags[i], len - pos); 2188 2184 skb_shinfo(skb)->nr_frags++; 2189 2185 } 2190 2186 k++; ··· 2262 2258 } else { 2263 2259 merge = to - 1; 2264 2260 2265 - todo -= fragfrom->size; 2261 + todo -= skb_frag_size(fragfrom); 2266 2262 if (todo < 0) { 2267 2263 if (skb_prepare_for_shift(skb) || 2268 2264 skb_prepare_for_shift(tgt)) ··· 2272 2268 fragfrom = &skb_shinfo(skb)->frags[from]; 2273 2269 fragto = &skb_shinfo(tgt)->frags[merge]; 2274 2270 2275 - fragto->size += shiftlen; 2276 - fragfrom->size -= shiftlen; 2271 + skb_frag_size_add(fragto, shiftlen); 2272 + skb_frag_size_sub(fragfrom, shiftlen); 2277 2273 fragfrom->page_offset += shiftlen; 2278 2274 2279 2275 goto onlymerged; ··· 2297 2293 fragfrom = &skb_shinfo(skb)->frags[from]; 2298 2294 fragto = &skb_shinfo(tgt)->frags[to]; 2299 2295 2300 - if (todo >= fragfrom->size) { 2296 + if (todo >= skb_frag_size(fragfrom)) { 2301 2297 *fragto = *fragfrom; 2302 - todo -= fragfrom->size; 2298 + todo -= skb_frag_size(fragfrom); 2303 2299 from++; 2304 2300 to++; 2305 2301 ··· 2307 2303 __skb_frag_ref(fragfrom); 2308 2304 fragto->page = fragfrom->page; 2309 2305 fragto->page_offset = fragfrom->page_offset; 2310 - fragto->size = todo; 2306 + skb_frag_size_set(fragto, todo); 2311 2307 2312 2308 fragfrom->page_offset += todo; 2313 - fragfrom->size -= todo; 2309 + skb_frag_size_sub(fragfrom, todo); 2314 2310 todo = 0; 2315 2311 2316 2312 to++; ··· 2325 
2321 fragfrom = &skb_shinfo(skb)->frags[0]; 2326 2322 fragto = &skb_shinfo(tgt)->frags[merge]; 2327 2323 2328 - fragto->size += fragfrom->size; 2324 + skb_frag_size_add(fragto, skb_frag_size(fragfrom)); 2329 2325 __skb_frag_unref(fragfrom); 2330 2326 } 2331 2327 ··· 2423 2419 2424 2420 while (st->frag_idx < skb_shinfo(st->cur_skb)->nr_frags) { 2425 2421 frag = &skb_shinfo(st->cur_skb)->frags[st->frag_idx]; 2426 - block_limit = frag->size + st->stepped_offset; 2422 + block_limit = skb_frag_size(frag) + st->stepped_offset; 2427 2423 2428 2424 if (abs_offset < block_limit) { 2429 2425 if (!st->frag_data) ··· 2441 2437 } 2442 2438 2443 2439 st->frag_idx++; 2444 - st->stepped_offset += frag->size; 2440 + st->stepped_offset += skb_frag_size(frag); 2445 2441 } 2446 2442 2447 2443 if (st->frag_data) { ··· 2571 2567 left = PAGE_SIZE - frag->page_offset; 2572 2568 copy = (length > left)? left : length; 2573 2569 2574 - ret = getfrag(from, skb_frag_address(frag) + frag->size, 2570 + ret = getfrag(from, skb_frag_address(frag) + skb_frag_size(frag), 2575 2571 offset, copy, 0, skb); 2576 2572 if (ret < 0) 2577 2573 return -EFAULT; 2578 2574 2579 2575 /* copy was successful so update the size parameters */ 2580 - frag->size += copy; 2576 + skb_frag_size_add(frag, copy); 2581 2577 skb->len += copy; 2582 2578 skb->data_len += copy; 2583 2579 offset += copy; ··· 2724 2720 while (pos < offset + len && i < nfrags) { 2725 2721 *frag = skb_shinfo(skb)->frags[i]; 2726 2722 __skb_frag_ref(frag); 2727 - size = frag->size; 2723 + size = skb_frag_size(frag); 2728 2724 2729 2725 if (pos < offset) { 2730 2726 frag->page_offset += offset - pos; 2731 - frag->size -= offset - pos; 2727 + skb_frag_size_sub(frag, offset - pos); 2732 2728 } 2733 2729 2734 2730 skb_shinfo(nskb)->nr_frags++; ··· 2737 2733 i++; 2738 2734 pos += size; 2739 2735 } else { 2740 - frag->size -= pos + size - (offset + len); 2736 + skb_frag_size_sub(frag, pos + size - (offset + len)); 2741 2737 goto skip_fraglist; 2742 2738 } 
2743 2739 ··· 2817 2813 } while (--i); 2818 2814 2819 2815 frag->page_offset += offset; 2820 - frag->size -= offset; 2816 + skb_frag_size_sub(frag, offset); 2821 2817 2822 2818 skb->truesize -= skb->data_len; 2823 2819 skb->len -= skb->data_len; ··· 2869 2865 unsigned int eat = offset - headlen; 2870 2866 2871 2867 skbinfo->frags[0].page_offset += eat; 2872 - skbinfo->frags[0].size -= eat; 2868 + skb_frag_size_sub(&skbinfo->frags[0], eat); 2873 2869 skb->data_len -= eat; 2874 2870 skb->len -= eat; 2875 2871 offset = headlen; ··· 2940 2936 2941 2937 WARN_ON(start > offset + len); 2942 2938 2943 - end = start + skb_shinfo(skb)->frags[i].size; 2939 + end = start + skb_frag_size(&skb_shinfo(skb)->frags[i]); 2944 2940 if ((copy = end - offset) > 0) { 2945 2941 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2946 2942
+2 -2
net/core/user_dma.c
··· 71 71 /* Copy paged appendix. Hmm... why does this look so complicated? */ 72 72 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 73 73 int end; 74 + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 74 75 75 76 WARN_ON(start > offset + len); 76 77 77 - end = start + skb_shinfo(skb)->frags[i].size; 78 + end = start + skb_frag_size(frag); 78 79 copy = end - offset; 79 80 if (copy > 0) { 80 - skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 81 81 struct page *page = skb_frag_page(frag); 82 82 83 83 if (copy > len)
+4 -4
net/ipv4/inet_lro.c
··· 244 244 skb->truesize += truesize; 245 245 246 246 skb_frags[0].page_offset += hlen; 247 - skb_frags[0].size -= hlen; 247 + skb_frag_size_sub(&skb_frags[0], hlen); 248 248 249 249 while (tcp_data_len > 0) { 250 250 *(lro_desc->next_frag) = *skb_frags; 251 - tcp_data_len -= skb_frags->size; 251 + tcp_data_len -= skb_frag_size(skb_frags); 252 252 lro_desc->next_frag++; 253 253 skb_frags++; 254 254 skb_shinfo(skb)->nr_frags++; ··· 400 400 skb_frags = skb_shinfo(skb)->frags; 401 401 while (data_len > 0) { 402 402 *skb_frags = *frags; 403 - data_len -= frags->size; 403 + data_len -= skb_frag_size(frags); 404 404 skb_frags++; 405 405 frags++; 406 406 skb_shinfo(skb)->nr_frags++; 407 407 } 408 408 409 409 skb_shinfo(skb)->frags[0].page_offset += hdr_len; 410 - skb_shinfo(skb)->frags[0].size -= hdr_len; 410 + skb_frag_size_sub(&skb_shinfo(skb)->frags[0], hdr_len); 411 411 412 412 skb->ip_summed = ip_summed; 413 413 skb->csum = sum;
+2 -2
net/ipv4/ip_fragment.c
··· 599 599 head->next = clone; 600 600 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 601 601 skb_frag_list_init(head); 602 - for (i=0; i<skb_shinfo(head)->nr_frags; i++) 603 - plen += skb_shinfo(head)->frags[i].size; 602 + for (i = 0; i < skb_shinfo(head)->nr_frags; i++) 603 + plen += skb_frag_size(&skb_shinfo(head)->frags[i]); 604 604 clone->len = clone->data_len = head->data_len - plen; 605 605 head->data_len -= clone->len; 606 606 head->len -= clone->len;
+3 -3
net/ipv4/ip_output.c
··· 1015 1015 err = -EMSGSIZE; 1016 1016 goto error; 1017 1017 } 1018 - if (getfrag(from, skb_frag_address(frag)+frag->size, 1018 + if (getfrag(from, skb_frag_address(frag)+skb_frag_size(frag), 1019 1019 offset, copy, skb->len, skb) < 0) { 1020 1020 err = -EFAULT; 1021 1021 goto error; 1022 1022 } 1023 1023 cork->off += copy; 1024 - frag->size += copy; 1024 + skb_frag_size_add(frag, copy); 1025 1025 skb->len += copy; 1026 1026 skb->data_len += copy; 1027 1027 skb->truesize += copy; ··· 1230 1230 if (len > size) 1231 1231 len = size; 1232 1232 if (skb_can_coalesce(skb, i, page, offset)) { 1233 - skb_shinfo(skb)->frags[i-1].size += len; 1233 + skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len); 1234 1234 } else if (i < MAX_SKB_FRAGS) { 1235 1235 get_page(page); 1236 1236 skb_fill_page_desc(skb, i, page, offset, len);
+4 -5
net/ipv4/tcp.c
··· 813 813 goto wait_for_memory; 814 814 815 815 if (can_coalesce) { 816 - skb_shinfo(skb)->frags[i - 1].size += copy; 816 + skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 817 817 } else { 818 818 get_page(page); 819 819 skb_fill_page_desc(skb, i, page, offset, copy); ··· 1058 1058 1059 1059 /* Update the skb. */ 1060 1060 if (merge) { 1061 - skb_shinfo(skb)->frags[i - 1].size += 1062 - copy; 1061 + skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); 1063 1062 } else { 1064 1063 skb_fill_page_desc(skb, i, page, off, copy); 1065 1064 if (TCP_PAGE(sk)) { ··· 3030 3031 for (i = 0; i < shi->nr_frags; ++i) { 3031 3032 const struct skb_frag_struct *f = &shi->frags[i]; 3032 3033 struct page *page = skb_frag_page(f); 3033 - sg_set_page(&sg, page, f->size, f->page_offset); 3034 - if (crypto_hash_update(desc, &sg, f->size)) 3034 + sg_set_page(&sg, page, skb_frag_size(f), f->page_offset); 3035 + if (crypto_hash_update(desc, &sg, skb_frag_size(f))) 3035 3036 return 1; 3036 3037 } 3037 3038
+5 -3
net/ipv4/tcp_output.c
··· 1094 1094 eat = len; 1095 1095 k = 0; 1096 1096 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 1097 - if (skb_shinfo(skb)->frags[i].size <= eat) { 1097 + int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); 1098 + 1099 + if (size <= eat) { 1098 1100 skb_frag_unref(skb, i); 1099 - eat -= skb_shinfo(skb)->frags[i].size; 1101 + eat -= size; 1100 1102 } else { 1101 1103 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; 1102 1104 if (eat) { 1103 1105 skb_shinfo(skb)->frags[k].page_offset += eat; 1104 - skb_shinfo(skb)->frags[k].size -= eat; 1106 + skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); 1105 1107 eat = 0; 1106 1108 } 1107 1109 k++;
+3 -2
net/ipv6/ip6_output.c
··· 1512 1512 err = -EMSGSIZE; 1513 1513 goto error; 1514 1514 } 1515 - if (getfrag(from, skb_frag_address(frag)+frag->size, 1515 + if (getfrag(from, 1516 + skb_frag_address(frag) + skb_frag_size(frag), 1516 1517 offset, copy, skb->len, skb) < 0) { 1517 1518 err = -EFAULT; 1518 1519 goto error; 1519 1520 } 1520 1521 sk->sk_sndmsg_off += copy; 1521 - frag->size += copy; 1522 + skb_frag_size_add(frag, copy); 1522 1523 skb->len += copy; 1523 1524 skb->data_len += copy; 1524 1525 skb->truesize += copy;
+2 -2
net/ipv6/netfilter/nf_conntrack_reasm.c
··· 378 378 head->next = clone; 379 379 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 380 380 skb_frag_list_init(head); 381 - for (i=0; i<skb_shinfo(head)->nr_frags; i++) 382 - plen += skb_shinfo(head)->frags[i].size; 381 + for (i = 0; i < skb_shinfo(head)->nr_frags; i++) 382 + plen += skb_frag_size(&skb_shinfo(head)->frags[i]); 383 383 clone->len = clone->data_len = head->data_len - plen; 384 384 head->data_len -= clone->len; 385 385 head->len -= clone->len;
+2 -2
net/ipv6/reassembly.c
··· 464 464 head->next = clone; 465 465 skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; 466 466 skb_frag_list_init(head); 467 - for (i=0; i<skb_shinfo(head)->nr_frags; i++) 468 - plen += skb_shinfo(head)->frags[i].size; 467 + for (i = 0; i < skb_shinfo(head)->nr_frags; i++) 468 + plen += skb_frag_size(&skb_shinfo(head)->frags[i]); 469 469 clone->len = clone->data_len = head->data_len - plen; 470 470 head->data_len -= clone->len; 471 471 head->len -= clone->len;
+1 -1
net/xfrm/xfrm_ipcomp.c
··· 90 90 len = dlen; 91 91 92 92 frag->page_offset = 0; 93 - frag->size = len; 93 + skb_frag_size_set(frag, len); 94 94 memcpy(skb_frag_address(frag), scratch, len); 95 95 96 96 skb->truesize += len;