Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

treewide: Use sizeof_field() macro

Replace all the occurrences of FIELD_SIZEOF() with sizeof_field() except
at places where these are defined. Later patches will remove the unused
definition of FIELD_SIZEOF().

This patch was generated using the following script:

EXCLUDE_FILES="include/linux/stddef.h|include/linux/kernel.h"

git grep -l -e "\bFIELD_SIZEOF\b" | while read file;
do

if [[ "$file" =~ $EXCLUDE_FILES ]]; then
continue
fi
sed -i -e 's/\bFIELD_SIZEOF\b/sizeof_field/g' $file;
done

Signed-off-by: Pankaj Bharadiya <pankaj.laxminarayan.bharadiya@intel.com>
Link: https://lore.kernel.org/r/20190924105839.110713-3-pankaj.laxminarayan.bharadiya@intel.com
Co-developed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Acked-by: David Miller <davem@davemloft.net> # for net

Authored by Pankaj Bharadiya; committed by Kees Cook.
Commit: c593642c (parent: e4372329)

+298 -298
+1 -1
Documentation/process/coding-style.rst
··· 988 988 989 989 .. code-block:: c 990 990 991 - #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) 991 + #define sizeof_field(t, f) (sizeof(((t*)0)->f)) 992 992 993 993 There are also min() and max() macros that do strict type checking if you 994 994 need them. Feel free to peruse that header file to see what else is already
+1 -1
Documentation/translations/it_IT/process/coding-style.rst
··· 1005 1005 1006 1006 .. code-block:: c 1007 1007 1008 - #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) 1008 + #define sizeof_field(t, f) (sizeof(((t*)0)->f)) 1009 1009 1010 1010 Ci sono anche le macro min() e max() che, se vi serve, effettuano un controllo 1011 1011 rigido sui tipi. Sentitevi liberi di leggere attentamente questo file
+1 -1
Documentation/translations/zh_CN/process/coding-style.rst
··· 826 826 827 827 .. code-block:: c 828 828 829 - #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) 829 + #define sizeof_field(t, f) (sizeof(((t*)0)->f)) 830 830 831 831 还有可以做严格的类型检查的 min() 和 max() 宏,如果你需要可以使用它们。你可以 832 832 自己看看那个头文件里还定义了什么你可以拿来用的东西,如果有定义的话,你就不应
+3 -3
arch/arc/kernel/unwind.c
··· 42 42 43 43 #define EXTRA_INFO(f) { \ 44 44 BUILD_BUG_ON_ZERO(offsetof(struct unwind_frame_info, f) \ 45 - % FIELD_SIZEOF(struct unwind_frame_info, f)) \ 45 + % sizeof_field(struct unwind_frame_info, f)) \ 46 46 + offsetof(struct unwind_frame_info, f) \ 47 - / FIELD_SIZEOF(struct unwind_frame_info, f), \ 48 - FIELD_SIZEOF(struct unwind_frame_info, f) \ 47 + / sizeof_field(struct unwind_frame_info, f), \ 48 + sizeof_field(struct unwind_frame_info, f) \ 49 49 } 50 50 #define PTREGS_INFO(f) EXTRA_INFO(regs.f) 51 51
+2 -2
arch/powerpc/net/bpf_jit32.h
··· 97 97 #ifdef CONFIG_SMP 98 98 #ifdef CONFIG_PPC64 99 99 #define PPC_BPF_LOAD_CPU(r) \ 100 - do { BUILD_BUG_ON(FIELD_SIZEOF(struct paca_struct, paca_index) != 2); \ 100 + do { BUILD_BUG_ON(sizeof_field(struct paca_struct, paca_index) != 2); \ 101 101 PPC_LHZ_OFFS(r, 13, offsetof(struct paca_struct, paca_index)); \ 102 102 } while (0) 103 103 #else 104 104 #define PPC_BPF_LOAD_CPU(r) \ 105 - do { BUILD_BUG_ON(FIELD_SIZEOF(struct task_struct, cpu) != 4); \ 105 + do { BUILD_BUG_ON(sizeof_field(struct task_struct, cpu) != 4); \ 106 106 PPC_LHZ_OFFS(r, 2, offsetof(struct task_struct, cpu)); \ 107 107 } while(0) 108 108 #endif
+8 -8
arch/powerpc/net/bpf_jit_comp.c
··· 321 321 ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); 322 322 break; 323 323 case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */ 324 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 324 + BUILD_BUG_ON(sizeof_field(struct sk_buff, len) != 4); 325 325 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); 326 326 break; 327 327 case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */ ··· 333 333 334 334 /*** Ancillary info loads ***/ 335 335 case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */ 336 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 336 + BUILD_BUG_ON(sizeof_field(struct sk_buff, 337 337 protocol) != 2); 338 338 PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff, 339 339 protocol)); 340 340 break; 341 341 case BPF_ANC | SKF_AD_IFINDEX: 342 342 case BPF_ANC | SKF_AD_HATYPE: 343 - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, 343 + BUILD_BUG_ON(sizeof_field(struct net_device, 344 344 ifindex) != 4); 345 - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, 345 + BUILD_BUG_ON(sizeof_field(struct net_device, 346 346 type) != 2); 347 347 PPC_LL_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, 348 348 dev)); ··· 365 365 366 366 break; 367 367 case BPF_ANC | SKF_AD_MARK: 368 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 368 + BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4); 369 369 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 370 370 mark)); 371 371 break; 372 372 case BPF_ANC | SKF_AD_RXHASH: 373 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 373 + BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4); 374 374 PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 375 375 hash)); 376 376 break; 377 377 case BPF_ANC | SKF_AD_VLAN_TAG: 378 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 378 + BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2); 379 379 380 380 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 381 381 vlan_tci)); ··· 388 388 PPC_ANDI(r_A, r_A, 1); 389 389 break; 390 390 
case BPF_ANC | SKF_AD_QUEUE: 391 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 391 + BUILD_BUG_ON(sizeof_field(struct sk_buff, 392 392 queue_mapping) != 2); 393 393 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 394 394 queue_mapping));
+4 -4
arch/sparc/net/bpf_jit_comp_32.c
··· 180 180 181 181 #define emit_loadptr(BASE, STRUCT, FIELD, DEST) \ 182 182 do { unsigned int _off = offsetof(STRUCT, FIELD); \ 183 - BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *)); \ 183 + BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(void *)); \ 184 184 *prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST); \ 185 185 } while (0) 186 186 187 187 #define emit_load32(BASE, STRUCT, FIELD, DEST) \ 188 188 do { unsigned int _off = offsetof(STRUCT, FIELD); \ 189 - BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32)); \ 189 + BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u32)); \ 190 190 *prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST); \ 191 191 } while (0) 192 192 193 193 #define emit_load16(BASE, STRUCT, FIELD, DEST) \ 194 194 do { unsigned int _off = offsetof(STRUCT, FIELD); \ 195 - BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16)); \ 195 + BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u16)); \ 196 196 *prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST); \ 197 197 } while (0) 198 198 ··· 202 202 } while (0) 203 203 204 204 #define emit_load8(BASE, STRUCT, FIELD, DEST) \ 205 - do { BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8)); \ 205 + do { BUILD_BUG_ON(sizeof_field(STRUCT, FIELD) != sizeof(u8)); \ 206 206 __emit_load8(BASE, STRUCT, FIELD, DEST); \ 207 207 } while (0) 208 208
+1 -1
arch/x86/kernel/fpu/xstate.c
··· 259 259 xmm_space); 260 260 261 261 xstate_offsets[XFEATURE_SSE] = xstate_sizes[XFEATURE_FP]; 262 - xstate_sizes[XFEATURE_SSE] = FIELD_SIZEOF(struct fxregs_state, 262 + xstate_sizes[XFEATURE_SSE] = sizeof_field(struct fxregs_state, 263 263 xmm_space); 264 264 265 265 for (i = FIRST_EXTENDED_XFEATURE; i < XFEATURE_MAX; i++) {
+2 -2
block/blk-core.c
··· 1792 1792 { 1793 1793 BUILD_BUG_ON(REQ_OP_LAST >= (1 << REQ_OP_BITS)); 1794 1794 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 1795 - FIELD_SIZEOF(struct request, cmd_flags)); 1795 + sizeof_field(struct request, cmd_flags)); 1796 1796 BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 * 1797 - FIELD_SIZEOF(struct bio, bi_opf)); 1797 + sizeof_field(struct bio, bi_opf)); 1798 1798 1799 1799 /* used for unplugging and affects IO latency/throughput - HIGHPRI */ 1800 1800 kblockd_workqueue = alloc_workqueue("kblockd",
+2 -2
crypto/adiantum.c
··· 436 436 437 437 BUILD_BUG_ON(offsetofend(struct adiantum_request_ctx, u) != 438 438 sizeof(struct adiantum_request_ctx)); 439 - subreq_size = max(FIELD_SIZEOF(struct adiantum_request_ctx, 439 + subreq_size = max(sizeof_field(struct adiantum_request_ctx, 440 440 u.hash_desc) + 441 441 crypto_shash_descsize(hash), 442 - FIELD_SIZEOF(struct adiantum_request_ctx, 442 + sizeof_field(struct adiantum_request_ctx, 443 443 u.streamcipher_req) + 444 444 crypto_skcipher_reqsize(streamcipher)); 445 445
+1 -1
crypto/essiv.c
··· 347 347 if (IS_ERR(aead)) 348 348 return PTR_ERR(aead); 349 349 350 - subreq_size = FIELD_SIZEOF(struct essiv_aead_request_ctx, aead_req) + 350 + subreq_size = sizeof_field(struct essiv_aead_request_ctx, aead_req) + 351 351 crypto_aead_reqsize(aead); 352 352 353 353 tctx->ivoffset = offsetof(struct essiv_aead_request_ctx, aead_req) +
+1 -1
drivers/firmware/efi/efi.c
··· 681 681 { name }, \ 682 682 { prop }, \ 683 683 offsetof(struct efi_fdt_params, field), \ 684 - FIELD_SIZEOF(struct efi_fdt_params, field) \ 684 + sizeof_field(struct efi_fdt_params, field) \ 685 685 } 686 686 687 687 struct params {
+1 -1
drivers/infiniband/hw/efa/efa_verbs.c
··· 145 145 } 146 146 147 147 #define field_avail(x, fld, sz) (offsetof(typeof(x), fld) + \ 148 - FIELD_SIZEOF(typeof(x), fld) <= (sz)) 148 + sizeof_field(typeof(x), fld) <= (sz)) 149 149 150 150 #define is_reserved_cleared(reserved) \ 151 151 !memchr_inv(reserved, 0, sizeof(reserved))
+1 -1
drivers/infiniband/hw/hfi1/sdma.c
··· 848 848 .nelem_hint = NR_CPUS_HINT, 849 849 .head_offset = offsetof(struct sdma_rht_node, node), 850 850 .key_offset = offsetof(struct sdma_rht_node, cpu_id), 851 - .key_len = FIELD_SIZEOF(struct sdma_rht_node, cpu_id), 851 + .key_len = sizeof_field(struct sdma_rht_node, cpu_id), 852 852 .max_size = NR_CPUS, 853 853 .min_size = 8, 854 854 .automatic_shrinking = true,
+2 -2
drivers/infiniband/hw/hfi1/verbs.h
··· 107 107 HFI1_HAS_GRH = (1 << 0), 108 108 }; 109 109 110 - #define LRH_16B_BYTES (FIELD_SIZEOF(struct hfi1_16b_header, lrh)) 110 + #define LRH_16B_BYTES (sizeof_field(struct hfi1_16b_header, lrh)) 111 111 #define LRH_16B_DWORDS (LRH_16B_BYTES / sizeof(u32)) 112 - #define LRH_9B_BYTES (FIELD_SIZEOF(struct ib_header, lrh)) 112 + #define LRH_9B_BYTES (sizeof_field(struct ib_header, lrh)) 113 113 #define LRH_9B_DWORDS (LRH_9B_BYTES / sizeof(u32)) 114 114 115 115 /* 24Bits for qpn, upper 8Bits reserved */
+1 -1
drivers/infiniband/ulp/opa_vnic/opa_vnic_ethtool.c
··· 63 63 }; 64 64 }; 65 65 66 - #define VNIC_STAT(m) { FIELD_SIZEOF(struct opa_vnic_stats, m), \ 66 + #define VNIC_STAT(m) { sizeof_field(struct opa_vnic_stats, m), \ 67 67 offsetof(struct opa_vnic_stats, m) } 68 68 69 69 static struct vnic_stats vnic_gstrings_stats[] = {
+1 -1
drivers/md/raid5-ppl.c
··· 1360 1360 return -EINVAL; 1361 1361 } 1362 1362 1363 - max_disks = FIELD_SIZEOF(struct ppl_log, disk_flush_bitmap) * 1363 + max_disks = sizeof_field(struct ppl_log, disk_flush_bitmap) * 1364 1364 BITS_PER_BYTE; 1365 1365 if (conf->raid_disks > max_disks) { 1366 1366 pr_warn("md/raid:%s PPL doesn't support over %d disks in the array\n",
+12 -12
drivers/media/platform/omap3isp/isppreview.c
··· 753 753 preview_config_luma_enhancement, 754 754 preview_enable_luma_enhancement, 755 755 offsetof(struct prev_params, luma), 756 - FIELD_SIZEOF(struct prev_params, luma), 756 + sizeof_field(struct prev_params, luma), 757 757 offsetof(struct omap3isp_prev_update_config, luma), 758 758 }, /* OMAP3ISP_PREV_INVALAW */ { 759 759 NULL, ··· 762 762 preview_config_hmed, 763 763 preview_enable_hmed, 764 764 offsetof(struct prev_params, hmed), 765 - FIELD_SIZEOF(struct prev_params, hmed), 765 + sizeof_field(struct prev_params, hmed), 766 766 offsetof(struct omap3isp_prev_update_config, hmed), 767 767 }, /* OMAP3ISP_PREV_CFA */ { 768 768 preview_config_cfa, 769 769 NULL, 770 770 offsetof(struct prev_params, cfa), 771 - FIELD_SIZEOF(struct prev_params, cfa), 771 + sizeof_field(struct prev_params, cfa), 772 772 offsetof(struct omap3isp_prev_update_config, cfa), 773 773 }, /* OMAP3ISP_PREV_CHROMA_SUPP */ { 774 774 preview_config_chroma_suppression, 775 775 preview_enable_chroma_suppression, 776 776 offsetof(struct prev_params, csup), 777 - FIELD_SIZEOF(struct prev_params, csup), 777 + sizeof_field(struct prev_params, csup), 778 778 offsetof(struct omap3isp_prev_update_config, csup), 779 779 }, /* OMAP3ISP_PREV_WB */ { 780 780 preview_config_whitebalance, 781 781 NULL, 782 782 offsetof(struct prev_params, wbal), 783 - FIELD_SIZEOF(struct prev_params, wbal), 783 + sizeof_field(struct prev_params, wbal), 784 784 offsetof(struct omap3isp_prev_update_config, wbal), 785 785 }, /* OMAP3ISP_PREV_BLKADJ */ { 786 786 preview_config_blkadj, 787 787 NULL, 788 788 offsetof(struct prev_params, blkadj), 789 - FIELD_SIZEOF(struct prev_params, blkadj), 789 + sizeof_field(struct prev_params, blkadj), 790 790 offsetof(struct omap3isp_prev_update_config, blkadj), 791 791 }, /* OMAP3ISP_PREV_RGB2RGB */ { 792 792 preview_config_rgb_blending, 793 793 NULL, 794 794 offsetof(struct prev_params, rgb2rgb), 795 - FIELD_SIZEOF(struct prev_params, rgb2rgb), 795 + sizeof_field(struct prev_params, 
rgb2rgb), 796 796 offsetof(struct omap3isp_prev_update_config, rgb2rgb), 797 797 }, /* OMAP3ISP_PREV_COLOR_CONV */ { 798 798 preview_config_csc, 799 799 NULL, 800 800 offsetof(struct prev_params, csc), 801 - FIELD_SIZEOF(struct prev_params, csc), 801 + sizeof_field(struct prev_params, csc), 802 802 offsetof(struct omap3isp_prev_update_config, csc), 803 803 }, /* OMAP3ISP_PREV_YC_LIMIT */ { 804 804 preview_config_yc_range, 805 805 NULL, 806 806 offsetof(struct prev_params, yclimit), 807 - FIELD_SIZEOF(struct prev_params, yclimit), 807 + sizeof_field(struct prev_params, yclimit), 808 808 offsetof(struct omap3isp_prev_update_config, yclimit), 809 809 }, /* OMAP3ISP_PREV_DEFECT_COR */ { 810 810 preview_config_dcor, 811 811 preview_enable_dcor, 812 812 offsetof(struct prev_params, dcor), 813 - FIELD_SIZEOF(struct prev_params, dcor), 813 + sizeof_field(struct prev_params, dcor), 814 814 offsetof(struct omap3isp_prev_update_config, dcor), 815 815 }, /* Previously OMAP3ISP_PREV_GAMMABYPASS, not used anymore */ { 816 816 NULL, ··· 828 828 preview_config_noisefilter, 829 829 preview_enable_noisefilter, 830 830 offsetof(struct prev_params, nf), 831 - FIELD_SIZEOF(struct prev_params, nf), 831 + sizeof_field(struct prev_params, nf), 832 832 offsetof(struct omap3isp_prev_update_config, nf), 833 833 }, /* OMAP3ISP_PREV_GAMMA */ { 834 834 preview_config_gammacorrn, 835 835 preview_enable_gammacorrn, 836 836 offsetof(struct prev_params, gamma), 837 - FIELD_SIZEOF(struct prev_params, gamma), 837 + sizeof_field(struct prev_params, gamma), 838 838 offsetof(struct omap3isp_prev_update_config, gamma), 839 839 }, /* OMAP3ISP_PREV_CONTRAST */ { 840 840 preview_config_contrast,
+1 -1
drivers/media/v4l2-core/v4l2-ioctl.c
··· 2652 2652 /* Zero struct from after the field to the end */ 2653 2653 #define INFO_FL_CLEAR(v4l2_struct, field) \ 2654 2654 ((offsetof(struct v4l2_struct, field) + \ 2655 - FIELD_SIZEOF(struct v4l2_struct, field)) << 16) 2655 + sizeof_field(struct v4l2_struct, field)) << 16) 2656 2656 #define INFO_FL_CLEAR_MASK (_IOC_SIZEMASK << 16) 2657 2657 2658 2658 #define DEFINE_V4L_STUB_FUNC(_vidioc) \
+2 -2
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
··· 129 129 130 130 #define XGMAC_MMC_STAT(_string, _var) \ 131 131 { _string, \ 132 - FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \ 132 + sizeof_field(struct xgbe_mmc_stats, _var), \ 133 133 offsetof(struct xgbe_prv_data, mmc_stats._var), \ 134 134 } 135 135 136 136 #define XGMAC_EXT_STAT(_string, _var) \ 137 137 { _string, \ 138 - FIELD_SIZEOF(struct xgbe_ext_stats, _var), \ 138 + sizeof_field(struct xgbe_ext_stats, _var), \ 139 139 offsetof(struct xgbe_prv_data, ext_stats._var), \ 140 140 } 141 141
+8 -8
drivers/net/ethernet/cavium/liquidio/octeon_console.c
··· 205 205 major_version = (u32)__cvmx_bootmem_desc_get( 206 206 oct, oct->bootmem_desc_addr, 207 207 offsetof(struct cvmx_bootmem_desc, major_version), 208 - FIELD_SIZEOF(struct cvmx_bootmem_desc, major_version)); 208 + sizeof_field(struct cvmx_bootmem_desc, major_version)); 209 209 minor_version = (u32)__cvmx_bootmem_desc_get( 210 210 oct, oct->bootmem_desc_addr, 211 211 offsetof(struct cvmx_bootmem_desc, minor_version), 212 - FIELD_SIZEOF(struct cvmx_bootmem_desc, minor_version)); 212 + sizeof_field(struct cvmx_bootmem_desc, minor_version)); 213 213 214 214 dev_dbg(&oct->pci_dev->dev, "%s: major_version=%d\n", __func__, 215 215 major_version); ··· 237 237 oct, named_addr, 238 238 offsetof(struct cvmx_bootmem_named_block_desc, 239 239 base_addr), 240 - FIELD_SIZEOF( 240 + sizeof_field( 241 241 struct cvmx_bootmem_named_block_desc, 242 242 base_addr)); 243 243 desc->size = __cvmx_bootmem_desc_get(oct, named_addr, 244 244 offsetof(struct cvmx_bootmem_named_block_desc, 245 245 size), 246 - FIELD_SIZEOF( 246 + sizeof_field( 247 247 struct cvmx_bootmem_named_block_desc, 248 248 size)); 249 249 ··· 268 268 oct, oct->bootmem_desc_addr, 269 269 offsetof(struct cvmx_bootmem_desc, 270 270 named_block_array_addr), 271 - FIELD_SIZEOF(struct cvmx_bootmem_desc, 271 + sizeof_field(struct cvmx_bootmem_desc, 272 272 named_block_array_addr)); 273 273 u32 num_blocks = (u32)__cvmx_bootmem_desc_get( 274 274 oct, oct->bootmem_desc_addr, 275 275 offsetof(struct cvmx_bootmem_desc, 276 276 nb_num_blocks), 277 - FIELD_SIZEOF(struct cvmx_bootmem_desc, 277 + sizeof_field(struct cvmx_bootmem_desc, 278 278 nb_num_blocks)); 279 279 280 280 u32 name_length = (u32)__cvmx_bootmem_desc_get( 281 281 oct, oct->bootmem_desc_addr, 282 282 offsetof(struct cvmx_bootmem_desc, 283 283 named_block_name_len), 284 - FIELD_SIZEOF(struct cvmx_bootmem_desc, 284 + sizeof_field(struct cvmx_bootmem_desc, 285 285 named_block_name_len)); 286 286 287 287 u64 named_addr = named_block_array_addr; ··· 292 292 offsetof( 
293 293 struct cvmx_bootmem_named_block_desc, 294 294 size), 295 - FIELD_SIZEOF( 295 + sizeof_field( 296 296 struct cvmx_bootmem_named_block_desc, 297 297 size)); 298 298
+1 -1
drivers/net/ethernet/emulex/benet/be_ethtool.c
··· 23 23 }; 24 24 25 25 enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT}; 26 - #define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \ 26 + #define FIELDINFO(_struct, field) sizeof_field(_struct, field), \ 27 27 offsetof(_struct, field) 28 28 #define DRVSTAT_TX_INFO(field) #field, DRVSTAT_TX,\ 29 29 FIELDINFO(struct be_tx_stats, field)
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
··· 10240 10240 return ret; 10241 10241 } 10242 10242 10243 - data_len_per_desc = FIELD_SIZEOF(struct hclge_desc, data); 10243 + data_len_per_desc = sizeof_field(struct hclge_desc, data); 10244 10244 *len = 0; 10245 10245 for (i = 0; i < dfx_reg_type_num; i++) { 10246 10246 bd_num = bd_num_list[i];
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
··· 614 614 } 615 615 616 616 memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc, 617 - FIELD_SIZEOF(struct hnae3_knic_private_info, prio_tc)); 617 + sizeof_field(struct hnae3_knic_private_info, prio_tc)); 618 618 } 619 619 620 620 static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
+4 -4
drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
··· 450 450 451 451 #define HINIC_FUNC_STAT(_stat_item) { \ 452 452 .name = #_stat_item, \ 453 - .size = FIELD_SIZEOF(struct hinic_vport_stats, _stat_item), \ 453 + .size = sizeof_field(struct hinic_vport_stats, _stat_item), \ 454 454 .offset = offsetof(struct hinic_vport_stats, _stat_item) \ 455 455 } 456 456 ··· 477 477 478 478 #define HINIC_PORT_STAT(_stat_item) { \ 479 479 .name = #_stat_item, \ 480 - .size = FIELD_SIZEOF(struct hinic_phy_port_stats, _stat_item), \ 480 + .size = sizeof_field(struct hinic_phy_port_stats, _stat_item), \ 481 481 .offset = offsetof(struct hinic_phy_port_stats, _stat_item) \ 482 482 } 483 483 ··· 571 571 572 572 #define HINIC_TXQ_STAT(_stat_item) { \ 573 573 .name = "txq%d_"#_stat_item, \ 574 - .size = FIELD_SIZEOF(struct hinic_txq_stats, _stat_item), \ 574 + .size = sizeof_field(struct hinic_txq_stats, _stat_item), \ 575 575 .offset = offsetof(struct hinic_txq_stats, _stat_item) \ 576 576 } 577 577 ··· 586 586 587 587 #define HINIC_RXQ_STAT(_stat_item) { \ 588 588 .name = "rxq%d_"#_stat_item, \ 589 - .size = FIELD_SIZEOF(struct hinic_rxq_stats, _stat_item), \ 589 + .size = sizeof_field(struct hinic_rxq_stats, _stat_item), \ 590 590 .offset = offsetof(struct hinic_rxq_stats, _stat_item) \ 591 591 } 592 592
+1 -1
drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c
··· 18 18 19 19 #define FM10K_STAT_FIELDS(_type, _name, _stat) { \ 20 20 .stat_string = _name, \ 21 - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ 21 + .sizeof_stat = sizeof_field(_type, _stat), \ 22 22 .stat_offset = offsetof(_type, _stat) \ 23 23 } 24 24
+1 -1
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
··· 43 43 */ 44 44 #define I40E_STAT(_type, _name, _stat) { \ 45 45 .stat_string = _name, \ 46 - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ 46 + .sizeof_stat = sizeof_field(_type, _stat), \ 47 47 .stat_offset = offsetof(_type, _stat) \ 48 48 } 49 49
+1 -1
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
··· 659 659 660 660 #define I40E_HMC_STORE(_struct, _ele) \ 661 661 offsetof(struct _struct, _ele), \ 662 - FIELD_SIZEOF(struct _struct, _ele) 662 + sizeof_field(struct _struct, _ele) 663 663 664 664 struct i40e_context_ele { 665 665 u16 offset;
+1 -1
drivers/net/ethernet/intel/iavf/iavf_ethtool.c
··· 42 42 */ 43 43 #define IAVF_STAT(_type, _name, _stat) { \ 44 44 .stat_string = _name, \ 45 - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ 45 + .sizeof_stat = sizeof_field(_type, _stat), \ 46 46 .stat_offset = offsetof(_type, _stat) \ 47 47 } 48 48
+5 -5
drivers/net/ethernet/intel/ice/ice_ethtool.c
··· 15 15 16 16 #define ICE_STAT(_type, _name, _stat) { \ 17 17 .stat_string = _name, \ 18 - .sizeof_stat = FIELD_SIZEOF(_type, _stat), \ 18 + .sizeof_stat = sizeof_field(_type, _stat), \ 19 19 .stat_offset = offsetof(_type, _stat) \ 20 20 } 21 21 ··· 36 36 #define ICE_VSI_STATS_LEN ARRAY_SIZE(ice_gstrings_vsi_stats) 37 37 38 38 #define ICE_PFC_STATS_LEN ( \ 39 - (FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_rx) + \ 40 - FIELD_SIZEOF(struct ice_pf, stats.priority_xon_rx) + \ 41 - FIELD_SIZEOF(struct ice_pf, stats.priority_xoff_tx) + \ 42 - FIELD_SIZEOF(struct ice_pf, stats.priority_xon_tx)) \ 39 + (sizeof_field(struct ice_pf, stats.priority_xoff_rx) + \ 40 + sizeof_field(struct ice_pf, stats.priority_xon_rx) + \ 41 + sizeof_field(struct ice_pf, stats.priority_xoff_tx) + \ 42 + sizeof_field(struct ice_pf, stats.priority_xon_tx)) \ 43 43 / sizeof(u64)) 44 44 #define ICE_ALL_STATS_LEN(n) (ICE_PF_STATS_LEN + ICE_PFC_STATS_LEN + \ 45 45 ICE_VSI_STATS_LEN + ice_q_stats_len(n))
+1 -1
drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
··· 302 302 303 303 #define ICE_CTX_STORE(_struct, _ele, _width, _lsb) { \ 304 304 .offset = offsetof(struct _struct, _ele), \ 305 - .size_of = FIELD_SIZEOF(struct _struct, _ele), \ 305 + .size_of = sizeof_field(struct _struct, _ele), \ 306 306 .width = _width, \ 307 307 .lsb = _lsb, \ 308 308 }
+2 -2
drivers/net/ethernet/intel/igb/igb_ethtool.c
··· 26 26 27 27 #define IGB_STAT(_name, _stat) { \ 28 28 .stat_string = _name, \ 29 - .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \ 29 + .sizeof_stat = sizeof_field(struct igb_adapter, _stat), \ 30 30 .stat_offset = offsetof(struct igb_adapter, _stat) \ 31 31 } 32 32 static const struct igb_stats igb_gstrings_stats[] = { ··· 76 76 77 77 #define IGB_NETDEV_STAT(_net_stat) { \ 78 78 .stat_string = __stringify(_net_stat), \ 79 - .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ 79 + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ 80 80 .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ 81 81 } 82 82 static const struct igb_stats igb_gstrings_net_stats[] = {
+2 -2
drivers/net/ethernet/intel/igc/igc_ethtool.c
··· 16 16 17 17 #define IGC_STAT(_name, _stat) { \ 18 18 .stat_string = _name, \ 19 - .sizeof_stat = FIELD_SIZEOF(struct igc_adapter, _stat), \ 19 + .sizeof_stat = sizeof_field(struct igc_adapter, _stat), \ 20 20 .stat_offset = offsetof(struct igc_adapter, _stat) \ 21 21 } 22 22 ··· 67 67 68 68 #define IGC_NETDEV_STAT(_net_stat) { \ 69 69 .stat_string = __stringify(_net_stat), \ 70 - .sizeof_stat = FIELD_SIZEOF(struct rtnl_link_stats64, _net_stat), \ 70 + .sizeof_stat = sizeof_field(struct rtnl_link_stats64, _net_stat), \ 71 71 .stat_offset = offsetof(struct rtnl_link_stats64, _net_stat) \ 72 72 } 73 73
+2 -2
drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
··· 19 19 }; 20 20 21 21 #define IXGB_STAT(m) IXGB_STATS, \ 22 - FIELD_SIZEOF(struct ixgb_adapter, m), \ 22 + sizeof_field(struct ixgb_adapter, m), \ 23 23 offsetof(struct ixgb_adapter, m) 24 24 #define IXGB_NETDEV_STAT(m) NETDEV_STATS, \ 25 - FIELD_SIZEOF(struct net_device, m), \ 25 + sizeof_field(struct net_device, m), \ 26 26 offsetof(struct net_device, m) 27 27 28 28 static struct ixgb_stats ixgb_gstrings_stats[] = {
+2 -2
drivers/net/ethernet/intel/ixgbevf/ethtool.c
··· 31 31 #define IXGBEVF_STAT(_name, _stat) { \ 32 32 .stat_string = _name, \ 33 33 .type = IXGBEVF_STATS, \ 34 - .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \ 34 + .sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \ 35 35 .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \ 36 36 } 37 37 38 38 #define IXGBEVF_NETDEV_STAT(_net_stat) { \ 39 39 .stat_string = #_net_stat, \ 40 40 .type = NETDEV_STATS, \ 41 - .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ 41 + .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \ 42 42 .stat_offset = offsetof(struct net_device_stats, _net_stat) \ 43 43 } 44 44
+2 -2
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 1432 1432 }; 1433 1433 1434 1434 #define SSTAT(m) \ 1435 - { #m, FIELD_SIZEOF(struct net_device_stats, m), \ 1435 + { #m, sizeof_field(struct net_device_stats, m), \ 1436 1436 offsetof(struct net_device, stats.m), -1 } 1437 1437 1438 1438 #define MIBSTAT(m) \ 1439 - { #m, FIELD_SIZEOF(struct mib_counters, m), \ 1439 + { #m, sizeof_field(struct mib_counters, m), \ 1440 1440 -1, offsetof(struct mv643xx_eth_private, mib_counters.m) } 1441 1441 1442 1442 static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
+1 -1
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
··· 611 611 } 612 612 613 613 #define MLX4_LINK_MODES_SZ \ 614 - (FIELD_SIZEOF(struct mlx4_ptys_reg, eth_proto_cap) * 8) 614 + (sizeof_field(struct mlx4_ptys_reg, eth_proto_cap) * 8) 615 615 616 616 enum ethtool_report { 617 617 SUPPORTED = 0,
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
··· 87 87 * value is not constant during the lifetime 88 88 * of the key object. 89 89 */ 90 - .key_len = FIELD_SIZEOF(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - 91 - FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), 90 + .key_len = sizeof_field(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) - 91 + sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), 92 92 .key_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hw_sa) + 93 - FIELD_SIZEOF(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), 93 + sizeof_field(struct mlx5_ifc_fpga_ipsec_sa_v1, cmd), 94 94 .head_offset = offsetof(struct mlx5_fpga_ipsec_sa_ctx, hash), 95 95 .automatic_shrinking = true, 96 96 .min_size = 1,
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 209 209 }; 210 210 211 211 static const struct rhashtable_params rhash_fte = { 212 - .key_len = FIELD_SIZEOF(struct fs_fte, val), 212 + .key_len = sizeof_field(struct fs_fte, val), 213 213 .key_offset = offsetof(struct fs_fte, val), 214 214 .head_offset = offsetof(struct fs_fte, hash), 215 215 .automatic_shrinking = true, ··· 217 217 }; 218 218 219 219 static const struct rhashtable_params rhash_fg = { 220 - .key_len = FIELD_SIZEOF(struct mlx5_flow_group, mask), 220 + .key_len = sizeof_field(struct mlx5_flow_group, mask), 221 221 .key_offset = offsetof(struct mlx5_flow_group, mask), 222 222 .head_offset = offsetof(struct mlx5_flow_group, hash), 223 223 .automatic_shrinking = true,
+5 -5
drivers/net/ethernet/netronome/nfp/bpf/jit.c
··· 2652 2652 2653 2653 switch (meta->insn.off) { 2654 2654 case offsetof(struct __sk_buff, len): 2655 - if (size != FIELD_SIZEOF(struct __sk_buff, len)) 2655 + if (size != sizeof_field(struct __sk_buff, len)) 2656 2656 return -EOPNOTSUPP; 2657 2657 wrp_mov(nfp_prog, dst, plen_reg(nfp_prog)); 2658 2658 break; 2659 2659 case offsetof(struct __sk_buff, data): 2660 - if (size != FIELD_SIZEOF(struct __sk_buff, data)) 2660 + if (size != sizeof_field(struct __sk_buff, data)) 2661 2661 return -EOPNOTSUPP; 2662 2662 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2663 2663 break; 2664 2664 case offsetof(struct __sk_buff, data_end): 2665 - if (size != FIELD_SIZEOF(struct __sk_buff, data_end)) 2665 + if (size != sizeof_field(struct __sk_buff, data_end)) 2666 2666 return -EOPNOTSUPP; 2667 2667 emit_alu(nfp_prog, dst, 2668 2668 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog)); ··· 2683 2683 2684 2684 switch (meta->insn.off) { 2685 2685 case offsetof(struct xdp_md, data): 2686 - if (size != FIELD_SIZEOF(struct xdp_md, data)) 2686 + if (size != sizeof_field(struct xdp_md, data)) 2687 2687 return -EOPNOTSUPP; 2688 2688 wrp_mov(nfp_prog, dst, pptr_reg(nfp_prog)); 2689 2689 break; 2690 2690 case offsetof(struct xdp_md, data_end): 2691 - if (size != FIELD_SIZEOF(struct xdp_md, data_end)) 2691 + if (size != sizeof_field(struct xdp_md, data_end)) 2692 2692 return -EOPNOTSUPP; 2693 2693 emit_alu(nfp_prog, dst, 2694 2694 plen_reg(nfp_prog), ALU_OP_ADD, pptr_reg(nfp_prog));
+1 -1
drivers/net/ethernet/netronome/nfp/bpf/main.c
··· 15 15 16 16 const struct rhashtable_params nfp_bpf_maps_neutral_params = { 17 17 .nelem_hint = 4, 18 - .key_len = FIELD_SIZEOF(struct bpf_map, id), 18 + .key_len = sizeof_field(struct bpf_map, id), 19 19 .key_offset = offsetof(struct nfp_bpf_neutral_map, map_id), 20 20 .head_offset = offsetof(struct nfp_bpf_neutral_map, l), 21 21 .automatic_shrinking = true,
+1 -1
drivers/net/ethernet/netronome/nfp/bpf/offload.c
··· 374 374 } 375 375 376 376 use_map_size = DIV_ROUND_UP(offmap->map.value_size, 4) * 377 - FIELD_SIZEOF(struct nfp_bpf_map, use_map[0]); 377 + sizeof_field(struct nfp_bpf_map, use_map[0]); 378 378 379 379 nfp_map = kzalloc(sizeof(*nfp_map) + use_map_size, GFP_USER); 380 380 if (!nfp_map)
+1 -1
drivers/net/ethernet/netronome/nfp/flower/main.h
··· 24 24 #define NFP_FL_STAT_ID_MU_NUM GENMASK(31, 22) 25 25 #define NFP_FL_STAT_ID_STAT GENMASK(21, 0) 26 26 27 - #define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ 27 + #define NFP_FL_STATS_ELEM_RS sizeof_field(struct nfp_fl_stats_id, \ 28 28 init_unalloc) 29 29 #define NFP_FLOWER_MASK_ENTRY_RS 256 30 30 #define NFP_FLOWER_MASK_ELEMENT_RS 1
+1 -1
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
··· 20 20 #define PCH_GBE_STAT(m) \ 21 21 { \ 22 22 .string = #m, \ 23 - .size = FIELD_SIZEOF(struct pch_gbe_hw_stats, m), \ 23 + .size = sizeof_field(struct pch_gbe_hw_stats, m), \ 24 24 .offset = offsetof(struct pch_gbe_hw_stats, m), \ 25 25 } 26 26
+1 -1
drivers/net/ethernet/qlogic/qede/qede.h
··· 464 464 struct qede_tx_queue *txq; 465 465 struct qede_tx_queue *xdp_tx; 466 466 467 - #define VEC_NAME_SIZE (FIELD_SIZEOF(struct net_device, name) + 8) 467 + #define VEC_NAME_SIZE (sizeof_field(struct net_device, name) + 8) 468 468 char name[VEC_NAME_SIZE]; 469 469 }; 470 470
+1 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
··· 20 20 int stat_offset; 21 21 }; 22 22 23 - #define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m) 23 + #define QLC_SIZEOF(m) sizeof_field(struct qlcnic_adapter, m) 24 24 #define QLC_OFF(m) offsetof(struct qlcnic_adapter, m) 25 25 static const u32 qlcnic_fw_dump_level[] = { 26 26 0x3, 0x7, 0xf, 0x1f, 0x3f, 0x7f, 0xff
+1 -1
drivers/net/ethernet/realtek/r8169_firmware.c
··· 37 37 u8 chksum; 38 38 } __packed; 39 39 40 - #define FW_OPCODE_SIZE FIELD_SIZEOF(struct rtl_fw_phy_action, code[0]) 40 + #define FW_OPCODE_SIZE sizeof_field(struct rtl_fw_phy_action, code[0]) 41 41 42 42 static bool rtl_fw_format_ok(struct rtl_fw *rtl_fw) 43 43 {
+1 -1
drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
··· 30 30 #define SXGBE_STAT(m) \ 31 31 { \ 32 32 #m, \ 33 - FIELD_SIZEOF(struct sxgbe_extra_stats, m), \ 33 + sizeof_field(struct sxgbe_extra_stats, m), \ 34 34 offsetof(struct sxgbe_priv_data, xstats.m) \ 35 35 } 36 36
+2 -2
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
··· 34 34 }; 35 35 36 36 #define STMMAC_STAT(m) \ 37 - { #m, FIELD_SIZEOF(struct stmmac_extra_stats, m), \ 37 + { #m, sizeof_field(struct stmmac_extra_stats, m), \ 38 38 offsetof(struct stmmac_priv, xstats.m)} 39 39 40 40 static const struct stmmac_stats stmmac_gstrings_stats[] = { ··· 163 163 164 164 /* HW MAC Management counters (if supported) */ 165 165 #define STMMAC_MMC_STAT(m) \ 166 - { #m, FIELD_SIZEOF(struct stmmac_counters, m), \ 166 + { #m, sizeof_field(struct stmmac_counters, m), \ 167 167 offsetof(struct stmmac_priv, mmc.m)} 168 168 169 169 static const struct stmmac_stats stmmac_mmc[] = {
+3 -3
drivers/net/ethernet/ti/cpsw_ethtool.c
··· 73 73 }; 74 74 75 75 #define CPSW_STAT(m) CPSW_STATS, \ 76 - FIELD_SIZEOF(struct cpsw_hw_stats, m), \ 76 + sizeof_field(struct cpsw_hw_stats, m), \ 77 77 offsetof(struct cpsw_hw_stats, m) 78 78 #define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \ 79 - FIELD_SIZEOF(struct cpdma_chan_stats, m), \ 79 + sizeof_field(struct cpdma_chan_stats, m), \ 80 80 offsetof(struct cpdma_chan_stats, m) 81 81 #define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \ 82 - FIELD_SIZEOF(struct cpdma_chan_stats, m), \ 82 + sizeof_field(struct cpdma_chan_stats, m), \ 83 83 offsetof(struct cpdma_chan_stats, m) 84 84 85 85 static const struct cpsw_stats cpsw_gstrings_stats[] = {
+16 -16
drivers/net/ethernet/ti/netcp_ethss.c
··· 783 783 #define GBE_STATSA_INFO(field) \ 784 784 { \ 785 785 "GBE_A:"#field, GBE_STATSA_MODULE, \ 786 - FIELD_SIZEOF(struct gbe_hw_stats, field), \ 786 + sizeof_field(struct gbe_hw_stats, field), \ 787 787 offsetof(struct gbe_hw_stats, field) \ 788 788 } 789 789 790 790 #define GBE_STATSB_INFO(field) \ 791 791 { \ 792 792 "GBE_B:"#field, GBE_STATSB_MODULE, \ 793 - FIELD_SIZEOF(struct gbe_hw_stats, field), \ 793 + sizeof_field(struct gbe_hw_stats, field), \ 794 794 offsetof(struct gbe_hw_stats, field) \ 795 795 } 796 796 797 797 #define GBE_STATSC_INFO(field) \ 798 798 { \ 799 799 "GBE_C:"#field, GBE_STATSC_MODULE, \ 800 - FIELD_SIZEOF(struct gbe_hw_stats, field), \ 800 + sizeof_field(struct gbe_hw_stats, field), \ 801 801 offsetof(struct gbe_hw_stats, field) \ 802 802 } 803 803 804 804 #define GBE_STATSD_INFO(field) \ 805 805 { \ 806 806 "GBE_D:"#field, GBE_STATSD_MODULE, \ 807 - FIELD_SIZEOF(struct gbe_hw_stats, field), \ 807 + sizeof_field(struct gbe_hw_stats, field), \ 808 808 offsetof(struct gbe_hw_stats, field) \ 809 809 } 810 810 ··· 957 957 #define GBENU_STATS_HOST(field) \ 958 958 { \ 959 959 "GBE_HOST:"#field, GBENU_STATS0_MODULE, \ 960 - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 960 + sizeof_field(struct gbenu_hw_stats, field), \ 961 961 offsetof(struct gbenu_hw_stats, field) \ 962 962 } 963 963 ··· 967 967 #define GBENU_STATS_P1(field) \ 968 968 { \ 969 969 "GBE_P1:"#field, GBENU_STATS1_MODULE, \ 970 - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 970 + sizeof_field(struct gbenu_hw_stats, field), \ 971 971 offsetof(struct gbenu_hw_stats, field) \ 972 972 } 973 973 974 974 #define GBENU_STATS_P2(field) \ 975 975 { \ 976 976 "GBE_P2:"#field, GBENU_STATS2_MODULE, \ 977 - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 977 + sizeof_field(struct gbenu_hw_stats, field), \ 978 978 offsetof(struct gbenu_hw_stats, field) \ 979 979 } 980 980 981 981 #define GBENU_STATS_P3(field) \ 982 982 { \ 983 983 "GBE_P3:"#field, GBENU_STATS3_MODULE, \ 984 - 
FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 984 + sizeof_field(struct gbenu_hw_stats, field), \ 985 985 offsetof(struct gbenu_hw_stats, field) \ 986 986 } 987 987 988 988 #define GBENU_STATS_P4(field) \ 989 989 { \ 990 990 "GBE_P4:"#field, GBENU_STATS4_MODULE, \ 991 - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 991 + sizeof_field(struct gbenu_hw_stats, field), \ 992 992 offsetof(struct gbenu_hw_stats, field) \ 993 993 } 994 994 995 995 #define GBENU_STATS_P5(field) \ 996 996 { \ 997 997 "GBE_P5:"#field, GBENU_STATS5_MODULE, \ 998 - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 998 + sizeof_field(struct gbenu_hw_stats, field), \ 999 999 offsetof(struct gbenu_hw_stats, field) \ 1000 1000 } 1001 1001 1002 1002 #define GBENU_STATS_P6(field) \ 1003 1003 { \ 1004 1004 "GBE_P6:"#field, GBENU_STATS6_MODULE, \ 1005 - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 1005 + sizeof_field(struct gbenu_hw_stats, field), \ 1006 1006 offsetof(struct gbenu_hw_stats, field) \ 1007 1007 } 1008 1008 1009 1009 #define GBENU_STATS_P7(field) \ 1010 1010 { \ 1011 1011 "GBE_P7:"#field, GBENU_STATS7_MODULE, \ 1012 - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 1012 + sizeof_field(struct gbenu_hw_stats, field), \ 1013 1013 offsetof(struct gbenu_hw_stats, field) \ 1014 1014 } 1015 1015 1016 1016 #define GBENU_STATS_P8(field) \ 1017 1017 { \ 1018 1018 "GBE_P8:"#field, GBENU_STATS8_MODULE, \ 1019 - FIELD_SIZEOF(struct gbenu_hw_stats, field), \ 1019 + sizeof_field(struct gbenu_hw_stats, field), \ 1020 1020 offsetof(struct gbenu_hw_stats, field) \ 1021 1021 } 1022 1022 ··· 1607 1607 #define XGBE_STATS0_INFO(field) \ 1608 1608 { \ 1609 1609 "GBE_0:"#field, XGBE_STATS0_MODULE, \ 1610 - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ 1610 + sizeof_field(struct xgbe_hw_stats, field), \ 1611 1611 offsetof(struct xgbe_hw_stats, field) \ 1612 1612 } 1613 1613 1614 1614 #define XGBE_STATS1_INFO(field) \ 1615 1615 { \ 1616 1616 "GBE_1:"#field, XGBE_STATS1_MODULE, \ 1617 - FIELD_SIZEOF(struct xgbe_hw_stats, 
field), \ 1617 + sizeof_field(struct xgbe_hw_stats, field), \ 1618 1618 offsetof(struct xgbe_hw_stats, field) \ 1619 1619 } 1620 1620 1621 1621 #define XGBE_STATS2_INFO(field) \ 1622 1622 { \ 1623 1623 "GBE_2:"#field, XGBE_STATS2_MODULE, \ 1624 - FIELD_SIZEOF(struct xgbe_hw_stats, field), \ 1624 + sizeof_field(struct xgbe_hw_stats, field), \ 1625 1625 offsetof(struct xgbe_hw_stats, field) \ 1626 1626 } 1627 1627
+1 -1
drivers/net/fjes/fjes_ethtool.c
··· 21 21 22 22 #define FJES_STAT(name, stat) { \ 23 23 .stat_string = name, \ 24 - .sizeof_stat = FIELD_SIZEOF(struct fjes_adapter, stat), \ 24 + .sizeof_stat = sizeof_field(struct fjes_adapter, stat), \ 25 25 .stat_offset = offsetof(struct fjes_adapter, stat) \ 26 26 } 27 27
+1 -1
drivers/net/geneve.c
··· 1156 1156 1157 1157 static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { 1158 1158 [IFLA_GENEVE_ID] = { .type = NLA_U32 }, 1159 - [IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 1159 + [IFLA_GENEVE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) }, 1160 1160 [IFLA_GENEVE_REMOTE6] = { .len = sizeof(struct in6_addr) }, 1161 1161 [IFLA_GENEVE_TTL] = { .type = NLA_U8 }, 1162 1162 [IFLA_GENEVE_TOS] = { .type = NLA_U8 },
+1 -1
drivers/net/hyperv/netvsc_drv.c
··· 571 571 572 572 /* Use the skb control buffer for building up the packet */ 573 573 BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) > 574 - FIELD_SIZEOF(struct sk_buff, cb)); 574 + sizeof_field(struct sk_buff, cb)); 575 575 packet = (struct hv_netvsc_packet *)skb->cb; 576 576 577 577 packet->q_idx = skb_get_queue_mapping(skb);
+1 -1
drivers/net/usb/sierra_net.c
··· 865 865 u16 len; 866 866 bool need_tail; 867 867 868 - BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data) 868 + BUILD_BUG_ON(sizeof_field(struct usbnet, data) 869 869 < sizeof(struct cdc_state)); 870 870 871 871 dev_dbg(&dev->udev->dev, "%s", __func__);
+1 -1
drivers/net/usb/usbnet.c
··· 2184 2184 { 2185 2185 /* Compiler should optimize this out. */ 2186 2186 BUILD_BUG_ON( 2187 - FIELD_SIZEOF(struct sk_buff, cb) < sizeof(struct skb_data)); 2187 + sizeof_field(struct sk_buff, cb) < sizeof(struct skb_data)); 2188 2188 2189 2189 eth_random_addr(node_id); 2190 2190 return 0;
+2 -2
drivers/net/vxlan.c
··· 3069 3069 3070 3070 static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { 3071 3071 [IFLA_VXLAN_ID] = { .type = NLA_U32 }, 3072 - [IFLA_VXLAN_GROUP] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 3072 + [IFLA_VXLAN_GROUP] = { .len = sizeof_field(struct iphdr, daddr) }, 3073 3073 [IFLA_VXLAN_GROUP6] = { .len = sizeof(struct in6_addr) }, 3074 3074 [IFLA_VXLAN_LINK] = { .type = NLA_U32 }, 3075 - [IFLA_VXLAN_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, 3075 + [IFLA_VXLAN_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) }, 3076 3076 [IFLA_VXLAN_LOCAL6] = { .len = sizeof(struct in6_addr) }, 3077 3077 [IFLA_VXLAN_TOS] = { .type = NLA_U8 }, 3078 3078 [IFLA_VXLAN_TTL] = { .type = NLA_U8 },
+1 -1
drivers/net/wireless/marvell/libertas/debugfs.c
··· 774 774 775 775 #ifdef PROC_DEBUG 776 776 777 - #define item_size(n) (FIELD_SIZEOF(struct lbs_private, n)) 777 + #define item_size(n) (sizeof_field(struct lbs_private, n)) 778 778 #define item_addr(n) (offsetof(struct lbs_private, n)) 779 779 780 780
+2 -2
drivers/net/wireless/marvell/mwifiex/util.h
··· 36 36 }; 37 37 38 38 /* size/addr for mwifiex_debug_info */ 39 - #define item_size(n) (FIELD_SIZEOF(struct mwifiex_debug_info, n)) 39 + #define item_size(n) (sizeof_field(struct mwifiex_debug_info, n)) 40 40 #define item_addr(n) (offsetof(struct mwifiex_debug_info, n)) 41 41 42 42 /* size/addr for struct mwifiex_adapter */ 43 - #define adapter_item_size(n) (FIELD_SIZEOF(struct mwifiex_adapter, n)) 43 + #define adapter_item_size(n) (sizeof_field(struct mwifiex_adapter, n)) 44 44 #define adapter_item_addr(n) (offsetof(struct mwifiex_adapter, n)) 45 45 46 46 struct mwifiex_debug_data {
+1 -1
drivers/s390/net/qeth_core_main.c
··· 4779 4779 4780 4780 QETH_CARD_TEXT(card, 2, "qdioest"); 4781 4781 4782 - qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL); 4782 + qib_param_field = kzalloc(sizeof_field(struct qib, parm), GFP_KERNEL); 4783 4783 if (!qib_param_field) { 4784 4784 rc = -ENOMEM; 4785 4785 goto out_free_nothing;
+5 -5
drivers/s390/net/qeth_core_mpc.h
··· 421 421 } data; 422 422 } __attribute__ ((packed)); 423 423 424 - #define SETASS_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setassparms,\ 424 + #define SETASS_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setassparms,\ 425 425 data.field) 426 426 427 427 /* SETRTG IPA Command: ****************************************************/ ··· 535 535 } data; 536 536 } __attribute__ ((packed)); 537 537 538 - #define SETADP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setadpparms,\ 538 + #define SETADP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setadpparms,\ 539 539 data.field) 540 540 541 541 /* CREATE_ADDR IPA Command: ***********************************************/ ··· 648 648 } data; 649 649 }; 650 650 651 - #define VNICC_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_vnicc,\ 651 + #define VNICC_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_vnicc,\ 652 652 data.field) 653 653 654 654 /* SETBRIDGEPORT IPA Command: *********************************************/ ··· 729 729 } data; 730 730 } __packed; 731 731 732 - #define SBP_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipacmd_setbridgeport,\ 732 + #define SBP_DATA_SIZEOF(field) sizeof_field(struct qeth_ipacmd_setbridgeport,\ 733 733 data.field) 734 734 735 735 /* ADDRESS_CHANGE_NOTIFICATION adapter-initiated "command" *******************/ ··· 790 790 } data; 791 791 } __attribute__ ((packed)); 792 792 793 - #define IPA_DATA_SIZEOF(field) FIELD_SIZEOF(struct qeth_ipa_cmd, data.field) 793 + #define IPA_DATA_SIZEOF(field) sizeof_field(struct qeth_ipa_cmd, data.field) 794 794 795 795 /* 796 796 * special command for ARP processing.
+2 -2
drivers/scsi/aacraid/aachba.c
··· 535 535 if ((le32_to_cpu(get_name_reply->status) == CT_OK) 536 536 && (get_name_reply->data[0] != '\0')) { 537 537 char *sp = get_name_reply->data; 538 - int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); 538 + int data_size = sizeof_field(struct aac_get_name_resp, data); 539 539 540 540 sp[data_size - 1] = '\0'; 541 541 while (*sp == ' ') ··· 574 574 575 575 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 576 576 577 - data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); 577 + data_size = sizeof_field(struct aac_get_name_resp, data); 578 578 579 579 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 580 580
+1 -1
drivers/scsi/be2iscsi/be_cmds.h
··· 1300 1300 1301 1301 /* Returns the number of items in the field array. */ 1302 1302 #define BE_NUMBER_OF_FIELD(_type_, _field_) \ 1303 - (FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\ 1303 + (sizeof_field(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\ 1304 1304 1305 1305 /** 1306 1306 * Different types of iSCSI completions to host driver for both initiator
+1 -1
drivers/scsi/cxgbi/libcxgbi.c
··· 2746 2746 { 2747 2747 pr_info("%s", version); 2748 2748 2749 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < 2749 + BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < 2750 2750 sizeof(struct cxgbi_skb_cb)); 2751 2751 return 0; 2752 2752 }
+3 -3
drivers/scsi/smartpqi/smartpqi_init.c
··· 8689 8689 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, 8690 8690 data.delete_operational_queue.queue_id) != 12); 8691 8691 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64); 8692 - BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 8692 + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 8693 8693 data.create_operational_iq) != 64 - 11); 8694 - BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 8694 + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 8695 8695 data.create_operational_oq) != 64 - 11); 8696 - BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, 8696 + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, 8697 8697 data.delete_operational_queue) != 64 - 11); 8698 8698 8699 8699 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
+1 -1
drivers/staging/qlge/qlge_ethtool.c
··· 41 41 int stat_offset; 42 42 }; 43 43 44 - #define QL_SIZEOF(m) FIELD_SIZEOF(struct ql_adapter, m) 44 + #define QL_SIZEOF(m) sizeof_field(struct ql_adapter, m) 45 45 #define QL_OFF(m) offsetof(struct ql_adapter, m) 46 46 47 47 static const struct ql_stats ql_gstrings_stats[] = {
+1 -1
drivers/staging/wfx/data_tx.c
··· 679 679 struct ieee80211_sta *sta = control ? control->sta : NULL; 680 680 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 681 681 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 682 - size_t driver_data_room = FIELD_SIZEOF(struct ieee80211_tx_info, 682 + size_t driver_data_room = sizeof_field(struct ieee80211_tx_info, 683 683 rate_driver_data); 684 684 685 685 compiletime_assert(sizeof(struct wfx_tx_priv) <= driver_data_room,
+1 -1
drivers/target/iscsi/cxgbit/cxgbit_main.c
··· 708 708 pr_info("%s dcb enabled.\n", DRV_NAME); 709 709 register_dcbevent_notifier(&cxgbit_dcbevent_nb); 710 710 #endif 711 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < 711 + BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < 712 712 sizeof(union cxgbit_skb_cb)); 713 713 return 0; 714 714 }
+1 -1
drivers/usb/atm/usbatm.c
··· 1275 1275 1276 1276 static int __init usbatm_usb_init(void) 1277 1277 { 1278 - if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) { 1278 + if (sizeof(struct usbatm_control) > sizeof_field(struct sk_buff, cb)) { 1279 1279 printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name); 1280 1280 return -EIO; 1281 1281 }
+1 -1
drivers/usb/gadget/function/f_fs.c
··· 3509 3509 3510 3510 static int ffs_set_inst_name(struct usb_function_instance *fi, const char *name) 3511 3511 { 3512 - if (strlen(name) >= FIELD_SIZEOF(struct ffs_dev, name)) 3512 + if (strlen(name) >= sizeof_field(struct ffs_dev, name)) 3513 3513 return -ENAMETOOLONG; 3514 3514 return ffs_name_dev(to_f_fs_opts(fi)->dev, name); 3515 3515 }
+1 -1
fs/crypto/keyring.c
··· 151 151 } 152 152 153 153 #define FSCRYPT_FS_KEYRING_DESCRIPTION_SIZE \ 154 - (CONST_STRLEN("fscrypt-") + FIELD_SIZEOF(struct super_block, s_id)) 154 + (CONST_STRLEN("fscrypt-") + sizeof_field(struct super_block, s_id)) 155 155 156 156 #define FSCRYPT_MK_DESCRIPTION_SIZE (2 * FSCRYPT_KEY_IDENTIFIER_SIZE + 1) 157 157
+1 -1
fs/verity/enable.c
··· 315 315 if (arg.block_size != PAGE_SIZE) 316 316 return -EINVAL; 317 317 318 - if (arg.salt_size > FIELD_SIZEOF(struct fsverity_descriptor, salt)) 318 + if (arg.salt_size > sizeof_field(struct fsverity_descriptor, salt)) 319 319 return -EMSGSIZE; 320 320 321 321 if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
+6 -6
include/linux/filter.h
··· 420 420 421 421 #define BPF_FIELD_SIZEOF(type, field) \ 422 422 ({ \ 423 - const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \ 423 + const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \ 424 424 BUILD_BUG_ON(__size < 0); \ 425 425 __size; \ 426 426 }) ··· 497 497 498 498 #define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \ 499 499 ({ \ 500 - BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \ 500 + BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \ 501 501 *(PTR_SIZE) = (SIZE); \ 502 502 offsetof(TYPE, MEMBER); \ 503 503 }) ··· 608 608 { 609 609 struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb; 610 610 611 - BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb)); 611 + BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb)); 612 612 cb->data_meta = skb->data - skb_metadata_len(skb); 613 613 cb->data_end = skb->data + skb_headlen(skb); 614 614 } ··· 646 646 * attached to sockets, we need to clear the bpf_skb_cb() area 647 647 * to not leak previous contents to user space. 648 648 */ 649 - BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN); 650 - BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != 651 - FIELD_SIZEOF(struct qdisc_skb_cb, data)); 649 + BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN); 650 + BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != 651 + sizeof_field(struct qdisc_skb_cb, data)); 652 652 653 653 return qdisc_skb_cb(skb)->data; 654 654 }
+1 -1
include/linux/kvm_host.h
··· 149 149 #define KVM_REQUEST_ARCH_BASE 8 150 150 151 151 #define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \ 152 - BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ 152 + BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \ 153 153 (unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \ 154 154 }) 155 155 #define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
+1 -1
include/linux/phy_led_triggers.h
··· 14 14 #define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11 15 15 16 16 #define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \ 17 - FIELD_SIZEOF(struct mdio_device, addr)+\ 17 + sizeof_field(struct mdio_device, addr)+\ 18 18 PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE) 19 19 20 20 struct phy_led_trigger {
+1 -1
include/net/garp.h
··· 37 37 static inline struct garp_skb_cb *garp_cb(struct sk_buff *skb) 38 38 { 39 39 BUILD_BUG_ON(sizeof(struct garp_skb_cb) > 40 - FIELD_SIZEOF(struct sk_buff, cb)); 40 + sizeof_field(struct sk_buff, cb)); 41 41 return (struct garp_skb_cb *)skb->cb; 42 42 } 43 43
+3 -3
include/net/ip_tunnels.h
··· 33 33 /* Used to memset ipv4 address padding. */ 34 34 #define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst) 35 35 #define IP_TUNNEL_KEY_IPV4_PAD_LEN \ 36 - (FIELD_SIZEOF(struct ip_tunnel_key, u) - \ 37 - FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4)) 36 + (sizeof_field(struct ip_tunnel_key, u) - \ 37 + sizeof_field(struct ip_tunnel_key, u.ipv4)) 38 38 39 39 struct ip_tunnel_key { 40 40 __be64 tun_id; ··· 63 63 64 64 /* Maximum tunnel options length. */ 65 65 #define IP_TUNNEL_OPTS_MAX \ 66 - GENMASK((FIELD_SIZEOF(struct ip_tunnel_info, \ 66 + GENMASK((sizeof_field(struct ip_tunnel_info, \ 67 67 options_len) * BITS_PER_BYTE) - 1, 0) 68 68 69 69 struct ip_tunnel_info {
+1 -1
include/net/mrp.h
··· 39 39 static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb) 40 40 { 41 41 BUILD_BUG_ON(sizeof(struct mrp_skb_cb) > 42 - FIELD_SIZEOF(struct sk_buff, cb)); 42 + sizeof_field(struct sk_buff, cb)); 43 43 return (struct mrp_skb_cb *)skb->cb; 44 44 } 45 45
+1 -1
include/net/netfilter/nf_conntrack_helper.h
··· 81 81 }; 82 82 83 83 #define NF_CT_HELPER_BUILD_BUG_ON(structsize) \ 84 - BUILD_BUG_ON((structsize) > FIELD_SIZEOF(struct nf_conn_help, data)) 84 + BUILD_BUG_ON((structsize) > sizeof_field(struct nf_conn_help, data)) 85 85 86 86 struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name, 87 87 u16 l3num, u8 protonum);
+1 -1
include/net/netfilter/nf_tables_core.h
··· 41 41 */ 42 42 static inline u32 nft_cmp_fast_mask(unsigned int len) 43 43 { 44 - return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr, 44 + return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr, 45 45 data) * BITS_PER_BYTE - len)); 46 46 } 47 47
+1 -1
include/net/sock.h
··· 2305 2305 * using skb->cb[] would keep using it directly and utilize its 2306 2306 * alignement guarantee. 2307 2307 */ 2308 - #define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \ 2308 + #define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \ 2309 2309 sizeof(struct sock_skb_cb))) 2310 2310 2311 2311 #define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
+1 -1
ipc/util.c
··· 100 100 static const struct rhashtable_params ipc_kht_params = { 101 101 .head_offset = offsetof(struct kern_ipc_perm, khtnode), 102 102 .key_offset = offsetof(struct kern_ipc_perm, key), 103 - .key_len = FIELD_SIZEOF(struct kern_ipc_perm, key), 103 + .key_len = sizeof_field(struct kern_ipc_perm, key), 104 104 .automatic_shrinking = true, 105 105 }; 106 106
+1 -1
kernel/bpf/cgroup.c
··· 1341 1341 *insn++ = BPF_LDX_MEM( 1342 1342 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 1343 1343 bpf_target_off(struct bpf_sysctl_kern, write, 1344 - FIELD_SIZEOF(struct bpf_sysctl_kern, 1344 + sizeof_field(struct bpf_sysctl_kern, 1345 1345 write), 1346 1346 target_size)); 1347 1347 break;
+2 -2
kernel/bpf/local_storage.c
··· 357 357 * The first field must be a 64 bit integer at 0 offset. 358 358 */ 359 359 m = (struct btf_member *)(key_type + 1); 360 - size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, cgroup_inode_id); 360 + size = sizeof_field(struct bpf_cgroup_storage_key, cgroup_inode_id); 361 361 if (!btf_member_is_reg_int(btf, key_type, m, 0, size)) 362 362 return -EINVAL; 363 363 ··· 366 366 */ 367 367 m++; 368 368 offset = offsetof(struct bpf_cgroup_storage_key, attach_type); 369 - size = FIELD_SIZEOF(struct bpf_cgroup_storage_key, attach_type); 369 + size = sizeof_field(struct bpf_cgroup_storage_key, attach_type); 370 370 if (!btf_member_is_reg_int(btf, key_type, m, offset, size)) 371 371 return -EINVAL; 372 372
+3 -3
net/802/mrp.c
··· 523 523 struct mrp_attr *attr; 524 524 525 525 if (sizeof(struct mrp_skb_cb) + len > 526 - FIELD_SIZEOF(struct sk_buff, cb)) 526 + sizeof_field(struct sk_buff, cb)) 527 527 return -ENOMEM; 528 528 529 529 spin_lock_bh(&app->lock); ··· 548 548 struct mrp_attr *attr; 549 549 550 550 if (sizeof(struct mrp_skb_cb) + len > 551 - FIELD_SIZEOF(struct sk_buff, cb)) 551 + sizeof_field(struct sk_buff, cb)) 552 552 return; 553 553 554 554 spin_lock_bh(&app->lock); ··· 692 692 * advance to the next event in its Vector. 693 693 */ 694 694 if (sizeof(struct mrp_skb_cb) + mrp_cb(skb)->mh->attrlen > 695 - FIELD_SIZEOF(struct sk_buff, cb)) 695 + sizeof_field(struct sk_buff, cb)) 696 696 return -1; 697 697 if (skb_copy_bits(skb, *offset, mrp_cb(skb)->attrvalue, 698 698 mrp_cb(skb)->mh->attrlen) < 0)
+1 -1
net/batman-adv/main.c
··· 548 548 BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12); 549 549 BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8); 550 550 551 - i = FIELD_SIZEOF(struct sk_buff, cb); 551 + i = sizeof_field(struct sk_buff, cb); 552 552 BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i); 553 553 554 554 /* broadcast packet */
+4 -4
net/bpf/test_run.c
··· 253 253 /* priority is allowed */ 254 254 255 255 if (!range_is_zero(__skb, offsetof(struct __sk_buff, priority) + 256 - FIELD_SIZEOF(struct __sk_buff, priority), 256 + sizeof_field(struct __sk_buff, priority), 257 257 offsetof(struct __sk_buff, cb))) 258 258 return -EINVAL; 259 259 260 260 /* cb is allowed */ 261 261 262 262 if (!range_is_zero(__skb, offsetof(struct __sk_buff, cb) + 263 - FIELD_SIZEOF(struct __sk_buff, cb), 263 + sizeof_field(struct __sk_buff, cb), 264 264 offsetof(struct __sk_buff, tstamp))) 265 265 return -EINVAL; 266 266 267 267 /* tstamp is allowed */ 268 268 269 269 if (!range_is_zero(__skb, offsetof(struct __sk_buff, tstamp) + 270 - FIELD_SIZEOF(struct __sk_buff, tstamp), 270 + sizeof_field(struct __sk_buff, tstamp), 271 271 sizeof(struct __sk_buff))) 272 272 return -EINVAL; 273 273 ··· 438 438 /* flags is allowed */ 439 439 440 440 if (!range_is_zero(ctx, offsetof(struct bpf_flow_keys, flags) + 441 - FIELD_SIZEOF(struct bpf_flow_keys, flags), 441 + sizeof_field(struct bpf_flow_keys, flags), 442 442 sizeof(struct bpf_flow_keys))) 443 443 return -EINVAL; 444 444
+1 -1
net/bridge/br.c
··· 312 312 { 313 313 int err; 314 314 315 - BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); 315 + BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > sizeof_field(struct sk_buff, cb)); 316 316 317 317 err = stp_proto_register(&br_stp_proto); 318 318 if (err < 0) {
+1 -1
net/core/dev.c
··· 10165 10165 static int __net_init netdev_init(struct net *net) 10166 10166 { 10167 10167 BUILD_BUG_ON(GRO_HASH_BUCKETS > 10168 - 8 * FIELD_SIZEOF(struct napi_struct, gro_bitmask)); 10168 + 8 * sizeof_field(struct napi_struct, gro_bitmask)); 10169 10169 10170 10170 if (net != &init_net) 10171 10171 INIT_LIST_HEAD(&net->dev_base_head);
+70 -70
net/core/filter.c
··· 274 274 275 275 switch (skb_field) { 276 276 case SKF_AD_MARK: 277 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 277 + BUILD_BUG_ON(sizeof_field(struct sk_buff, mark) != 4); 278 278 279 279 *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, 280 280 offsetof(struct sk_buff, mark)); ··· 289 289 break; 290 290 291 291 case SKF_AD_QUEUE: 292 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); 292 + BUILD_BUG_ON(sizeof_field(struct sk_buff, queue_mapping) != 2); 293 293 294 294 *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, 295 295 offsetof(struct sk_buff, queue_mapping)); 296 296 break; 297 297 298 298 case SKF_AD_VLAN_TAG: 299 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 299 + BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_tci) != 2); 300 300 301 301 /* dst_reg = *(u16 *) (src_reg + offsetof(vlan_tci)) */ 302 302 *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ··· 322 322 323 323 switch (fp->k) { 324 324 case SKF_AD_OFF + SKF_AD_PROTOCOL: 325 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); 325 + BUILD_BUG_ON(sizeof_field(struct sk_buff, protocol) != 2); 326 326 327 327 /* A = *(u16 *) (CTX + offsetof(protocol)) */ 328 328 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, ··· 338 338 339 339 case SKF_AD_OFF + SKF_AD_IFINDEX: 340 340 case SKF_AD_OFF + SKF_AD_HATYPE: 341 - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); 342 - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); 341 + BUILD_BUG_ON(sizeof_field(struct net_device, ifindex) != 4); 342 + BUILD_BUG_ON(sizeof_field(struct net_device, type) != 2); 343 343 344 344 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev), 345 345 BPF_REG_TMP, BPF_REG_CTX, ··· 361 361 break; 362 362 363 363 case SKF_AD_OFF + SKF_AD_RXHASH: 364 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 364 + BUILD_BUG_ON(sizeof_field(struct sk_buff, hash) != 4); 365 365 366 366 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, 367 367 offsetof(struct 
sk_buff, hash)); ··· 385 385 break; 386 386 387 387 case SKF_AD_OFF + SKF_AD_VLAN_TPID: 388 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2); 388 + BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_proto) != 2); 389 389 390 390 /* A = *(u16 *) (CTX + offsetof(vlan_proto)) */ 391 391 *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, ··· 5589 5589 5590 5590 #define BPF_TCP_SOCK_GET_COMMON(FIELD) \ 5591 5591 do { \ 5592 - BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, FIELD) > \ 5593 - FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \ 5592 + BUILD_BUG_ON(sizeof_field(struct tcp_sock, FIELD) > \ 5593 + sizeof_field(struct bpf_tcp_sock, FIELD)); \ 5594 5594 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct tcp_sock, FIELD),\ 5595 5595 si->dst_reg, si->src_reg, \ 5596 5596 offsetof(struct tcp_sock, FIELD)); \ ··· 5598 5598 5599 5599 #define BPF_INET_SOCK_GET_COMMON(FIELD) \ 5600 5600 do { \ 5601 - BUILD_BUG_ON(FIELD_SIZEOF(struct inet_connection_sock, \ 5601 + BUILD_BUG_ON(sizeof_field(struct inet_connection_sock, \ 5602 5602 FIELD) > \ 5603 - FIELD_SIZEOF(struct bpf_tcp_sock, FIELD)); \ 5603 + sizeof_field(struct bpf_tcp_sock, FIELD)); \ 5604 5604 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 5605 5605 struct inet_connection_sock, \ 5606 5606 FIELD), \ ··· 5615 5615 5616 5616 switch (si->off) { 5617 5617 case offsetof(struct bpf_tcp_sock, rtt_min): 5618 - BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) != 5618 + BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != 5619 5619 sizeof(struct minmax)); 5620 5620 BUILD_BUG_ON(sizeof(struct minmax) < 5621 5621 sizeof(struct minmax_sample)); ··· 5780 5780 5781 5781 #define BPF_XDP_SOCK_GET(FIELD) \ 5782 5782 do { \ 5783 - BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_sock, FIELD) > \ 5784 - FIELD_SIZEOF(struct bpf_xdp_sock, FIELD)); \ 5783 + BUILD_BUG_ON(sizeof_field(struct xdp_sock, FIELD) > \ 5784 + sizeof_field(struct bpf_xdp_sock, FIELD)); \ 5785 5785 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_sock, FIELD),\ 5786 5786 
si->dst_reg, si->src_reg, \ 5787 5787 offsetof(struct xdp_sock, FIELD)); \ ··· 7344 7344 7345 7345 case offsetof(struct __sk_buff, cb[0]) ... 7346 7346 offsetofend(struct __sk_buff, cb[4]) - 1: 7347 - BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20); 7347 + BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, data) < 20); 7348 7348 BUILD_BUG_ON((offsetof(struct sk_buff, cb) + 7349 7349 offsetof(struct qdisc_skb_cb, data)) % 7350 7350 sizeof(__u64)); ··· 7363 7363 break; 7364 7364 7365 7365 case offsetof(struct __sk_buff, tc_classid): 7366 - BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2); 7366 + BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, tc_classid) != 2); 7367 7367 7368 7368 off = si->off; 7369 7369 off -= offsetof(struct __sk_buff, tc_classid); ··· 7434 7434 #endif 7435 7435 break; 7436 7436 case offsetof(struct __sk_buff, family): 7437 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); 7437 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 7438 7438 7439 7439 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 7440 7440 si->dst_reg, si->src_reg, ··· 7445 7445 2, target_size)); 7446 7446 break; 7447 7447 case offsetof(struct __sk_buff, remote_ip4): 7448 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); 7448 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 7449 7449 7450 7450 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 7451 7451 si->dst_reg, si->src_reg, ··· 7456 7456 4, target_size)); 7457 7457 break; 7458 7458 case offsetof(struct __sk_buff, local_ip4): 7459 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, 7459 + BUILD_BUG_ON(sizeof_field(struct sock_common, 7460 7460 skc_rcv_saddr) != 4); 7461 7461 7462 7462 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), ··· 7470 7470 case offsetof(struct __sk_buff, remote_ip6[0]) ... 
7471 7471 offsetof(struct __sk_buff, remote_ip6[3]): 7472 7472 #if IS_ENABLED(CONFIG_IPV6) 7473 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, 7473 + BUILD_BUG_ON(sizeof_field(struct sock_common, 7474 7474 skc_v6_daddr.s6_addr32[0]) != 4); 7475 7475 7476 7476 off = si->off; ··· 7490 7490 case offsetof(struct __sk_buff, local_ip6[0]) ... 7491 7491 offsetof(struct __sk_buff, local_ip6[3]): 7492 7492 #if IS_ENABLED(CONFIG_IPV6) 7493 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, 7493 + BUILD_BUG_ON(sizeof_field(struct sock_common, 7494 7494 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 7495 7495 7496 7496 off = si->off; ··· 7509 7509 break; 7510 7510 7511 7511 case offsetof(struct __sk_buff, remote_port): 7512 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); 7512 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 7513 7513 7514 7514 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 7515 7515 si->dst_reg, si->src_reg, ··· 7524 7524 break; 7525 7525 7526 7526 case offsetof(struct __sk_buff, local_port): 7527 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); 7527 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 7528 7528 7529 7529 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 7530 7530 si->dst_reg, si->src_reg, ··· 7535 7535 break; 7536 7536 7537 7537 case offsetof(struct __sk_buff, tstamp): 7538 - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tstamp) != 8); 7538 + BUILD_BUG_ON(sizeof_field(struct sk_buff, tstamp) != 8); 7539 7539 7540 7540 if (type == BPF_WRITE) 7541 7541 *insn++ = BPF_STX_MEM(BPF_DW, ··· 7573 7573 target_size)); 7574 7574 break; 7575 7575 case offsetof(struct __sk_buff, wire_len): 7576 - BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, pkt_len) != 4); 7576 + BUILD_BUG_ON(sizeof_field(struct qdisc_skb_cb, pkt_len) != 4); 7577 7577 7578 7578 off = si->off; 7579 7579 off -= offsetof(struct __sk_buff, wire_len); ··· 7603 7603 7604 7604 switch (si->off) { 7605 7605 case offsetof(struct 
bpf_sock, bound_dev_if): 7606 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4); 7606 + BUILD_BUG_ON(sizeof_field(struct sock, sk_bound_dev_if) != 4); 7607 7607 7608 7608 if (type == BPF_WRITE) 7609 7609 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, ··· 7614 7614 break; 7615 7615 7616 7616 case offsetof(struct bpf_sock, mark): 7617 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_mark) != 4); 7617 + BUILD_BUG_ON(sizeof_field(struct sock, sk_mark) != 4); 7618 7618 7619 7619 if (type == BPF_WRITE) 7620 7620 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, ··· 7625 7625 break; 7626 7626 7627 7627 case offsetof(struct bpf_sock, priority): 7628 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_priority) != 4); 7628 + BUILD_BUG_ON(sizeof_field(struct sock, sk_priority) != 4); 7629 7629 7630 7630 if (type == BPF_WRITE) 7631 7631 *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg, ··· 7641 7641 si->dst_reg, si->src_reg, 7642 7642 bpf_target_off(struct sock_common, 7643 7643 skc_family, 7644 - FIELD_SIZEOF(struct sock_common, 7644 + sizeof_field(struct sock_common, 7645 7645 skc_family), 7646 7646 target_size)); 7647 7647 break; ··· 7668 7668 *insn++ = BPF_LDX_MEM( 7669 7669 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 7670 7670 bpf_target_off(struct sock_common, skc_rcv_saddr, 7671 - FIELD_SIZEOF(struct sock_common, 7671 + sizeof_field(struct sock_common, 7672 7672 skc_rcv_saddr), 7673 7673 target_size)); 7674 7674 break; ··· 7677 7677 *insn++ = BPF_LDX_MEM( 7678 7678 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 7679 7679 bpf_target_off(struct sock_common, skc_daddr, 7680 - FIELD_SIZEOF(struct sock_common, 7680 + sizeof_field(struct sock_common, 7681 7681 skc_daddr), 7682 7682 target_size)); 7683 7683 break; ··· 7691 7691 bpf_target_off( 7692 7692 struct sock_common, 7693 7693 skc_v6_rcv_saddr.s6_addr32[0], 7694 - FIELD_SIZEOF(struct sock_common, 7694 + sizeof_field(struct sock_common, 7695 7695 skc_v6_rcv_saddr.s6_addr32[0]), 7696 7696 target_size) 
+ off); 7697 7697 #else ··· 7708 7708 BPF_SIZE(si->code), si->dst_reg, si->src_reg, 7709 7709 bpf_target_off(struct sock_common, 7710 7710 skc_v6_daddr.s6_addr32[0], 7711 - FIELD_SIZEOF(struct sock_common, 7711 + sizeof_field(struct sock_common, 7712 7712 skc_v6_daddr.s6_addr32[0]), 7713 7713 target_size) + off); 7714 7714 #else ··· 7722 7722 BPF_FIELD_SIZEOF(struct sock_common, skc_num), 7723 7723 si->dst_reg, si->src_reg, 7724 7724 bpf_target_off(struct sock_common, skc_num, 7725 - FIELD_SIZEOF(struct sock_common, 7725 + sizeof_field(struct sock_common, 7726 7726 skc_num), 7727 7727 target_size)); 7728 7728 break; ··· 7732 7732 BPF_FIELD_SIZEOF(struct sock_common, skc_dport), 7733 7733 si->dst_reg, si->src_reg, 7734 7734 bpf_target_off(struct sock_common, skc_dport, 7735 - FIELD_SIZEOF(struct sock_common, 7735 + sizeof_field(struct sock_common, 7736 7736 skc_dport), 7737 7737 target_size)); 7738 7738 break; ··· 7742 7742 BPF_FIELD_SIZEOF(struct sock_common, skc_state), 7743 7743 si->dst_reg, si->src_reg, 7744 7744 bpf_target_off(struct sock_common, skc_state, 7745 - FIELD_SIZEOF(struct sock_common, 7745 + sizeof_field(struct sock_common, 7746 7746 skc_state), 7747 7747 target_size)); 7748 7748 break; ··· 7837 7837 si->src_reg, offsetof(S, F)); \ 7838 7838 *insn++ = BPF_LDX_MEM( \ 7839 7839 SIZE, si->dst_reg, si->dst_reg, \ 7840 - bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ 7840 + bpf_target_off(NS, NF, sizeof_field(NS, NF), \ 7841 7841 target_size) \ 7842 7842 + OFF); \ 7843 7843 } while (0) ··· 7868 7868 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(S, F), tmp_reg, \ 7869 7869 si->dst_reg, offsetof(S, F)); \ 7870 7870 *insn++ = BPF_STX_MEM(SIZE, tmp_reg, si->src_reg, \ 7871 - bpf_target_off(NS, NF, FIELD_SIZEOF(NS, NF), \ 7871 + bpf_target_off(NS, NF, sizeof_field(NS, NF), \ 7872 7872 target_size) \ 7873 7873 + OFF); \ 7874 7874 *insn++ = BPF_LDX_MEM(BPF_DW, tmp_reg, si->dst_reg, \ ··· 7930 7930 */ 7931 7931 BUILD_BUG_ON(offsetof(struct sockaddr_in, sin_port) != 
7932 7932 offsetof(struct sockaddr_in6, sin6_port)); 7933 - BUILD_BUG_ON(FIELD_SIZEOF(struct sockaddr_in, sin_port) != 7934 - FIELD_SIZEOF(struct sockaddr_in6, sin6_port)); 7933 + BUILD_BUG_ON(sizeof_field(struct sockaddr_in, sin_port) != 7934 + sizeof_field(struct sockaddr_in6, sin6_port)); 7935 7935 SOCK_ADDR_LOAD_OR_STORE_NESTED_FIELD(struct bpf_sock_addr_kern, 7936 7936 struct sockaddr_in6, uaddr, 7937 7937 sin6_port, tmp_reg); ··· 7997 7997 /* Helper macro for adding read access to tcp_sock or sock fields. */ 7998 7998 #define SOCK_OPS_GET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ 7999 7999 do { \ 8000 - BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ 8001 - FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ 8000 + BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ 8001 + sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ 8002 8002 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \ 8003 8003 struct bpf_sock_ops_kern, \ 8004 8004 is_fullsock), \ ··· 8031 8031 #define SOCK_OPS_SET_FIELD(BPF_FIELD, OBJ_FIELD, OBJ) \ 8032 8032 do { \ 8033 8033 int reg = BPF_REG_9; \ 8034 - BUILD_BUG_ON(FIELD_SIZEOF(OBJ, OBJ_FIELD) > \ 8035 - FIELD_SIZEOF(struct bpf_sock_ops, BPF_FIELD)); \ 8034 + BUILD_BUG_ON(sizeof_field(OBJ, OBJ_FIELD) > \ 8035 + sizeof_field(struct bpf_sock_ops, BPF_FIELD)); \ 8036 8036 if (si->dst_reg == reg || si->src_reg == reg) \ 8037 8037 reg--; \ 8038 8038 if (si->dst_reg == reg || si->src_reg == reg) \ ··· 8073 8073 switch (si->off) { 8074 8074 case offsetof(struct bpf_sock_ops, op) ... 
8075 8075 offsetof(struct bpf_sock_ops, replylong[3]): 8076 - BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, op) != 8077 - FIELD_SIZEOF(struct bpf_sock_ops_kern, op)); 8078 - BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, reply) != 8079 - FIELD_SIZEOF(struct bpf_sock_ops_kern, reply)); 8080 - BUILD_BUG_ON(FIELD_SIZEOF(struct bpf_sock_ops, replylong) != 8081 - FIELD_SIZEOF(struct bpf_sock_ops_kern, replylong)); 8076 + BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, op) != 8077 + sizeof_field(struct bpf_sock_ops_kern, op)); 8078 + BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, reply) != 8079 + sizeof_field(struct bpf_sock_ops_kern, reply)); 8080 + BUILD_BUG_ON(sizeof_field(struct bpf_sock_ops, replylong) != 8081 + sizeof_field(struct bpf_sock_ops_kern, replylong)); 8082 8082 off = si->off; 8083 8083 off -= offsetof(struct bpf_sock_ops, op); 8084 8084 off += offsetof(struct bpf_sock_ops_kern, op); ··· 8091 8091 break; 8092 8092 8093 8093 case offsetof(struct bpf_sock_ops, family): 8094 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); 8094 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 8095 8095 8096 8096 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 8097 8097 struct bpf_sock_ops_kern, sk), ··· 8102 8102 break; 8103 8103 8104 8104 case offsetof(struct bpf_sock_ops, remote_ip4): 8105 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); 8105 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 8106 8106 8107 8107 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 8108 8108 struct bpf_sock_ops_kern, sk), ··· 8113 8113 break; 8114 8114 8115 8115 case offsetof(struct bpf_sock_ops, local_ip4): 8116 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, 8116 + BUILD_BUG_ON(sizeof_field(struct sock_common, 8117 8117 skc_rcv_saddr) != 4); 8118 8118 8119 8119 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( ··· 8128 8128 case offsetof(struct bpf_sock_ops, remote_ip6[0]) ... 
8129 8129 offsetof(struct bpf_sock_ops, remote_ip6[3]): 8130 8130 #if IS_ENABLED(CONFIG_IPV6) 8131 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, 8131 + BUILD_BUG_ON(sizeof_field(struct sock_common, 8132 8132 skc_v6_daddr.s6_addr32[0]) != 4); 8133 8133 8134 8134 off = si->off; ··· 8149 8149 case offsetof(struct bpf_sock_ops, local_ip6[0]) ... 8150 8150 offsetof(struct bpf_sock_ops, local_ip6[3]): 8151 8151 #if IS_ENABLED(CONFIG_IPV6) 8152 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, 8152 + BUILD_BUG_ON(sizeof_field(struct sock_common, 8153 8153 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 8154 8154 8155 8155 off = si->off; ··· 8168 8168 break; 8169 8169 8170 8170 case offsetof(struct bpf_sock_ops, remote_port): 8171 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); 8171 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 8172 8172 8173 8173 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 8174 8174 struct bpf_sock_ops_kern, sk), ··· 8182 8182 break; 8183 8183 8184 8184 case offsetof(struct bpf_sock_ops, local_port): 8185 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); 8185 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 8186 8186 8187 8187 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 8188 8188 struct bpf_sock_ops_kern, sk), ··· 8202 8202 break; 8203 8203 8204 8204 case offsetof(struct bpf_sock_ops, state): 8205 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_state) != 1); 8205 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_state) != 1); 8206 8206 8207 8207 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 8208 8208 struct bpf_sock_ops_kern, sk), ··· 8213 8213 break; 8214 8214 8215 8215 case offsetof(struct bpf_sock_ops, rtt_min): 8216 - BUILD_BUG_ON(FIELD_SIZEOF(struct tcp_sock, rtt_min) != 8216 + BUILD_BUG_ON(sizeof_field(struct tcp_sock, rtt_min) != 8217 8217 sizeof(struct minmax)); 8218 8218 BUILD_BUG_ON(sizeof(struct minmax) < 8219 8219 sizeof(struct minmax_sample)); ··· 8224 8224 offsetof(struct bpf_sock_ops_kern, 
sk)); 8225 8225 *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg, 8226 8226 offsetof(struct tcp_sock, rtt_min) + 8227 - FIELD_SIZEOF(struct minmax_sample, t)); 8227 + sizeof_field(struct minmax_sample, t)); 8228 8228 break; 8229 8229 8230 8230 case offsetof(struct bpf_sock_ops, bpf_sock_ops_cb_flags): ··· 8366 8366 offsetof(struct sk_msg, data_end)); 8367 8367 break; 8368 8368 case offsetof(struct sk_msg_md, family): 8369 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_family) != 2); 8369 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_family) != 2); 8370 8370 8371 8371 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 8372 8372 struct sk_msg, sk), ··· 8377 8377 break; 8378 8378 8379 8379 case offsetof(struct sk_msg_md, remote_ip4): 8380 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_daddr) != 4); 8380 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_daddr) != 4); 8381 8381 8382 8382 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 8383 8383 struct sk_msg, sk), ··· 8388 8388 break; 8389 8389 8390 8390 case offsetof(struct sk_msg_md, local_ip4): 8391 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, 8391 + BUILD_BUG_ON(sizeof_field(struct sock_common, 8392 8392 skc_rcv_saddr) != 4); 8393 8393 8394 8394 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( ··· 8403 8403 case offsetof(struct sk_msg_md, remote_ip6[0]) ... 8404 8404 offsetof(struct sk_msg_md, remote_ip6[3]): 8405 8405 #if IS_ENABLED(CONFIG_IPV6) 8406 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, 8406 + BUILD_BUG_ON(sizeof_field(struct sock_common, 8407 8407 skc_v6_daddr.s6_addr32[0]) != 4); 8408 8408 8409 8409 off = si->off; ··· 8424 8424 case offsetof(struct sk_msg_md, local_ip6[0]) ... 
8425 8425 offsetof(struct sk_msg_md, local_ip6[3]): 8426 8426 #if IS_ENABLED(CONFIG_IPV6) 8427 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, 8427 + BUILD_BUG_ON(sizeof_field(struct sock_common, 8428 8428 skc_v6_rcv_saddr.s6_addr32[0]) != 4); 8429 8429 8430 8430 off = si->off; ··· 8443 8443 break; 8444 8444 8445 8445 case offsetof(struct sk_msg_md, remote_port): 8446 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_dport) != 2); 8446 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_dport) != 2); 8447 8447 8448 8448 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 8449 8449 struct sk_msg, sk), ··· 8457 8457 break; 8458 8458 8459 8459 case offsetof(struct sk_msg_md, local_port): 8460 - BUILD_BUG_ON(FIELD_SIZEOF(struct sock_common, skc_num) != 2); 8460 + BUILD_BUG_ON(sizeof_field(struct sock_common, skc_num) != 2); 8461 8461 8462 8462 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( 8463 8463 struct sk_msg, sk), ··· 8847 8847 8848 8848 /* Fields that allow narrowing */ 8849 8849 case bpf_ctx_range(struct sk_reuseport_md, eth_protocol): 8850 - if (size < FIELD_SIZEOF(struct sk_buff, protocol)) 8850 + if (size < sizeof_field(struct sk_buff, protocol)) 8851 8851 return false; 8852 8852 /* fall through */ 8853 8853 case bpf_ctx_range(struct sk_reuseport_md, ip_protocol): ··· 8865 8865 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_reuseport_kern, F), \ 8866 8866 si->dst_reg, si->src_reg, \ 8867 8867 bpf_target_off(struct sk_reuseport_kern, F, \ 8868 - FIELD_SIZEOF(struct sk_reuseport_kern, F), \ 8868 + sizeof_field(struct sk_reuseport_kern, F), \ 8869 8869 target_size)); \ 8870 8870 }) 8871 8871
+5 -5
net/core/flow_dissector.c
··· 599 599 offset += sizeof(struct gre_base_hdr); 600 600 601 601 if (hdr->flags & GRE_CSUM) 602 - offset += FIELD_SIZEOF(struct gre_full_hdr, csum) + 603 - FIELD_SIZEOF(struct gre_full_hdr, reserved1); 602 + offset += sizeof_field(struct gre_full_hdr, csum) + 603 + sizeof_field(struct gre_full_hdr, reserved1); 604 604 605 605 if (hdr->flags & GRE_KEY) { 606 606 const __be32 *keyid; ··· 622 622 else 623 623 key_keyid->keyid = *keyid & GRE_PPTP_KEY_MASK; 624 624 } 625 - offset += FIELD_SIZEOF(struct gre_full_hdr, key); 625 + offset += sizeof_field(struct gre_full_hdr, key); 626 626 } 627 627 628 628 if (hdr->flags & GRE_SEQ) 629 - offset += FIELD_SIZEOF(struct pptp_gre_header, seq); 629 + offset += sizeof_field(struct pptp_gre_header, seq); 630 630 631 631 if (gre_ver == 0) { 632 632 if (*p_proto == htons(ETH_P_TEB)) { ··· 653 653 u8 *ppp_hdr; 654 654 655 655 if (hdr->flags & GRE_ACK) 656 - offset += FIELD_SIZEOF(struct pptp_gre_header, ack); 656 + offset += sizeof_field(struct pptp_gre_header, ack); 657 657 658 658 ppp_hdr = __skb_header_pointer(skb, *p_nhoff + offset, 659 659 sizeof(_ppp_hdr),
+2 -2
net/core/xdp.c
··· 36 36 const u32 *k = data; 37 37 const u32 key = *k; 38 38 39 - BUILD_BUG_ON(FIELD_SIZEOF(struct xdp_mem_allocator, mem.id) 39 + BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id) 40 40 != sizeof(u32)); 41 41 42 42 /* Use cyclic increasing ID as direct hash key */ ··· 56 56 .nelem_hint = 64, 57 57 .head_offset = offsetof(struct xdp_mem_allocator, node), 58 58 .key_offset = offsetof(struct xdp_mem_allocator, mem.id), 59 - .key_len = FIELD_SIZEOF(struct xdp_mem_allocator, mem.id), 59 + .key_len = sizeof_field(struct xdp_mem_allocator, mem.id), 60 60 .max_size = MEM_ID_MAX, 61 61 .min_size = 8, 62 62 .automatic_shrinking = true,
+1 -1
net/dccp/proto.c
··· 1132 1132 int rc; 1133 1133 1134 1134 BUILD_BUG_ON(sizeof(struct dccp_skb_cb) > 1135 - FIELD_SIZEOF(struct sk_buff, cb)); 1135 + sizeof_field(struct sk_buff, cb)); 1136 1136 rc = percpu_counter_init(&dccp_orphan_count, 0, GFP_KERNEL); 1137 1137 if (rc) 1138 1138 goto out_fail;
+2 -2
net/ipv4/ip_gre.c
··· 1464 1464 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 }, 1465 1465 [IFLA_GRE_IKEY] = { .type = NLA_U32 }, 1466 1466 [IFLA_GRE_OKEY] = { .type = NLA_U32 }, 1467 - [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, 1468 - [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 1467 + [IFLA_GRE_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) }, 1468 + [IFLA_GRE_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) }, 1469 1469 [IFLA_GRE_TTL] = { .type = NLA_U8 }, 1470 1470 [IFLA_GRE_TOS] = { .type = NLA_U8 }, 1471 1471 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
+2 -2
net/ipv4/ip_vti.c
··· 580 580 [IFLA_VTI_LINK] = { .type = NLA_U32 }, 581 581 [IFLA_VTI_IKEY] = { .type = NLA_U32 }, 582 582 [IFLA_VTI_OKEY] = { .type = NLA_U32 }, 583 - [IFLA_VTI_LOCAL] = { .len = FIELD_SIZEOF(struct iphdr, saddr) }, 584 - [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) }, 583 + [IFLA_VTI_LOCAL] = { .len = sizeof_field(struct iphdr, saddr) }, 584 + [IFLA_VTI_REMOTE] = { .len = sizeof_field(struct iphdr, daddr) }, 585 585 [IFLA_VTI_FWMARK] = { .type = NLA_U32 }, 586 586 }; 587 587
+1 -1
net/ipv4/tcp.c
··· 3949 3949 3950 3950 BUILD_BUG_ON(TCP_MIN_SND_MSS <= MAX_TCP_OPTION_SPACE); 3951 3951 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > 3952 - FIELD_SIZEOF(struct sk_buff, cb)); 3952 + sizeof_field(struct sk_buff, cb)); 3953 3953 3954 3954 percpu_counter_init(&tcp_sockets_allocated, 0, GFP_KERNEL); 3955 3955 percpu_counter_init(&tcp_orphan_count, 0, GFP_KERNEL);
+2 -2
net/ipv6/ip6_gre.c
··· 2170 2170 [IFLA_GRE_OFLAGS] = { .type = NLA_U16 }, 2171 2171 [IFLA_GRE_IKEY] = { .type = NLA_U32 }, 2172 2172 [IFLA_GRE_OKEY] = { .type = NLA_U32 }, 2173 - [IFLA_GRE_LOCAL] = { .len = FIELD_SIZEOF(struct ipv6hdr, saddr) }, 2174 - [IFLA_GRE_REMOTE] = { .len = FIELD_SIZEOF(struct ipv6hdr, daddr) }, 2173 + [IFLA_GRE_LOCAL] = { .len = sizeof_field(struct ipv6hdr, saddr) }, 2174 + [IFLA_GRE_REMOTE] = { .len = sizeof_field(struct ipv6hdr, daddr) }, 2175 2175 [IFLA_GRE_TTL] = { .type = NLA_U8 }, 2176 2176 [IFLA_GRE_ENCAP_LIMIT] = { .type = NLA_U8 }, 2177 2177 [IFLA_GRE_FLOWINFO] = { .type = NLA_U32 },
+1 -1
net/iucv/af_iucv.c
··· 50 50 static const u8 iprm_shutdown[8] = 51 51 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}; 52 52 53 - #define TRGCLS_SIZE FIELD_SIZEOF(struct iucv_message, class) 53 + #define TRGCLS_SIZE sizeof_field(struct iucv_message, class) 54 54 55 55 #define __iucv_sock_wait(sk, condition, timeo, ret) \ 56 56 do { \
+2 -2
net/netfilter/nf_tables_api.c
··· 7595 7595 return -EINVAL; 7596 7596 if (len == 0) 7597 7597 return -EINVAL; 7598 - if (reg * NFT_REG32_SIZE + len > FIELD_SIZEOF(struct nft_regs, data)) 7598 + if (reg * NFT_REG32_SIZE + len > sizeof_field(struct nft_regs, data)) 7599 7599 return -ERANGE; 7600 7600 7601 7601 return 0; ··· 7643 7643 if (len == 0) 7644 7644 return -EINVAL; 7645 7645 if (reg * NFT_REG32_SIZE + len > 7646 - FIELD_SIZEOF(struct nft_regs, data)) 7646 + sizeof_field(struct nft_regs, data)) 7647 7647 return -ERANGE; 7648 7648 7649 7649 if (data != NULL && type != NFT_DATA_VALUE)
+6 -6
net/netfilter/nft_ct.c
··· 440 440 441 441 switch (ctx->family) { 442 442 case NFPROTO_IPV4: 443 - len = FIELD_SIZEOF(struct nf_conntrack_tuple, 443 + len = sizeof_field(struct nf_conntrack_tuple, 444 444 src.u3.ip); 445 445 break; 446 446 case NFPROTO_IPV6: 447 447 case NFPROTO_INET: 448 - len = FIELD_SIZEOF(struct nf_conntrack_tuple, 448 + len = sizeof_field(struct nf_conntrack_tuple, 449 449 src.u3.ip6); 450 450 break; 451 451 default: ··· 457 457 if (tb[NFTA_CT_DIRECTION] == NULL) 458 458 return -EINVAL; 459 459 460 - len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u3.ip); 460 + len = sizeof_field(struct nf_conntrack_tuple, src.u3.ip); 461 461 break; 462 462 case NFT_CT_SRC_IP6: 463 463 case NFT_CT_DST_IP6: 464 464 if (tb[NFTA_CT_DIRECTION] == NULL) 465 465 return -EINVAL; 466 466 467 - len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u3.ip6); 467 + len = sizeof_field(struct nf_conntrack_tuple, src.u3.ip6); 468 468 break; 469 469 case NFT_CT_PROTO_SRC: 470 470 case NFT_CT_PROTO_DST: 471 471 if (tb[NFTA_CT_DIRECTION] == NULL) 472 472 return -EINVAL; 473 - len = FIELD_SIZEOF(struct nf_conntrack_tuple, src.u.all); 473 + len = sizeof_field(struct nf_conntrack_tuple, src.u.all); 474 474 break; 475 475 case NFT_CT_BYTES: 476 476 case NFT_CT_PKTS: ··· 551 551 case NFT_CT_MARK: 552 552 if (tb[NFTA_CT_DIRECTION]) 553 553 return -EINVAL; 554 - len = FIELD_SIZEOF(struct nf_conn, mark); 554 + len = sizeof_field(struct nf_conn, mark); 555 555 break; 556 556 #endif 557 557 #ifdef CONFIG_NF_CONNTRACK_LABELS
+1 -1
net/netfilter/nft_masq.c
··· 43 43 const struct nft_expr *expr, 44 44 const struct nlattr * const tb[]) 45 45 { 46 - u32 plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all); 46 + u32 plen = sizeof_field(struct nf_nat_range, min_addr.all); 47 47 struct nft_masq *priv = nft_expr_priv(expr); 48 48 int err; 49 49
+3 -3
net/netfilter/nft_nat.c
··· 141 141 142 142 switch (family) { 143 143 case NFPROTO_IPV4: 144 - alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip); 144 + alen = sizeof_field(struct nf_nat_range, min_addr.ip); 145 145 break; 146 146 case NFPROTO_IPV6: 147 - alen = FIELD_SIZEOF(struct nf_nat_range, min_addr.ip6); 147 + alen = sizeof_field(struct nf_nat_range, min_addr.ip6); 148 148 break; 149 149 default: 150 150 return -EAFNOSUPPORT; ··· 171 171 } 172 172 } 173 173 174 - plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all); 174 + plen = sizeof_field(struct nf_nat_range, min_addr.all); 175 175 if (tb[NFTA_NAT_REG_PROTO_MIN]) { 176 176 priv->sreg_proto_min = 177 177 nft_parse_register(tb[NFTA_NAT_REG_PROTO_MIN]);
+1 -1
net/netfilter/nft_redir.c
··· 48 48 unsigned int plen; 49 49 int err; 50 50 51 - plen = FIELD_SIZEOF(struct nf_nat_range, min_addr.all); 51 + plen = sizeof_field(struct nf_nat_range, min_addr.all); 52 52 if (tb[NFTA_REDIR_REG_PROTO_MIN]) { 53 53 priv->sreg_proto_min = 54 54 nft_parse_register(tb[NFTA_REDIR_REG_PROTO_MIN]);
+2 -2
net/netfilter/nft_tproxy.c
··· 218 218 219 219 switch (priv->family) { 220 220 case NFPROTO_IPV4: 221 - alen = FIELD_SIZEOF(union nf_inet_addr, in); 221 + alen = sizeof_field(union nf_inet_addr, in); 222 222 err = nf_defrag_ipv4_enable(ctx->net); 223 223 if (err) 224 224 return err; 225 225 break; 226 226 #if IS_ENABLED(CONFIG_NF_TABLES_IPV6) 227 227 case NFPROTO_IPV6: 228 - alen = FIELD_SIZEOF(union nf_inet_addr, in6); 228 + alen = sizeof_field(union nf_inet_addr, in6); 229 229 err = nf_defrag_ipv6_enable(ctx->net); 230 230 if (err) 231 231 return err;
+1 -1
net/netfilter/xt_RATEEST.c
··· 30 30 31 31 static unsigned int xt_rateest_hash(const char *name) 32 32 { 33 - return jhash(name, FIELD_SIZEOF(struct xt_rateest, name), jhash_rnd) & 33 + return jhash(name, sizeof_field(struct xt_rateest, name), jhash_rnd) & 34 34 (RATEEST_HSIZE - 1); 35 35 } 36 36
+1 -1
net/netlink/af_netlink.c
··· 2755 2755 if (err != 0) 2756 2756 goto out; 2757 2757 2758 - BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb)); 2758 + BUILD_BUG_ON(sizeof(struct netlink_skb_parms) > sizeof_field(struct sk_buff, cb)); 2759 2759 2760 2760 nl_table = kcalloc(MAX_LINKS, sizeof(*nl_table), GFP_KERNEL); 2761 2761 if (!nl_table)
+1 -1
net/openvswitch/datapath.c
··· 2497 2497 { 2498 2498 int err; 2499 2499 2500 - BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); 2500 + BUILD_BUG_ON(sizeof(struct ovs_skb_cb) > sizeof_field(struct sk_buff, cb)); 2501 2501 2502 2502 pr_info("Open vSwitch switching datapath\n"); 2503 2503
+2 -2
net/openvswitch/flow.h
··· 37 37 * matching for small options. 38 38 */ 39 39 #define TUN_METADATA_OFFSET(opt_len) \ 40 - (FIELD_SIZEOF(struct sw_flow_key, tun_opts) - opt_len) 40 + (sizeof_field(struct sw_flow_key, tun_opts) - opt_len) 41 41 #define TUN_METADATA_OPTS(flow_key, opt_len) \ 42 42 ((void *)((flow_key)->tun_opts + TUN_METADATA_OFFSET(opt_len))) 43 43 ··· 52 52 53 53 #define OVS_SW_FLOW_KEY_METADATA_SIZE \ 54 54 (offsetof(struct sw_flow_key, recirc_id) + \ 55 - FIELD_SIZEOF(struct sw_flow_key, recirc_id)) 55 + sizeof_field(struct sw_flow_key, recirc_id)) 56 56 57 57 struct ovs_key_nsh { 58 58 struct ovs_nsh_key_base base;
+1 -1
net/rxrpc/af_rxrpc.c
··· 972 972 int ret = -1; 973 973 unsigned int tmp; 974 974 975 - BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > FIELD_SIZEOF(struct sk_buff, cb)); 975 + BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb)); 976 976 977 977 get_random_bytes(&tmp, sizeof(tmp)); 978 978 tmp &= 0x3fffffff;
+2 -2
net/sched/act_ct.c
··· 312 312 u32 *labels_m) 313 313 { 314 314 #if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) 315 - size_t labels_sz = FIELD_SIZEOF(struct tcf_ct_params, labels); 315 + size_t labels_sz = sizeof_field(struct tcf_ct_params, labels); 316 316 317 317 if (!memchr_inv(labels_m, 0, labels_sz)) 318 318 return; ··· 936 936 937 937 static __net_init int ct_init_net(struct net *net) 938 938 { 939 - unsigned int n_bits = FIELD_SIZEOF(struct tcf_ct_params, labels) * 8; 939 + unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8; 940 940 struct tc_ct_action_net *tn = net_generic(net, ct_net_id); 941 941 942 942 if (nf_connlabels_get(net, n_bits - 1)) {
+1 -1
net/sched/cls_flower.c
··· 1481 1481 } 1482 1482 1483 1483 #define FL_KEY_MEMBER_OFFSET(member) offsetof(struct fl_flow_key, member) 1484 - #define FL_KEY_MEMBER_SIZE(member) FIELD_SIZEOF(struct fl_flow_key, member) 1484 + #define FL_KEY_MEMBER_SIZE(member) sizeof_field(struct fl_flow_key, member) 1485 1485 1486 1486 #define FL_KEY_IS_MASKED(mask, member) \ 1487 1487 memchr_inv(((char *)mask) + FL_KEY_MEMBER_OFFSET(member), \
+1 -1
net/unix/af_unix.c
··· 2865 2865 { 2866 2866 int rc = -1; 2867 2867 2868 - BUILD_BUG_ON(sizeof(struct unix_skb_parms) > FIELD_SIZEOF(struct sk_buff, cb)); 2868 + BUILD_BUG_ON(sizeof(struct unix_skb_parms) > sizeof_field(struct sk_buff, cb)); 2869 2869 2870 2870 rc = proto_register(&unix_proto, 1); 2871 2871 if (rc != 0) {
+2 -2
security/integrity/ima/ima_policy.c
··· 45 45 #define DONT_HASH 0x0200 46 46 47 47 #define INVALID_PCR(a) (((a) < 0) || \ 48 - (a) >= (FIELD_SIZEOF(struct integrity_iint_cache, measured_pcrs) * 8)) 48 + (a) >= (sizeof_field(struct integrity_iint_cache, measured_pcrs) * 8)) 49 49 50 50 int ima_policy_flag; 51 51 static int temp_ima_appraise; ··· 274 274 * lsm rules can change 275 275 */ 276 276 memcpy(nentry, entry, sizeof(*nentry)); 277 - memset(nentry->lsm, 0, FIELD_SIZEOF(struct ima_rule_entry, lsm)); 277 + memset(nentry->lsm, 0, sizeof_field(struct ima_rule_entry, lsm)); 278 278 279 279 for (i = 0; i < MAX_LSM_RULES; i++) { 280 280 if (!entry->lsm[i].rule)
+1 -1
sound/soc/codecs/hdmi-codec.c
··· 292 292 struct snd_ctl_elem_info *uinfo) 293 293 { 294 294 uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES; 295 - uinfo->count = FIELD_SIZEOF(struct hdmi_codec_priv, eld); 295 + uinfo->count = sizeof_field(struct hdmi_codec_priv, eld); 296 296 297 297 return 0; 298 298 }