Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Alexei Starovoitov says:

====================
pull-request: bpf-next 2021-09-17

We've added 63 non-merge commits during the last 12 day(s) which contain
a total of 65 files changed, 2653 insertions(+), 751 deletions(-).

The main changes are:

1) Streamline internal BPF program sections handling and
bpf_program__set_attach_target() in libbpf, from Andrii.

2) Add support for new btf kind BTF_KIND_TAG, from Yonghong.

3) Introduce bpf_get_branch_snapshot() to capture LBR, from Song.

4) IMUL optimization for x86-64 JIT, from Jie.

5) xsk selftest improvements, from Magnus.

6) Introduce legacy kprobe events support in libbpf, from Rafael.

7) Access hw timestamp through BPF's __sk_buff, from Vadim.

* https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (63 commits)
selftests/bpf: Fix a few compiler warnings
libbpf: Constify all high-level program attach APIs
libbpf: Schedule open_opts.attach_prog_fd deprecation since v0.7
selftests/bpf: Switch fexit_bpf2bpf selftest to set_attach_target() API
libbpf: Allow skipping attach_func_name in bpf_program__set_attach_target()
libbpf: Deprecate bpf_object_open_opts.relaxed_core_relocs
selftests/bpf: Stop using relaxed_core_relocs which has no effect
libbpf: Use pre-setup sec_def in libbpf_find_attach_btf_id()
bpf: Update bpf_get_smp_processor_id() documentation
libbpf: Add sphinx code documentation comments
selftests/bpf: Skip btf_tag test if btf_tag attribute not supported
docs/bpf: Add documentation for BTF_KIND_TAG
selftests/bpf: Add a test with a bpf program with btf_tag attributes
selftests/bpf: Test BTF_KIND_TAG for deduplication
selftests/bpf: Add BTF_KIND_TAG unit tests
selftests/bpf: Change NAME_NTH/IS_NAME_NTH for BTF_KIND_TAG format
selftests/bpf: Test libbpf API function btf__add_tag()
bpftool: Add support for BTF_KIND_TAG
libbpf: Add support for BTF_KIND_TAG
libbpf: Rename btf_{hash,equal}_int to btf_{hash,equal}_int_tag
...
====================

Link: https://lore.kernel.org/r/20210917173738.3397064-1-ast@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+2659 -757
+28 -1
Documentation/bpf/btf.rst
··· 85 85 #define BTF_KIND_VAR 14 /* Variable */ 86 86 #define BTF_KIND_DATASEC 15 /* Section */ 87 87 #define BTF_KIND_FLOAT 16 /* Floating point */ 88 + #define BTF_KIND_TAG 17 /* Tag */ 88 89 89 90 Note that the type section encodes debug info, not just pure types. 90 91 ``BTF_KIND_FUNC`` is not a type, and it represents a defined subprogram. ··· 107 106 * "size" tells the size of the type it is describing. 108 107 * 109 108 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, 110 - * FUNC and FUNC_PROTO. 109 + * FUNC, FUNC_PROTO and TAG. 111 110 * "type" is a type_id referring to another type. 112 111 */ 113 112 union { ··· 465 464 * ``size``: the size of the float type in bytes: 2, 4, 8, 12 or 16. 466 465 467 466 No additional type data follow ``btf_type``. 467 + 468 + 2.2.17 BTF_KIND_TAG 469 + ~~~~~~~~~~~~~~~~~~~ 470 + 471 + ``struct btf_type`` encoding requirement: 472 + * ``name_off``: offset to a non-empty string 473 + * ``info.kind_flag``: 0 474 + * ``info.kind``: BTF_KIND_TAG 475 + * ``info.vlen``: 0 476 + * ``type``: ``struct``, ``union``, ``func`` or ``var`` 477 + 478 + ``btf_type`` is followed by ``struct btf_tag``.:: 479 + 480 + struct btf_tag { 481 + __u32 component_idx; 482 + }; 483 + 484 + The ``name_off`` encodes btf_tag attribute string. 485 + The ``type`` should be ``struct``, ``union``, ``func`` or ``var``. 486 + For ``var`` type, ``btf_tag.component_idx`` must be ``-1``. 487 + For the other three types, if the btf_tag attribute is 488 + applied to the ``struct``, ``union`` or ``func`` itself, 489 + ``btf_tag.component_idx`` must be ``-1``. Otherwise, 490 + the attribute is applied to a ``struct``/``union`` member or 491 + a ``func`` argument, and ``btf_tag.component_idx`` should be a 492 + valid index (starting from 0) pointing to a member or an argument. 468 493 469 494 3. BTF Kernel API 470 495 *****************
+61 -6
arch/x86/events/intel/core.c
··· 2143 2143 * However, there are some cases which may change PEBS status, e.g. PMI 2144 2144 * throttle. The PEBS_ENABLE should be updated where the status changes. 2145 2145 */ 2146 - static void __intel_pmu_disable_all(void) 2146 + static __always_inline void __intel_pmu_disable_all(bool bts) 2147 2147 { 2148 2148 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2149 2149 2150 2150 wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0); 2151 2151 2152 - if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) 2152 + if (bts && test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) 2153 2153 intel_pmu_disable_bts(); 2154 2154 } 2155 2155 2156 - static void intel_pmu_disable_all(void) 2156 + static __always_inline void intel_pmu_disable_all(void) 2157 2157 { 2158 - __intel_pmu_disable_all(); 2158 + __intel_pmu_disable_all(true); 2159 2159 intel_pmu_pebs_disable_all(); 2160 2160 intel_pmu_lbr_disable_all(); 2161 2161 } ··· 2184 2184 { 2185 2185 intel_pmu_pebs_enable_all(); 2186 2186 __intel_pmu_enable_all(added, false); 2187 + } 2188 + 2189 + static noinline int 2190 + __intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, 2191 + unsigned int cnt, unsigned long flags) 2192 + { 2193 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 2194 + 2195 + intel_pmu_lbr_read(); 2196 + cnt = min_t(unsigned int, cnt, x86_pmu.lbr_nr); 2197 + 2198 + memcpy(entries, cpuc->lbr_entries, sizeof(struct perf_branch_entry) * cnt); 2199 + intel_pmu_enable_all(0); 2200 + local_irq_restore(flags); 2201 + return cnt; 2202 + } 2203 + 2204 + static int 2205 + intel_pmu_snapshot_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2206 + { 2207 + unsigned long flags; 2208 + 2209 + /* must not have branches... */ 2210 + local_irq_save(flags); 2211 + __intel_pmu_disable_all(false); /* we don't care about BTS */ 2212 + __intel_pmu_pebs_disable_all(); 2213 + __intel_pmu_lbr_disable(); 2214 + /* ... 
until here */ 2215 + return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2216 + } 2217 + 2218 + static int 2219 + intel_pmu_snapshot_arch_branch_stack(struct perf_branch_entry *entries, unsigned int cnt) 2220 + { 2221 + unsigned long flags; 2222 + 2223 + /* must not have branches... */ 2224 + local_irq_save(flags); 2225 + __intel_pmu_disable_all(false); /* we don't care about BTS */ 2226 + __intel_pmu_pebs_disable_all(); 2227 + __intel_pmu_arch_lbr_disable(); 2228 + /* ... until here */ 2229 + return __intel_pmu_snapshot_branch_stack(entries, cnt, flags); 2187 2230 } 2188 2231 2189 2232 /* ··· 2972 2929 apic_write(APIC_LVTPC, APIC_DM_NMI); 2973 2930 intel_bts_disable_local(); 2974 2931 cpuc->enabled = 0; 2975 - __intel_pmu_disable_all(); 2932 + __intel_pmu_disable_all(true); 2976 2933 handled = intel_pmu_drain_bts_buffer(); 2977 2934 handled += intel_bts_interrupt(); 2978 2935 status = intel_pmu_get_status(); ··· 6326 6283 x86_pmu.lbr_nr = 0; 6327 6284 } 6328 6285 6329 - if (x86_pmu.lbr_nr) 6286 + if (x86_pmu.lbr_nr) { 6330 6287 pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); 6288 + 6289 + /* only support branch_stack snapshot for perfmon >= v2 */ 6290 + if (x86_pmu.disable_all == intel_pmu_disable_all) { 6291 + if (boot_cpu_has(X86_FEATURE_ARCH_LBR)) { 6292 + static_call_update(perf_snapshot_branch_stack, 6293 + intel_pmu_snapshot_arch_branch_stack); 6294 + } else { 6295 + static_call_update(perf_snapshot_branch_stack, 6296 + intel_pmu_snapshot_branch_stack); 6297 + } 6298 + } 6299 + } 6331 6300 6332 6301 intel_pmu_check_extra_regs(x86_pmu.extra_regs); 6333 6302
+1 -1
arch/x86/events/intel/ds.c
··· 1301 1301 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 1302 1302 1303 1303 if (cpuc->pebs_enabled) 1304 - wrmsrl(MSR_IA32_PEBS_ENABLE, 0); 1304 + __intel_pmu_pebs_disable_all(); 1305 1305 } 1306 1306 1307 1307 static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)
+5 -15
arch/x86/events/intel/lbr.c
··· 228 228 wrmsrl(MSR_ARCH_LBR_CTL, lbr_select | ARCH_LBR_CTL_LBREN); 229 229 } 230 230 231 - static void __intel_pmu_lbr_disable(void) 232 - { 233 - u64 debugctl; 234 - 235 - if (static_cpu_has(X86_FEATURE_ARCH_LBR)) { 236 - wrmsrl(MSR_ARCH_LBR_CTL, 0); 237 - return; 238 - } 239 - 240 - rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 241 - debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 242 - wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 243 - } 244 - 245 231 void intel_pmu_lbr_reset_32(void) 246 232 { 247 233 int i; ··· 765 779 { 766 780 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); 767 781 768 - if (cpuc->lbr_users && !vlbr_exclude_host()) 782 + if (cpuc->lbr_users && !vlbr_exclude_host()) { 783 + if (static_cpu_has(X86_FEATURE_ARCH_LBR)) 784 + return __intel_pmu_arch_lbr_disable(); 785 + 769 786 __intel_pmu_lbr_disable(); 787 + } 770 788 } 771 789 772 790 void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
+19
arch/x86/events/perf_event.h
··· 1240 1240 return intel_pmu_has_bts_period(event, hwc->sample_period); 1241 1241 } 1242 1242 1243 + static __always_inline void __intel_pmu_pebs_disable_all(void) 1244 + { 1245 + wrmsrl(MSR_IA32_PEBS_ENABLE, 0); 1246 + } 1247 + 1248 + static __always_inline void __intel_pmu_arch_lbr_disable(void) 1249 + { 1250 + wrmsrl(MSR_ARCH_LBR_CTL, 0); 1251 + } 1252 + 1253 + static __always_inline void __intel_pmu_lbr_disable(void) 1254 + { 1255 + u64 debugctl; 1256 + 1257 + rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1258 + debugctl &= ~(DEBUGCTLMSR_LBR | DEBUGCTLMSR_FREEZE_LBRS_ON_PMI); 1259 + wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctl); 1260 + } 1261 + 1243 1262 int intel_pmu_save_and_restart(struct perf_event *event); 1244 1263 1245 1264 struct event_constraint *
+24 -31
arch/x86/net/bpf_jit_comp.c
··· 1070 1070 break; 1071 1071 1072 1072 case BPF_ALU | BPF_MUL | BPF_K: 1073 - case BPF_ALU | BPF_MUL | BPF_X: 1074 1073 case BPF_ALU64 | BPF_MUL | BPF_K: 1075 - case BPF_ALU64 | BPF_MUL | BPF_X: 1076 - { 1077 - bool is64 = BPF_CLASS(insn->code) == BPF_ALU64; 1074 + if (BPF_CLASS(insn->code) == BPF_ALU64) 1075 + EMIT1(add_2mod(0x48, dst_reg, dst_reg)); 1076 + else if (is_ereg(dst_reg)) 1077 + EMIT1(add_2mod(0x40, dst_reg, dst_reg)); 1078 1078 1079 - if (dst_reg != BPF_REG_0) 1080 - EMIT1(0x50); /* push rax */ 1081 - if (dst_reg != BPF_REG_3) 1082 - EMIT1(0x52); /* push rdx */ 1083 - 1084 - /* mov r11, dst_reg */ 1085 - EMIT_mov(AUX_REG, dst_reg); 1086 - 1087 - if (BPF_SRC(insn->code) == BPF_X) 1088 - emit_mov_reg(&prog, is64, BPF_REG_0, src_reg); 1079 + if (is_imm8(imm32)) 1080 + /* imul dst_reg, dst_reg, imm8 */ 1081 + EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg), 1082 + imm32); 1089 1083 else 1090 - emit_mov_imm32(&prog, is64, BPF_REG_0, imm32); 1091 - 1092 - if (is64) 1093 - EMIT1(add_1mod(0x48, AUX_REG)); 1094 - else if (is_ereg(AUX_REG)) 1095 - EMIT1(add_1mod(0x40, AUX_REG)); 1096 - /* mul(q) r11 */ 1097 - EMIT2(0xF7, add_1reg(0xE0, AUX_REG)); 1098 - 1099 - if (dst_reg != BPF_REG_3) 1100 - EMIT1(0x5A); /* pop rdx */ 1101 - if (dst_reg != BPF_REG_0) { 1102 - /* mov dst_reg, rax */ 1103 - EMIT_mov(dst_reg, BPF_REG_0); 1104 - EMIT1(0x58); /* pop rax */ 1105 - } 1084 + /* imul dst_reg, dst_reg, imm32 */ 1085 + EMIT2_off32(0x69, 1086 + add_2reg(0xC0, dst_reg, dst_reg), 1087 + imm32); 1106 1088 break; 1107 - } 1089 + 1090 + case BPF_ALU | BPF_MUL | BPF_X: 1091 + case BPF_ALU64 | BPF_MUL | BPF_X: 1092 + if (BPF_CLASS(insn->code) == BPF_ALU64) 1093 + EMIT1(add_2mod(0x48, src_reg, dst_reg)); 1094 + else if (is_ereg(dst_reg) || is_ereg(src_reg)) 1095 + EMIT1(add_2mod(0x40, src_reg, dst_reg)); 1096 + 1097 + /* imul dst_reg, src_reg */ 1098 + EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg)); 1099 + break; 1100 + 1108 1101 /* Shifts */ 1109 1102 case BPF_ALU | BPF_LSH 
| BPF_K: 1110 1103 case BPF_ALU | BPF_RSH | BPF_K:
+23
include/linux/perf_event.h
··· 57 57 #include <linux/cgroup.h> 58 58 #include <linux/refcount.h> 59 59 #include <linux/security.h> 60 + #include <linux/static_call.h> 60 61 #include <asm/local.h> 61 62 62 63 struct perf_callchain_entry { ··· 1612 1611 #ifdef CONFIG_MMU 1613 1612 extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr); 1614 1613 #endif 1614 + 1615 + /* 1616 + * Snapshot branch stack on software events. 1617 + * 1618 + * Branch stack can be very useful in understanding software events. For 1619 + * example, when a long function, e.g. sys_perf_event_open, returns an 1620 + * errno, it is not obvious why the function failed. Branch stack could 1621 + * provide very helpful information in this type of scenarios. 1622 + * 1623 + * On software event, it is necessary to stop the hardware branch recorder 1624 + * fast. Otherwise, the hardware register/buffer will be flushed with 1625 + * entries of the triggering event. Therefore, static call is used to 1626 + * stop the hardware recorder. 1627 + */ 1628 + 1629 + /* 1630 + * cnt is the number of entries allocated for entries. 1631 + * Return number of entries copied to . 1632 + */ 1633 + typedef int (perf_snapshot_branch_stack_t)(struct perf_branch_entry *entries, 1634 + unsigned int cnt); 1635 + DECLARE_STATIC_CALL(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t); 1615 1636 1616 1637 #endif /* _LINUX_PERF_EVENT_H */
+25 -1
include/uapi/linux/bpf.h
··· 1629 1629 * u32 bpf_get_smp_processor_id(void) 1630 1630 * Description 1631 1631 * Get the SMP (symmetric multiprocessing) processor id. Note that 1632 - * all programs run with preemption disabled, which means that the 1632 + * all programs run with migration disabled, which means that the 1633 1633 * SMP processor id is stable during all the execution of the 1634 1634 * program. 1635 1635 * Return ··· 4877 4877 * Get the struct pt_regs associated with **task**. 4878 4878 * Return 4879 4879 * A pointer to struct pt_regs. 4880 + * 4881 + * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags) 4882 + * Description 4883 + * Get branch trace from hardware engines like Intel LBR. The 4884 + * hardware engine is stopped shortly after the helper is 4885 + * called. Therefore, the user need to filter branch entries 4886 + * based on the actual use case. To capture branch trace 4887 + * before the trigger point of the BPF program, the helper 4888 + * should be called at the beginning of the BPF program. 4889 + * 4890 + * The data is stored as struct perf_branch_entry into output 4891 + * buffer *entries*. *size* is the size of *entries* in bytes. 4892 + * *flags* is reserved for now and must be zero. 4893 + * 4894 + * Return 4895 + * On success, number of bytes written to *buf*. On error, a 4896 + * negative value. 4897 + * 4898 + * **-EINVAL** if *flags* is not zero. 4899 + * 4900 + * **-ENOENT** if architecture does not support branch records. 4880 4901 */ 4881 4902 #define __BPF_FUNC_MAPPER(FN) \ 4882 4903 FN(unspec), \ ··· 5076 5055 FN(get_func_ip), \ 5077 5056 FN(get_attach_cookie), \ 5078 5057 FN(task_pt_regs), \ 5058 + FN(get_branch_snapshot), \ 5079 5059 /* */ 5080 5060 5081 5061 /* integer value in 'imm' field of BPF_CALL instruction selects which helper ··· 5306 5284 __u32 gso_segs; 5307 5285 __bpf_md_ptr(struct bpf_sock *, sk); 5308 5286 __u32 gso_size; 5287 + __u32 :32; /* Padding, future use. 
*/ 5288 + __u64 hwtstamp; 5309 5289 }; 5310 5290 5311 5291 struct bpf_tunnel_key {
+35 -20
include/uapi/linux/btf.h
··· 43 43 * "size" tells the size of the type it is describing. 44 44 * 45 45 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, 46 - * FUNC, FUNC_PROTO and VAR. 46 + * FUNC, FUNC_PROTO, VAR and TAG. 47 47 * "type" is a type_id referring to another type. 48 48 */ 49 49 union { ··· 56 56 #define BTF_INFO_VLEN(info) ((info) & 0xffff) 57 57 #define BTF_INFO_KFLAG(info) ((info) >> 31) 58 58 59 - #define BTF_KIND_UNKN 0 /* Unknown */ 60 - #define BTF_KIND_INT 1 /* Integer */ 61 - #define BTF_KIND_PTR 2 /* Pointer */ 62 - #define BTF_KIND_ARRAY 3 /* Array */ 63 - #define BTF_KIND_STRUCT 4 /* Struct */ 64 - #define BTF_KIND_UNION 5 /* Union */ 65 - #define BTF_KIND_ENUM 6 /* Enumeration */ 66 - #define BTF_KIND_FWD 7 /* Forward */ 67 - #define BTF_KIND_TYPEDEF 8 /* Typedef */ 68 - #define BTF_KIND_VOLATILE 9 /* Volatile */ 69 - #define BTF_KIND_CONST 10 /* Const */ 70 - #define BTF_KIND_RESTRICT 11 /* Restrict */ 71 - #define BTF_KIND_FUNC 12 /* Function */ 72 - #define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ 73 - #define BTF_KIND_VAR 14 /* Variable */ 74 - #define BTF_KIND_DATASEC 15 /* Section */ 75 - #define BTF_KIND_FLOAT 16 /* Floating point */ 76 - #define BTF_KIND_MAX BTF_KIND_FLOAT 77 - #define NR_BTF_KINDS (BTF_KIND_MAX + 1) 59 + enum { 60 + BTF_KIND_UNKN = 0, /* Unknown */ 61 + BTF_KIND_INT = 1, /* Integer */ 62 + BTF_KIND_PTR = 2, /* Pointer */ 63 + BTF_KIND_ARRAY = 3, /* Array */ 64 + BTF_KIND_STRUCT = 4, /* Struct */ 65 + BTF_KIND_UNION = 5, /* Union */ 66 + BTF_KIND_ENUM = 6, /* Enumeration */ 67 + BTF_KIND_FWD = 7, /* Forward */ 68 + BTF_KIND_TYPEDEF = 8, /* Typedef */ 69 + BTF_KIND_VOLATILE = 9, /* Volatile */ 70 + BTF_KIND_CONST = 10, /* Const */ 71 + BTF_KIND_RESTRICT = 11, /* Restrict */ 72 + BTF_KIND_FUNC = 12, /* Function */ 73 + BTF_KIND_FUNC_PROTO = 13, /* Function Proto */ 74 + BTF_KIND_VAR = 14, /* Variable */ 75 + BTF_KIND_DATASEC = 15, /* Section */ 76 + BTF_KIND_FLOAT = 16, /* Floating point */ 77 + BTF_KIND_TAG = 17, /* Tag */ 78 
+ 79 + NR_BTF_KINDS, 80 + BTF_KIND_MAX = NR_BTF_KINDS - 1, 81 + }; 78 82 79 83 /* For some specific BTF_KIND, "struct btf_type" is immediately 80 84 * followed by extra data. ··· 172 168 __u32 type; 173 169 __u32 offset; 174 170 __u32 size; 171 + }; 172 + 173 + /* BTF_KIND_TAG is followed by a single "struct btf_tag" to describe 174 + * additional information related to the tag applied location. 175 + * If component_idx == -1, the tag is applied to a struct, union, 176 + * variable or function. Otherwise, it is applied to a struct/union 177 + * member or a func argument, and component_idx indicates which member 178 + * or argument (0 ... vlen-1). 179 + */ 180 + struct btf_tag { 181 + __s32 component_idx; 175 182 }; 176 183 177 184 #endif /* _UAPI__LINUX_BTF_H__ */
+128
kernel/bpf/btf.c
··· 281 281 [BTF_KIND_VAR] = "VAR", 282 282 [BTF_KIND_DATASEC] = "DATASEC", 283 283 [BTF_KIND_FLOAT] = "FLOAT", 284 + [BTF_KIND_TAG] = "TAG", 284 285 }; 285 286 286 287 const char *btf_type_str(const struct btf_type *t) ··· 460 459 return BTF_INFO_KIND(t->info) == BTF_KIND_DATASEC; 461 460 } 462 461 462 + static bool btf_type_is_tag(const struct btf_type *t) 463 + { 464 + return BTF_INFO_KIND(t->info) == BTF_KIND_TAG; 465 + } 466 + 467 + static bool btf_type_is_tag_target(const struct btf_type *t) 468 + { 469 + return btf_type_is_func(t) || btf_type_is_struct(t) || 470 + btf_type_is_var(t); 471 + } 472 + 463 473 u32 btf_nr_types(const struct btf *btf) 464 474 { 465 475 u32 total = 0; ··· 549 537 static bool btf_type_is_resolve_source_only(const struct btf_type *t) 550 538 { 551 539 return btf_type_is_var(t) || 540 + btf_type_is_tag(t) || 552 541 btf_type_is_datasec(t); 553 542 } 554 543 ··· 576 563 btf_type_is_struct(t) || 577 564 btf_type_is_array(t) || 578 565 btf_type_is_var(t) || 566 + btf_type_is_tag(t) || 579 567 btf_type_is_datasec(t); 580 568 } 581 569 ··· 628 614 static const struct btf_var *btf_type_var(const struct btf_type *t) 629 615 { 630 616 return (const struct btf_var *)(t + 1); 617 + } 618 + 619 + static const struct btf_tag *btf_type_tag(const struct btf_type *t) 620 + { 621 + return (const struct btf_tag *)(t + 1); 631 622 } 632 623 633 624 static const struct btf_kind_operations *btf_type_ops(const struct btf_type *t) ··· 3820 3801 .show = btf_df_show, 3821 3802 }; 3822 3803 3804 + static s32 btf_tag_check_meta(struct btf_verifier_env *env, 3805 + const struct btf_type *t, 3806 + u32 meta_left) 3807 + { 3808 + const struct btf_tag *tag; 3809 + u32 meta_needed = sizeof(*tag); 3810 + s32 component_idx; 3811 + const char *value; 3812 + 3813 + if (meta_left < meta_needed) { 3814 + btf_verifier_log_basic(env, t, 3815 + "meta_left:%u meta_needed:%u", 3816 + meta_left, meta_needed); 3817 + return -EINVAL; 3818 + } 3819 + 3820 + value = 
btf_name_by_offset(env->btf, t->name_off); 3821 + if (!value || !value[0]) { 3822 + btf_verifier_log_type(env, t, "Invalid value"); 3823 + return -EINVAL; 3824 + } 3825 + 3826 + if (btf_type_vlen(t)) { 3827 + btf_verifier_log_type(env, t, "vlen != 0"); 3828 + return -EINVAL; 3829 + } 3830 + 3831 + if (btf_type_kflag(t)) { 3832 + btf_verifier_log_type(env, t, "Invalid btf_info kind_flag"); 3833 + return -EINVAL; 3834 + } 3835 + 3836 + component_idx = btf_type_tag(t)->component_idx; 3837 + if (component_idx < -1) { 3838 + btf_verifier_log_type(env, t, "Invalid component_idx"); 3839 + return -EINVAL; 3840 + } 3841 + 3842 + btf_verifier_log_type(env, t, NULL); 3843 + 3844 + return meta_needed; 3845 + } 3846 + 3847 + static int btf_tag_resolve(struct btf_verifier_env *env, 3848 + const struct resolve_vertex *v) 3849 + { 3850 + const struct btf_type *next_type; 3851 + const struct btf_type *t = v->t; 3852 + u32 next_type_id = t->type; 3853 + struct btf *btf = env->btf; 3854 + s32 component_idx; 3855 + u32 vlen; 3856 + 3857 + next_type = btf_type_by_id(btf, next_type_id); 3858 + if (!next_type || !btf_type_is_tag_target(next_type)) { 3859 + btf_verifier_log_type(env, v->t, "Invalid type_id"); 3860 + return -EINVAL; 3861 + } 3862 + 3863 + if (!env_type_is_resolve_sink(env, next_type) && 3864 + !env_type_is_resolved(env, next_type_id)) 3865 + return env_stack_push(env, next_type, next_type_id); 3866 + 3867 + component_idx = btf_type_tag(t)->component_idx; 3868 + if (component_idx != -1) { 3869 + if (btf_type_is_var(next_type)) { 3870 + btf_verifier_log_type(env, v->t, "Invalid component_idx"); 3871 + return -EINVAL; 3872 + } 3873 + 3874 + if (btf_type_is_struct(next_type)) { 3875 + vlen = btf_type_vlen(next_type); 3876 + } else { 3877 + /* next_type should be a function */ 3878 + next_type = btf_type_by_id(btf, next_type->type); 3879 + vlen = btf_type_vlen(next_type); 3880 + } 3881 + 3882 + if ((u32)component_idx >= vlen) { 3883 + btf_verifier_log_type(env, v->t, "Invalid 
component_idx"); 3884 + return -EINVAL; 3885 + } 3886 + } 3887 + 3888 + env_stack_pop_resolved(env, next_type_id, 0); 3889 + 3890 + return 0; 3891 + } 3892 + 3893 + static void btf_tag_log(struct btf_verifier_env *env, const struct btf_type *t) 3894 + { 3895 + btf_verifier_log(env, "type=%u component_idx=%d", t->type, 3896 + btf_type_tag(t)->component_idx); 3897 + } 3898 + 3899 + static const struct btf_kind_operations tag_ops = { 3900 + .check_meta = btf_tag_check_meta, 3901 + .resolve = btf_tag_resolve, 3902 + .check_member = btf_df_check_member, 3903 + .check_kflag_member = btf_df_check_kflag_member, 3904 + .log_details = btf_tag_log, 3905 + .show = btf_df_show, 3906 + }; 3907 + 3823 3908 static int btf_func_proto_check(struct btf_verifier_env *env, 3824 3909 const struct btf_type *t) 3825 3910 { ··· 4058 3935 [BTF_KIND_VAR] = &var_ops, 4059 3936 [BTF_KIND_DATASEC] = &datasec_ops, 4060 3937 [BTF_KIND_FLOAT] = &float_ops, 3938 + [BTF_KIND_TAG] = &tag_ops, 4061 3939 }; 4062 3940 4063 3941 static s32 btf_check_meta(struct btf_verifier_env *env, ··· 4141 4017 4142 4018 if (btf_type_is_struct(t) || btf_type_is_datasec(t)) 4143 4019 return !btf_resolved_type_id(btf, type_id) && 4020 + !btf_resolved_type_size(btf, type_id); 4021 + 4022 + if (btf_type_is_tag(t)) 4023 + return btf_resolved_type_id(btf, type_id) && 4144 4024 !btf_resolved_type_size(btf, type_id); 4145 4025 4146 4026 if (btf_type_is_modifier(t) || btf_type_is_ptr(t) ||
+2 -1
kernel/bpf/trampoline.c
··· 10 10 #include <linux/rcupdate_trace.h> 11 11 #include <linux/rcupdate_wait.h> 12 12 #include <linux/module.h> 13 + #include <linux/static_call.h> 13 14 14 15 /* dummy _ops. The verifier will operate on target program's ops. */ 15 16 const struct bpf_verifier_ops bpf_extension_verifier_ops = { ··· 527 526 } 528 527 529 528 #define NO_START_TIME 1 530 - static u64 notrace bpf_prog_start_time(void) 529 + static __always_inline u64 notrace bpf_prog_start_time(void) 531 530 { 532 531 u64 start = NO_START_TIME; 533 532
+2
kernel/events/core.c
··· 13435 13435 .threaded = true, 13436 13436 }; 13437 13437 #endif /* CONFIG_CGROUP_PERF */ 13438 + 13439 + DEFINE_STATIC_CALL_RET0(perf_snapshot_branch_stack, perf_snapshot_branch_stack_t);
+30
kernel/trace/bpf_trace.c
··· 1017 1017 .arg1_type = ARG_PTR_TO_CTX, 1018 1018 }; 1019 1019 1020 + BPF_CALL_3(bpf_get_branch_snapshot, void *, buf, u32, size, u64, flags) 1021 + { 1022 + #ifndef CONFIG_X86 1023 + return -ENOENT; 1024 + #else 1025 + static const u32 br_entry_size = sizeof(struct perf_branch_entry); 1026 + u32 entry_cnt = size / br_entry_size; 1027 + 1028 + entry_cnt = static_call(perf_snapshot_branch_stack)(buf, entry_cnt); 1029 + 1030 + if (unlikely(flags)) 1031 + return -EINVAL; 1032 + 1033 + if (!entry_cnt) 1034 + return -ENOENT; 1035 + 1036 + return entry_cnt * br_entry_size; 1037 + #endif 1038 + } 1039 + 1040 + static const struct bpf_func_proto bpf_get_branch_snapshot_proto = { 1041 + .func = bpf_get_branch_snapshot, 1042 + .gpl_only = true, 1043 + .ret_type = RET_INTEGER, 1044 + .arg1_type = ARG_PTR_TO_UNINIT_MEM, 1045 + .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1046 + }; 1047 + 1020 1048 static const struct bpf_func_proto * 1021 1049 bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1022 1050 { ··· 1160 1132 return &bpf_snprintf_proto; 1161 1133 case BPF_FUNC_get_func_ip: 1162 1134 return &bpf_get_func_ip_proto_tracing; 1135 + case BPF_FUNC_get_branch_snapshot: 1136 + return &bpf_get_branch_snapshot_proto; 1163 1137 default: 1164 1138 return bpf_base_func_proto(func_id); 1165 1139 }
+1
lib/test_bpf.c
··· 8800 8800 skb_shinfo(skb[0])->gso_type |= SKB_GSO_DODGY; 8801 8801 skb_shinfo(skb[0])->gso_segs = 0; 8802 8802 skb_shinfo(skb[0])->frag_list = skb[1]; 8803 + skb_shinfo(skb[0])->hwtstamps.hwtstamp = 1000; 8803 8804 8804 8805 /* adjust skb[0]'s len */ 8805 8806 skb[0]->len += skb[1]->len;
+11 -5
net/bpf/test_run.c
··· 483 483 return -EINVAL; 484 484 485 485 /* priority is allowed */ 486 - 487 - if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority), 488 - offsetof(struct __sk_buff, ifindex))) 489 - return -EINVAL; 490 - 486 + /* ingress_ifindex is allowed */ 491 487 /* ifindex is allowed */ 492 488 493 489 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex), ··· 507 511 /* gso_size is allowed */ 508 512 509 513 if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size), 514 + offsetof(struct __sk_buff, hwtstamp))) 515 + return -EINVAL; 516 + 517 + /* hwtstamp is allowed */ 518 + 519 + if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp), 510 520 sizeof(struct __sk_buff))) 511 521 return -EINVAL; 512 522 513 523 skb->mark = __skb->mark; 514 524 skb->priority = __skb->priority; 525 + skb->skb_iif = __skb->ingress_ifindex; 515 526 skb->tstamp = __skb->tstamp; 516 527 memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN); 517 528 ··· 535 532 return -EINVAL; 536 533 skb_shinfo(skb)->gso_segs = __skb->gso_segs; 537 534 skb_shinfo(skb)->gso_size = __skb->gso_size; 535 + skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp; 538 536 539 537 return 0; 540 538 } ··· 549 545 550 546 __skb->mark = skb->mark; 551 547 __skb->priority = skb->priority; 548 + __skb->ingress_ifindex = skb->skb_iif; 552 549 __skb->ifindex = skb->dev->ifindex; 553 550 __skb->tstamp = skb->tstamp; 554 551 memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN); 555 552 __skb->wire_len = cb->pkt_len; 556 553 __skb->gso_segs = skb_shinfo(skb)->gso_segs; 554 + __skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp; 557 555 } 558 556 559 557 int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
+21
net/core/filter.c
··· 7765 7765 break; 7766 7766 case bpf_ctx_range_ptr(struct __sk_buff, flow_keys): 7767 7767 return false; 7768 + case bpf_ctx_range(struct __sk_buff, hwtstamp): 7769 + if (type == BPF_WRITE || size != sizeof(__u64)) 7770 + return false; 7771 + break; 7768 7772 case bpf_ctx_range(struct __sk_buff, tstamp): 7769 7773 if (size != sizeof(__u64)) 7770 7774 return false; ··· 7778 7774 return false; 7779 7775 info->reg_type = PTR_TO_SOCK_COMMON_OR_NULL; 7780 7776 break; 7777 + case offsetofend(struct __sk_buff, gso_size) ... offsetof(struct __sk_buff, hwtstamp) - 1: 7778 + /* Explicitly prohibit access to padding in __sk_buff. */ 7779 + return false; 7781 7780 default: 7782 7781 /* Only narrow read access allowed for now. */ 7783 7782 if (type == BPF_WRITE) { ··· 7809 7802 case bpf_ctx_range_till(struct __sk_buff, family, local_port): 7810 7803 case bpf_ctx_range(struct __sk_buff, tstamp): 7811 7804 case bpf_ctx_range(struct __sk_buff, wire_len): 7805 + case bpf_ctx_range(struct __sk_buff, hwtstamp): 7812 7806 return false; 7813 7807 } 7814 7808 ··· 7880 7872 case bpf_ctx_range(struct __sk_buff, data_meta): 7881 7873 case bpf_ctx_range(struct __sk_buff, tstamp): 7882 7874 case bpf_ctx_range(struct __sk_buff, wire_len): 7875 + case bpf_ctx_range(struct __sk_buff, hwtstamp): 7883 7876 return false; 7884 7877 } 7885 7878 ··· 8382 8373 case bpf_ctx_range(struct __sk_buff, data_meta): 8383 8374 case bpf_ctx_range(struct __sk_buff, tstamp): 8384 8375 case bpf_ctx_range(struct __sk_buff, wire_len): 8376 + case bpf_ctx_range(struct __sk_buff, hwtstamp): 8385 8377 return false; 8386 8378 } 8387 8379 ··· 8893 8883 *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, sk), 8894 8884 si->dst_reg, si->src_reg, 8895 8885 offsetof(struct sk_buff, sk)); 8886 + break; 8887 + case offsetof(struct __sk_buff, hwtstamp): 8888 + BUILD_BUG_ON(sizeof_field(struct skb_shared_hwtstamps, hwtstamp) != 8); 8889 + BUILD_BUG_ON(offsetof(struct skb_shared_hwtstamps, hwtstamp) != 0); 8890 + 8891 + 
insn = bpf_convert_shinfo_access(si, insn); 8892 + *insn++ = BPF_LDX_MEM(BPF_DW, 8893 + si->dst_reg, si->dst_reg, 8894 + bpf_target_off(struct skb_shared_info, 8895 + hwtstamps, 8, 8896 + target_size)); 8896 8897 break; 8897 8898 } 8898 8899
+3
tools/bpf/bpftool/Makefile
··· 137 137 BPFTOOL_BOOTSTRAP := $(BOOTSTRAP_OUTPUT)bpftool 138 138 139 139 BOOTSTRAP_OBJS = $(addprefix $(BOOTSTRAP_OUTPUT),main.o common.o json_writer.o gen.o btf.o xlated_dumper.o btf_dumper.o disasm.o) 140 + $(BOOTSTRAP_OBJS): $(LIBBPF_BOOTSTRAP) 141 + 140 142 OBJS = $(patsubst %.c,$(OUTPUT)%.o,$(SRCS)) $(OUTPUT)disasm.o 143 + $(OBJS): $(LIBBPF) 141 144 142 145 VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux) \ 143 146 $(if $(KBUILD_OUTPUT),$(KBUILD_OUTPUT)/vmlinux) \
+12
tools/bpf/bpftool/btf.c
··· 37 37 [BTF_KIND_VAR] = "VAR", 38 38 [BTF_KIND_DATASEC] = "DATASEC", 39 39 [BTF_KIND_FLOAT] = "FLOAT", 40 + [BTF_KIND_TAG] = "TAG", 40 41 }; 41 42 42 43 struct btf_attach_table { ··· 346 345 jsonw_uint_field(w, "size", t->size); 347 346 else 348 347 printf(" size=%u", t->size); 348 + break; 349 + } 350 + case BTF_KIND_TAG: { 351 + const struct btf_tag *tag = (const void *)(t + 1); 352 + 353 + if (json_output) { 354 + jsonw_uint_field(w, "type_id", t->type); 355 + jsonw_int_field(w, "component_idx", tag->component_idx); 356 + } else { 357 + printf(" type_id=%u component_idx=%d", t->type, tag->component_idx); 358 + } 349 359 break; 350 360 } 351 361 default:
+19 -12
tools/bpf/bpftool/gen.c
··· 238 238 } else if (c == '\n') { 239 239 break; 240 240 } else { 241 - p_err("unrecognized character at pos %td in template '%s'", 242 - src - template - 1, template); 241 + p_err("unrecognized character at pos %td in template '%s': '%c'", 242 + src - template - 1, template, c); 243 243 free(s); 244 244 exit(-1); 245 245 } ··· 406 406 } 407 407 408 408 bpf_object__for_each_map(map, obj) { 409 - const char * ident; 409 + const char *ident; 410 410 411 411 ident = get_map_ident(map); 412 412 if (!ident) ··· 862 862 codegen("\ 863 863 \n\ 864 864 \n\ 865 + static inline const void *%1$s__elf_bytes(size_t *sz); \n\ 866 + \n\ 865 867 static inline int \n\ 866 868 %1$s__create_skeleton(struct %1$s *obj) \n\ 867 869 { \n\ ··· 945 943 codegen("\ 946 944 \n\ 947 945 \n\ 948 - s->data_sz = %d; \n\ 949 - s->data = (void *)\"\\ \n\ 950 - ", 951 - file_sz); 946 + s->data = (void *)%2$s__elf_bytes(&s->data_sz); \n\ 947 + \n\ 948 + return 0; \n\ 949 + err: \n\ 950 + bpf_object__destroy_skeleton(s); \n\ 951 + return -ENOMEM; \n\ 952 + } \n\ 953 + \n\ 954 + static inline const void *%2$s__elf_bytes(size_t *sz) \n\ 955 + { \n\ 956 + *sz = %1$d; \n\ 957 + return (const void *)\"\\ \n\ 958 + " 959 + , file_sz, obj_name); 952 960 953 961 /* embed contents of BPF object file */ 954 962 print_hex(obj_data, file_sz); ··· 966 954 codegen("\ 967 955 \n\ 968 956 \"; \n\ 969 - \n\ 970 - return 0; \n\ 971 - err: \n\ 972 - bpf_object__destroy_skeleton(s); \n\ 973 - return -ENOMEM; \n\ 974 957 } \n\ 975 958 \n\ 976 959 #endif /* %s */ \n\
+3 -2
tools/bpf/resolve_btfids/Makefile
··· 26 26 SUBCMD_SRC := $(srctree)/tools/lib/subcmd/ 27 27 28 28 BPFOBJ := $(OUTPUT)/libbpf/libbpf.a 29 + LIBBPF_OUT := $(abspath $(dir $(BPFOBJ)))/ 29 30 SUBCMDOBJ := $(OUTPUT)/libsubcmd/libsubcmd.a 30 31 31 32 BINARY := $(OUTPUT)/resolve_btfids ··· 42 41 $(Q)$(MAKE) -C $(SUBCMD_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) 43 42 44 43 $(BPFOBJ): $(wildcard $(LIBBPF_SRC)/*.[ch] $(LIBBPF_SRC)/Makefile) | $(OUTPUT)/libbpf 45 - $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(abspath $(dir $@))/ $(abspath $@) 44 + $(Q)$(MAKE) $(submake_extras) -C $(LIBBPF_SRC) OUTPUT=$(LIBBPF_OUT) $(abspath $@) 46 45 47 46 CFLAGS := -g \ 48 47 -I$(srctree)/tools/include \ ··· 55 54 export srctree OUTPUT CFLAGS Q 56 55 include $(srctree)/tools/build/Makefile.include 57 56 58 - $(BINARY_IN): fixdep FORCE | $(OUTPUT) 57 + $(BINARY_IN): $(BPFOBJ) fixdep FORCE | $(OUTPUT) 59 58 $(Q)$(MAKE) $(build)=resolve_btfids 60 59 61 60 $(BINARY): $(BPFOBJ) $(SUBCMDOBJ) $(BINARY_IN)
+25 -1
tools/include/uapi/linux/bpf.h
··· 1629 1629 * u32 bpf_get_smp_processor_id(void) 1630 1630 * Description 1631 1631 * Get the SMP (symmetric multiprocessing) processor id. Note that 1632 - * all programs run with preemption disabled, which means that the 1632 + * all programs run with migration disabled, which means that the 1633 1633 * SMP processor id is stable during all the execution of the 1634 1634 * program. 1635 1635 * Return ··· 4877 4877 * Get the struct pt_regs associated with **task**. 4878 4878 * Return 4879 4879 * A pointer to struct pt_regs. 4880 + * 4881 + * long bpf_get_branch_snapshot(void *entries, u32 size, u64 flags) 4882 + * Description 4883 + * Get branch trace from hardware engines like Intel LBR. The 4884 + * hardware engine is stopped shortly after the helper is 4885 + * called. Therefore, the user need to filter branch entries 4886 + * based on the actual use case. To capture branch trace 4887 + * before the trigger point of the BPF program, the helper 4888 + * should be called at the beginning of the BPF program. 4889 + * 4890 + * The data is stored as struct perf_branch_entry into output 4891 + * buffer *entries*. *size* is the size of *entries* in bytes. 4892 + * *flags* is reserved for now and must be zero. 4893 + * 4894 + * Return 4895 + * On success, number of bytes written to *buf*. On error, a 4896 + * negative value. 4897 + * 4898 + * **-EINVAL** if *flags* is not zero. 4899 + * 4900 + * **-ENOENT** if architecture does not support branch records. 4880 4901 */ 4881 4902 #define __BPF_FUNC_MAPPER(FN) \ 4882 4903 FN(unspec), \ ··· 5076 5055 FN(get_func_ip), \ 5077 5056 FN(get_attach_cookie), \ 5078 5057 FN(task_pt_regs), \ 5058 + FN(get_branch_snapshot), \ 5079 5059 /* */ 5080 5060 5081 5061 /* integer value in 'imm' field of BPF_CALL instruction selects which helper ··· 5306 5284 __u32 gso_segs; 5307 5285 __bpf_md_ptr(struct bpf_sock *, sk); 5308 5286 __u32 gso_size; 5287 + __u32 :32; /* Padding, future use. 
*/ 5288 + __u64 hwtstamp; 5309 5289 }; 5310 5290 5311 5291 struct bpf_tunnel_key {
+35 -20
tools/include/uapi/linux/btf.h
··· 43 43 * "size" tells the size of the type it is describing. 44 44 * 45 45 * "type" is used by PTR, TYPEDEF, VOLATILE, CONST, RESTRICT, 46 - * FUNC, FUNC_PROTO and VAR. 46 + * FUNC, FUNC_PROTO, VAR and TAG. 47 47 * "type" is a type_id referring to another type. 48 48 */ 49 49 union { ··· 56 56 #define BTF_INFO_VLEN(info) ((info) & 0xffff) 57 57 #define BTF_INFO_KFLAG(info) ((info) >> 31) 58 58 59 - #define BTF_KIND_UNKN 0 /* Unknown */ 60 - #define BTF_KIND_INT 1 /* Integer */ 61 - #define BTF_KIND_PTR 2 /* Pointer */ 62 - #define BTF_KIND_ARRAY 3 /* Array */ 63 - #define BTF_KIND_STRUCT 4 /* Struct */ 64 - #define BTF_KIND_UNION 5 /* Union */ 65 - #define BTF_KIND_ENUM 6 /* Enumeration */ 66 - #define BTF_KIND_FWD 7 /* Forward */ 67 - #define BTF_KIND_TYPEDEF 8 /* Typedef */ 68 - #define BTF_KIND_VOLATILE 9 /* Volatile */ 69 - #define BTF_KIND_CONST 10 /* Const */ 70 - #define BTF_KIND_RESTRICT 11 /* Restrict */ 71 - #define BTF_KIND_FUNC 12 /* Function */ 72 - #define BTF_KIND_FUNC_PROTO 13 /* Function Proto */ 73 - #define BTF_KIND_VAR 14 /* Variable */ 74 - #define BTF_KIND_DATASEC 15 /* Section */ 75 - #define BTF_KIND_FLOAT 16 /* Floating point */ 76 - #define BTF_KIND_MAX BTF_KIND_FLOAT 77 - #define NR_BTF_KINDS (BTF_KIND_MAX + 1) 59 + enum { 60 + BTF_KIND_UNKN = 0, /* Unknown */ 61 + BTF_KIND_INT = 1, /* Integer */ 62 + BTF_KIND_PTR = 2, /* Pointer */ 63 + BTF_KIND_ARRAY = 3, /* Array */ 64 + BTF_KIND_STRUCT = 4, /* Struct */ 65 + BTF_KIND_UNION = 5, /* Union */ 66 + BTF_KIND_ENUM = 6, /* Enumeration */ 67 + BTF_KIND_FWD = 7, /* Forward */ 68 + BTF_KIND_TYPEDEF = 8, /* Typedef */ 69 + BTF_KIND_VOLATILE = 9, /* Volatile */ 70 + BTF_KIND_CONST = 10, /* Const */ 71 + BTF_KIND_RESTRICT = 11, /* Restrict */ 72 + BTF_KIND_FUNC = 12, /* Function */ 73 + BTF_KIND_FUNC_PROTO = 13, /* Function Proto */ 74 + BTF_KIND_VAR = 14, /* Variable */ 75 + BTF_KIND_DATASEC = 15, /* Section */ 76 + BTF_KIND_FLOAT = 16, /* Floating point */ 77 + BTF_KIND_TAG = 17, /* Tag */ 78 
+ 79 + NR_BTF_KINDS, 80 + BTF_KIND_MAX = NR_BTF_KINDS - 1, 81 + }; 78 82 79 83 /* For some specific BTF_KIND, "struct btf_type" is immediately 80 84 * followed by extra data. ··· 172 168 __u32 type; 173 169 __u32 offset; 174 170 __u32 size; 171 + }; 172 + 173 + /* BTF_KIND_TAG is followed by a single "struct btf_tag" to describe 174 + * additional information related to the tag applied location. 175 + * If component_idx == -1, the tag is applied to a struct, union, 176 + * variable or function. Otherwise, it is applied to a struct/union 177 + * member or a func argument, and component_idx indicates which member 178 + * or argument (0 ... vlen-1). 179 + */ 180 + struct btf_tag { 181 + __s32 component_idx; 175 182 }; 176 183 177 184 #endif /* _UAPI__LINUX_BTF_H__ */
-1
tools/lib/bpf/.gitignore
··· 1 1 # SPDX-License-Identifier: GPL-2.0-only 2 - libbpf_version.h 3 2 libbpf.pc 4 3 libbpf.so.* 5 4 TAGS
+29 -10
tools/lib/bpf/Makefile
··· 8 8 LIBBPF_VERSION := $(shell \ 9 9 grep -oE '^LIBBPF_([0-9.]+)' $(VERSION_SCRIPT) | \ 10 10 sort -rV | head -n1 | cut -d'_' -f2) 11 - LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION))) 11 + LIBBPF_MAJOR_VERSION := $(word 1,$(subst ., ,$(LIBBPF_VERSION))) 12 + LIBBPF_MINOR_VERSION := $(word 2,$(subst ., ,$(LIBBPF_VERSION))) 12 13 13 14 MAKEFLAGS += --no-print-directory 14 15 ··· 60 59 VERBOSE = 0 61 60 endif 62 61 63 - INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi 62 + INCLUDES = -I$(if $(OUTPUT),$(OUTPUT),.) \ 63 + -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi 64 64 65 65 export prefix libdir src obj 66 66 ··· 114 112 BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o 115 113 BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o 116 114 BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h 115 + BPF_GENERATED := $(BPF_HELPER_DEFS) 117 116 118 117 LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET)) 119 118 LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) ··· 139 136 140 137 all_cmd: $(CMD_TARGETS) check 141 138 142 - $(BPF_IN_SHARED): force $(BPF_HELPER_DEFS) 139 + $(BPF_IN_SHARED): force $(BPF_GENERATED) 143 140 @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \ 144 141 (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \ 145 142 echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true ··· 157 154 echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true 158 155 $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)" 159 156 160 - $(BPF_IN_STATIC): force $(BPF_HELPER_DEFS) 157 + $(BPF_IN_STATIC): force $(BPF_GENERATED) 161 158 $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR) 162 159 163 160 $(BPF_HELPER_DEFS): 
$(srctree)/tools/include/uapi/linux/bpf.h ··· 182 179 -e "s|@VERSION@|$(LIBBPF_VERSION)|" \ 183 180 < libbpf.pc.template > $@ 184 181 185 - check: check_abi 182 + check: check_abi check_version 186 183 187 184 check_abi: $(OUTPUT)libbpf.so $(VERSION_SCRIPT) 188 185 @if [ "$(GLOBAL_SYM_COUNT)" != "$(VERSIONED_SYM_COUNT)" ]; then \ ··· 208 205 exit 1; \ 209 206 fi 210 207 208 + HDR_MAJ_VERSION := $(shell grep -oE '^\#define LIBBPF_MAJOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3) 209 + HDR_MIN_VERSION := $(shell grep -oE '^\#define LIBBPF_MINOR_VERSION ([0-9]+)$$' libbpf_version.h | cut -d' ' -f3) 210 + 211 + check_version: $(VERSION_SCRIPT) libbpf_version.h 212 + @if [ "$(HDR_MAJ_VERSION)" != "$(LIBBPF_MAJOR_VERSION)" ]; then \ 213 + echo "Error: libbpf major version mismatch detected: " \ 214 + "'$(HDR_MAJ_VERSION)' != '$(LIBBPF_MAJOR_VERSION)'" >&2; \ 215 + exit 1; \ 216 + fi 217 + @if [ "$(HDR_MIN_VERSION)" != "$(LIBBPF_MINOR_VERSION)" ]; then \ 218 + echo "Error: libbpf minor version mismatch detected: " \ 219 + "'$(HDR_MIN_VERSION)' != '$(LIBBPF_MINOR_VERSION)'" >&2; \ 220 + exit 1; \ 221 + fi 222 + 211 223 define do_install_mkdir 212 224 if [ ! 
-d '$(DESTDIR_SQ)$1' ]; then \ 213 225 $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$1'; \ ··· 242 224 cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ) 243 225 244 226 INSTALL_HEADERS = bpf.h libbpf.h btf.h libbpf_common.h libbpf_legacy.h xsk.h \ 245 - bpf_helpers.h $(BPF_HELPER_DEFS) bpf_tracing.h \ 246 - bpf_endian.h bpf_core_read.h skel_internal.h 227 + bpf_helpers.h $(BPF_GENERATED) bpf_tracing.h \ 228 + bpf_endian.h bpf_core_read.h skel_internal.h \ 229 + libbpf_version.h 247 230 248 - install_headers: $(BPF_HELPER_DEFS) 231 + install_headers: $(BPF_GENERATED) 249 232 $(call QUIET_INSTALL, headers) \ 250 233 $(foreach hdr,$(INSTALL_HEADERS), \ 251 234 $(call do_install,$(hdr),$(prefix)/include/bpf,644);) ··· 259 240 260 241 clean: 261 242 $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \ 262 - *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \ 243 + *~ .*.d .*.cmd LIBBPF-CFLAGS $(BPF_GENERATED) \ 263 244 $(SHARED_OBJDIR) $(STATIC_OBJDIR) \ 264 245 $(addprefix $(OUTPUT), \ 265 246 *.o *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) *.pc) 266 247 267 - PHONY += force cscope tags 248 + PHONY += force cscope tags check check_abi check_version 268 249 force: 269 250 270 251 cscope:
+76 -8
tools/lib/bpf/btf.c
··· 304 304 return base_size + sizeof(struct btf_var); 305 305 case BTF_KIND_DATASEC: 306 306 return base_size + vlen * sizeof(struct btf_var_secinfo); 307 + case BTF_KIND_TAG: 308 + return base_size + sizeof(struct btf_tag); 307 309 default: 308 310 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); 309 311 return -EINVAL; ··· 377 375 v->offset = bswap_32(v->offset); 378 376 v->size = bswap_32(v->size); 379 377 } 378 + return 0; 379 + case BTF_KIND_TAG: 380 + btf_tag(t)->component_idx = bswap_32(btf_tag(t)->component_idx); 380 381 return 0; 381 382 default: 382 383 pr_debug("Unsupported BTF_KIND:%u\n", btf_kind(t)); ··· 591 586 case BTF_KIND_CONST: 592 587 case BTF_KIND_RESTRICT: 593 588 case BTF_KIND_VAR: 589 + case BTF_KIND_TAG: 594 590 type_id = t->type; 595 591 break; 596 592 case BTF_KIND_ARRAY: ··· 2446 2440 return 0; 2447 2441 } 2448 2442 2443 + /* 2444 + * Append new BTF_KIND_TAG type with: 2445 + * - *value* - non-empty/non-NULL string; 2446 + * - *ref_type_id* - referenced type ID, it might not exist yet; 2447 + * - *component_idx* - -1 for tagging reference type, otherwise struct/union 2448 + * member or function argument index; 2449 + * Returns: 2450 + * - >0, type ID of newly added BTF type; 2451 + * - <0, on error. 
2452 + */ 2453 + int btf__add_tag(struct btf *btf, const char *value, int ref_type_id, 2454 + int component_idx) 2455 + { 2456 + struct btf_type *t; 2457 + int sz, value_off; 2458 + 2459 + if (!value || !value[0] || component_idx < -1) 2460 + return libbpf_err(-EINVAL); 2461 + 2462 + if (validate_type_id(ref_type_id)) 2463 + return libbpf_err(-EINVAL); 2464 + 2465 + if (btf_ensure_modifiable(btf)) 2466 + return libbpf_err(-ENOMEM); 2467 + 2468 + sz = sizeof(struct btf_type) + sizeof(struct btf_tag); 2469 + t = btf_add_type_mem(btf, sz); 2470 + if (!t) 2471 + return libbpf_err(-ENOMEM); 2472 + 2473 + value_off = btf__add_str(btf, value); 2474 + if (value_off < 0) 2475 + return value_off; 2476 + 2477 + t->name_off = value_off; 2478 + t->info = btf_type_info(BTF_KIND_TAG, 0, false); 2479 + t->type = ref_type_id; 2480 + btf_tag(t)->component_idx = component_idx; 2481 + 2482 + return btf_commit_type(btf, sz); 2483 + } 2484 + 2449 2485 struct btf_ext_sec_setup_param { 2450 2486 __u32 off; 2451 2487 __u32 len; ··· 3304 3256 t1->size == t2->size; 3305 3257 } 3306 3258 3307 - /* Calculate type signature hash of INT. */ 3308 - static long btf_hash_int(struct btf_type *t) 3259 + /* Calculate type signature hash of INT or TAG. */ 3260 + static long btf_hash_int_tag(struct btf_type *t) 3309 3261 { 3310 3262 __u32 info = *(__u32 *)(t + 1); 3311 3263 long h; ··· 3315 3267 return h; 3316 3268 } 3317 3269 3318 - /* Check structural equality of two INTs. */ 3319 - static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2) 3270 + /* Check structural equality of two INTs or TAGs. 
*/ 3271 + static bool btf_equal_int_tag(struct btf_type *t1, struct btf_type *t2) 3320 3272 { 3321 3273 __u32 info1, info2; 3322 3274 ··· 3583 3535 h = btf_hash_common(t); 3584 3536 break; 3585 3537 case BTF_KIND_INT: 3586 - h = btf_hash_int(t); 3538 + case BTF_KIND_TAG: 3539 + h = btf_hash_int_tag(t); 3587 3540 break; 3588 3541 case BTF_KIND_ENUM: 3589 3542 h = btf_hash_enum(t); ··· 3639 3590 case BTF_KIND_FUNC_PROTO: 3640 3591 case BTF_KIND_VAR: 3641 3592 case BTF_KIND_DATASEC: 3593 + case BTF_KIND_TAG: 3642 3594 return 0; 3643 3595 3644 3596 case BTF_KIND_INT: 3645 - h = btf_hash_int(t); 3597 + h = btf_hash_int_tag(t); 3646 3598 for_each_dedup_cand(d, hash_entry, h) { 3647 3599 cand_id = (__u32)(long)hash_entry->value; 3648 3600 cand = btf_type_by_id(d->btf, cand_id); 3649 - if (btf_equal_int(t, cand)) { 3601 + if (btf_equal_int_tag(t, cand)) { 3650 3602 new_id = cand_id; 3651 3603 break; 3652 3604 } ··· 3931 3881 3932 3882 switch (cand_kind) { 3933 3883 case BTF_KIND_INT: 3934 - return btf_equal_int(cand_type, canon_type); 3884 + return btf_equal_int_tag(cand_type, canon_type); 3935 3885 3936 3886 case BTF_KIND_ENUM: 3937 3887 if (d->opts.dont_resolve_fwds) ··· 4260 4210 } 4261 4211 break; 4262 4212 4213 + case BTF_KIND_TAG: 4214 + ref_type_id = btf_dedup_ref_type(d, t->type); 4215 + if (ref_type_id < 0) 4216 + return ref_type_id; 4217 + t->type = ref_type_id; 4218 + 4219 + h = btf_hash_int_tag(t); 4220 + for_each_dedup_cand(d, hash_entry, h) { 4221 + cand_id = (__u32)(long)hash_entry->value; 4222 + cand = btf_type_by_id(d->btf, cand_id); 4223 + if (btf_equal_int_tag(t, cand)) { 4224 + new_id = cand_id; 4225 + break; 4226 + } 4227 + } 4228 + break; 4229 + 4263 4230 case BTF_KIND_ARRAY: { 4264 4231 struct btf_array *info = btf_array(t); 4265 4232 ··· 4549 4482 case BTF_KIND_TYPEDEF: 4550 4483 case BTF_KIND_FUNC: 4551 4484 case BTF_KIND_VAR: 4485 + case BTF_KIND_TAG: 4552 4486 return visit(&t->type, ctx); 4553 4487 4554 4488 case BTF_KIND_ARRAY: {
+87
tools/lib/bpf/btf.h
··· 1 1 /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 2 /* Copyright (c) 2018 Facebook */ 3 + /*! \file */ 3 4 4 5 #ifndef __LIBBPF_BTF_H 5 6 #define __LIBBPF_BTF_H ··· 31 30 BTF_BIG_ENDIAN = 1, 32 31 }; 33 32 33 + /** 34 + * @brief **btf__free()** frees all data of a BTF object 35 + * @param btf BTF object to free 36 + */ 34 37 LIBBPF_API void btf__free(struct btf *btf); 35 38 39 + /** 40 + * @brief **btf__new()** creates a new instance of a BTF object from the raw 41 + * bytes of an ELF's BTF section 42 + * @param data raw bytes 43 + * @param size number of bytes passed in `data` 44 + * @return new BTF object instance which has to be eventually freed with 45 + * **btf__free()** 46 + * 47 + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract 48 + * error code from such a pointer `libbpf_get_error()` should be used. If 49 + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is 50 + * returned on error instead. In both cases thread-local `errno` variable is 51 + * always set to error code as well. 52 + */ 36 53 LIBBPF_API struct btf *btf__new(const void *data, __u32 size); 54 + 55 + /** 56 + * @brief **btf__new_split()** create a new instance of a BTF object from the 57 + * provided raw data bytes. It takes another BTF instance, **base_btf**, which 58 + * serves as a base BTF, which is extended by types in a newly created BTF 59 + * instance 60 + * @param data raw bytes 61 + * @param size length of raw bytes 62 + * @param base_btf the base BTF object 63 + * @return new BTF object instance which has to be eventually freed with 64 + * **btf__free()** 65 + * 66 + * If *base_btf* is NULL, `btf__new_split()` is equivalent to `btf__new()` and 67 + * creates non-split BTF. 68 + * 69 + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract 70 + * error code from such a pointer `libbpf_get_error()` should be used. 
If 71 + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is 72 + * returned on error instead. In both cases thread-local `errno` variable is 73 + * always set to error code as well. 74 + */ 37 75 LIBBPF_API struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf); 76 + 77 + /** 78 + * @brief **btf__new_empty()** creates an empty BTF object. Use 79 + * `btf__add_*()` to populate such BTF object. 80 + * @return new BTF object instance which has to be eventually freed with 81 + * **btf__free()** 82 + * 83 + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract 84 + * error code from such a pointer `libbpf_get_error()` should be used. If 85 + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is 86 + * returned on error instead. In both cases thread-local `errno` variable is 87 + * always set to error code as well. 88 + */ 38 89 LIBBPF_API struct btf *btf__new_empty(void); 90 + 91 + /** 92 + * @brief **btf__new_empty_split()** creates an unpopulated BTF object from an 93 + * ELF BTF section except with a base BTF on top of which split BTF should be 94 + * based 95 + * @return new BTF object instance which has to be eventually freed with 96 + * **btf__free()** 97 + * 98 + * If *base_btf* is NULL, `btf__new_empty_split()` is equivalent to 99 + * `btf__new_empty()` and creates non-split BTF. 100 + * 101 + * On error, error-code-encoded-as-pointer is returned, not a NULL. To extract 102 + * error code from such a pointer `libbpf_get_error()` should be used. If 103 + * `libbpf_set_strict_mode(LIBBPF_STRICT_CLEAN_PTRS)` is enabled, NULL is 104 + * returned on error instead. In both cases thread-local `errno` variable is 105 + * always set to error code as well. 
106 + */ 39 107 LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf); 40 108 41 109 LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); ··· 120 50 121 51 LIBBPF_API struct btf *btf__load_from_kernel_by_id(__u32 id); 122 52 LIBBPF_API struct btf *btf__load_from_kernel_by_id_split(__u32 id, struct btf *base_btf); 53 + LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_from_kernel_by_id instead") 123 54 LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); 124 55 125 56 LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf); 57 + LIBBPF_DEPRECATED_SINCE(0, 6, "use btf__load_into_kernel instead") 126 58 LIBBPF_API int btf__load(struct btf *btf); 127 59 LIBBPF_API int btf__load_into_kernel(struct btf *btf); 128 60 LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, ··· 212 140 LIBBPF_API int btf__add_datasec(struct btf *btf, const char *name, __u32 byte_sz); 213 141 LIBBPF_API int btf__add_datasec_var_info(struct btf *btf, int var_type_id, 214 142 __u32 offset, __u32 byte_sz); 143 + 144 + /* tag construction API */ 145 + LIBBPF_API int btf__add_tag(struct btf *btf, const char *value, int ref_type_id, 146 + int component_idx); 215 147 216 148 struct btf_dedup_opts { 217 149 unsigned int dedup_table_size; ··· 404 328 return btf_kind(t) == BTF_KIND_FLOAT; 405 329 } 406 330 331 + static inline bool btf_is_tag(const struct btf_type *t) 332 + { 333 + return btf_kind(t) == BTF_KIND_TAG; 334 + } 335 + 407 336 static inline __u8 btf_int_encoding(const struct btf_type *t) 408 337 { 409 338 return BTF_INT_ENCODING(*(__u32 *)(t + 1)); ··· 475 394 btf_var_secinfos(const struct btf_type *t) 476 395 { 477 396 return (struct btf_var_secinfo *)(t + 1); 397 + } 398 + 399 + struct btf_tag; 400 + static inline struct btf_tag *btf_tag(const struct btf_type *t) 401 + { 402 + return (struct btf_tag *)(t + 1); 478 403 } 479 404 480 405 #ifdef __cplusplus
+3
tools/lib/bpf/btf_dump.c
··· 316 316 case BTF_KIND_TYPEDEF: 317 317 case BTF_KIND_FUNC: 318 318 case BTF_KIND_VAR: 319 + case BTF_KIND_TAG: 319 320 d->type_states[t->type].referenced = 1; 320 321 break; 321 322 ··· 584 583 case BTF_KIND_FUNC: 585 584 case BTF_KIND_VAR: 586 585 case BTF_KIND_DATASEC: 586 + case BTF_KIND_TAG: 587 587 d->type_states[id].order_state = ORDERED; 588 588 return 0; 589 589 ··· 2217 2215 case BTF_KIND_FWD: 2218 2216 case BTF_KIND_FUNC: 2219 2217 case BTF_KIND_FUNC_PROTO: 2218 + case BTF_KIND_TAG: 2220 2219 err = btf_dump_unsupported_data(d, t, id); 2221 2220 break; 2222 2221 case BTF_KIND_INT:
+288 -143
tools/lib/bpf/libbpf.c
··· 195 195 FEAT_BTF_FLOAT, 196 196 /* BPF perf link support */ 197 197 FEAT_PERF_LINK, 198 + /* BTF_KIND_TAG support */ 199 + FEAT_BTF_TAG, 198 200 __FEAT_CNT, 199 201 }; 200 202 ··· 220 218 221 219 struct bpf_sec_def; 222 220 223 - typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec, 224 - struct bpf_program *prog); 221 + typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog); 225 222 226 223 struct bpf_sec_def { 227 224 const char *sec; ··· 1988 1987 case BTF_KIND_VAR: return "var"; 1989 1988 case BTF_KIND_DATASEC: return "datasec"; 1990 1989 case BTF_KIND_FLOAT: return "float"; 1990 + case BTF_KIND_TAG: return "tag"; 1991 1991 default: return "unknown"; 1992 1992 } 1993 1993 } ··· 2488 2486 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); 2489 2487 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); 2490 2488 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); 2489 + bool has_tag = kernel_supports(obj, FEAT_BTF_TAG); 2491 2490 2492 - return !has_func || !has_datasec || !has_func_global || !has_float; 2491 + return !has_func || !has_datasec || !has_func_global || !has_float || !has_tag; 2493 2492 } 2494 2493 2495 2494 static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf) ··· 2499 2496 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); 2500 2497 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); 2501 2498 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); 2499 + bool has_tag = kernel_supports(obj, FEAT_BTF_TAG); 2502 2500 struct btf_type *t; 2503 2501 int i, j, vlen; 2504 2502 2505 2503 for (i = 1; i <= btf__get_nr_types(btf); i++) { 2506 2504 t = (struct btf_type *)btf__type_by_id(btf, i); 2507 2505 2508 - if (!has_datasec && btf_is_var(t)) { 2509 - /* replace VAR with INT */ 2506 + if ((!has_datasec && btf_is_var(t)) || (!has_tag && btf_is_tag(t))) { 2507 + /* replace VAR/TAG with INT */ 2510 2508 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0); 2511 2509 /* 2512 2510 * using size 
= 1 is the safest choice, 4 will be too ··· 2995 2991 obj->efile.symbols_shndx = elf_ndxscn(scn); 2996 2992 obj->efile.strtabidx = sh.sh_link; 2997 2993 } 2994 + } 2995 + 2996 + if (!obj->efile.symbols) { 2997 + pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n", 2998 + obj->path); 2999 + return -ENOENT; 2998 3000 } 2999 3001 3000 3002 scn = NULL; ··· 4217 4207 strs, sizeof(strs))); 4218 4208 } 4219 4209 4210 + static int probe_kern_btf_tag(void) 4211 + { 4212 + static const char strs[] = "\0tag"; 4213 + __u32 types[] = { 4214 + /* int */ 4215 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 4216 + /* VAR x */ /* [2] */ 4217 + BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1), 4218 + BTF_VAR_STATIC, 4219 + /* attr */ 4220 + BTF_TYPE_TAG_ENC(1, 2, -1), 4221 + }; 4222 + 4223 + return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types), 4224 + strs, sizeof(strs))); 4225 + } 4226 + 4220 4227 static int probe_kern_array_mmap(void) 4221 4228 { 4222 4229 struct bpf_create_map_attr attr = { ··· 4449 4422 }, 4450 4423 [FEAT_PERF_LINK] = { 4451 4424 "BPF perf link support", probe_perf_link, 4425 + }, 4426 + [FEAT_BTF_TAG] = { 4427 + "BTF_KIND_TAG support", probe_kern_btf_tag, 4452 4428 }, 4453 4429 }; 4454 4430 ··· 6397 6367 6398 6368 static const struct bpf_sec_def *find_sec_def(const char *sec_name); 6399 6369 6370 + static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts) 6371 + { 6372 + struct bpf_program *prog; 6373 + 6374 + bpf_object__for_each_program(prog, obj) { 6375 + prog->sec_def = find_sec_def(prog->sec_name); 6376 + if (!prog->sec_def) { 6377 + /* couldn't guess, but user might manually specify */ 6378 + pr_debug("prog '%s': unrecognized ELF section name '%s'\n", 6379 + prog->name, prog->sec_name); 6380 + continue; 6381 + } 6382 + 6383 + if (prog->sec_def->is_sleepable) 6384 + prog->prog_flags |= BPF_F_SLEEPABLE; 6385 + bpf_program__set_type(prog, prog->sec_def->prog_type); 6386 
+ bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type); 6387 + 6388 + #pragma GCC diagnostic push 6389 + #pragma GCC diagnostic ignored "-Wdeprecated-declarations" 6390 + if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || 6391 + prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) 6392 + prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); 6393 + #pragma GCC diagnostic pop 6394 + } 6395 + 6396 + return 0; 6397 + } 6398 + 6400 6399 static struct bpf_object * 6401 6400 __bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz, 6402 6401 const struct bpf_object_open_opts *opts) 6403 6402 { 6404 6403 const char *obj_name, *kconfig, *btf_tmp_path; 6405 - struct bpf_program *prog; 6406 6404 struct bpf_object *obj; 6407 6405 char tmp_name[64]; 6408 6406 int err; ··· 6488 6430 err = err ? : bpf_object__collect_externs(obj); 6489 6431 err = err ? : bpf_object__finalize_btf(obj); 6490 6432 err = err ? : bpf_object__init_maps(obj, opts); 6433 + err = err ? : bpf_object_init_progs(obj, opts); 6491 6434 err = err ? 
: bpf_object__collect_relos(obj); 6492 6435 if (err) 6493 6436 goto out; 6437 + 6494 6438 bpf_object__elf_finish(obj); 6495 - 6496 - bpf_object__for_each_program(prog, obj) { 6497 - prog->sec_def = find_sec_def(prog->sec_name); 6498 - if (!prog->sec_def) { 6499 - /* couldn't guess, but user might manually specify */ 6500 - pr_debug("prog '%s': unrecognized ELF section name '%s'\n", 6501 - prog->name, prog->sec_name); 6502 - continue; 6503 - } 6504 - 6505 - if (prog->sec_def->is_sleepable) 6506 - prog->prog_flags |= BPF_F_SLEEPABLE; 6507 - bpf_program__set_type(prog, prog->sec_def->prog_type); 6508 - bpf_program__set_expected_attach_type(prog, 6509 - prog->sec_def->expected_attach_type); 6510 - 6511 - if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING || 6512 - prog->sec_def->prog_type == BPF_PROG_TYPE_EXT) 6513 - prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0); 6514 - } 6515 6439 6516 6440 return obj; 6517 6441 out: ··· 7947 7907 __VA_ARGS__ \ 7948 7908 } 7949 7909 7950 - static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, 7951 - struct bpf_program *prog); 7952 - static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, 7953 - struct bpf_program *prog); 7954 - static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, 7955 - struct bpf_program *prog); 7956 - static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, 7957 - struct bpf_program *prog); 7958 - static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, 7959 - struct bpf_program *prog); 7960 - static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, 7961 - struct bpf_program *prog); 7910 + static struct bpf_link *attach_kprobe(const struct bpf_program *prog); 7911 + static struct bpf_link *attach_tp(const struct bpf_program *prog); 7912 + static struct bpf_link *attach_raw_tp(const struct bpf_program *prog); 7913 + static struct bpf_link *attach_trace(const struct bpf_program *prog); 7914 + static struct bpf_link *attach_lsm(const 
struct bpf_program *prog); 7915 + static struct bpf_link *attach_iter(const struct bpf_program *prog); 7962 7916 7963 7917 static const struct bpf_sec_def section_defs[] = { 7964 7918 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER), ··· 8278 8244 return -EINVAL; 8279 8245 } 8280 8246 8281 - if (prog->type == BPF_PROG_TYPE_UNSPEC) { 8282 - const struct bpf_sec_def *sec_def; 8247 + /* prevent the use of BPF prog with invalid type */ 8248 + if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) { 8249 + pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n", 8250 + map->name, prog->name); 8251 + return -EINVAL; 8252 + } 8283 8253 8284 - sec_def = find_sec_def(prog->sec_name); 8285 - if (sec_def && 8286 - sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) { 8287 - /* for pr_warn */ 8288 - prog->type = sec_def->prog_type; 8289 - goto invalid_prog; 8290 - } 8291 - 8292 - prog->type = BPF_PROG_TYPE_STRUCT_OPS; 8254 + /* if we haven't yet processed this BPF program, record proper 8255 + * attach_btf_id and member_idx 8256 + */ 8257 + if (!prog->attach_btf_id) { 8293 8258 prog->attach_btf_id = st_ops->type_id; 8294 8259 prog->expected_attach_type = member_idx; 8295 - } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS || 8296 - prog->attach_btf_id != st_ops->type_id || 8297 - prog->expected_attach_type != member_idx) { 8298 - goto invalid_prog; 8299 8260 } 8261 + 8262 + /* struct_ops BPF prog can be re-used between multiple 8263 + * .struct_ops as long as it's the same struct_ops struct 8264 + * definition and the same function pointer field 8265 + */ 8266 + if (prog->attach_btf_id != st_ops->type_id || 8267 + prog->expected_attach_type != member_idx) { 8268 + pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n", 8269 + map->name, prog->name, prog->sec_name, prog->type, 8270 + prog->attach_btf_id, prog->expected_attach_type, name); 8271 + return -EINVAL; 8272 + } 8273 + 8300 8274 
st_ops->progs[member_idx] = prog; 8301 8275 } 8302 8276 8303 8277 return 0; 8304 - 8305 - invalid_prog: 8306 - pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n", 8307 - map->name, prog->name, prog->sec_name, prog->type, 8308 - prog->attach_btf_id, prog->expected_attach_type, name); 8309 - return -EINVAL; 8310 8278 } 8311 8279 8312 8280 #define BTF_TRACE_PREFIX "btf_trace_" ··· 8464 8428 { 8465 8429 enum bpf_attach_type attach_type = prog->expected_attach_type; 8466 8430 __u32 attach_prog_fd = prog->attach_prog_fd; 8467 - const char *name = prog->sec_name, *attach_name; 8468 - const struct bpf_sec_def *sec = NULL; 8469 - int i, err = 0; 8431 + const char *attach_name; 8432 + int err = 0; 8470 8433 8471 - if (!name) 8472 - return -EINVAL; 8473 - 8474 - for (i = 0; i < ARRAY_SIZE(section_defs); i++) { 8475 - if (!section_defs[i].is_attach_btf) 8476 - continue; 8477 - if (strncmp(name, section_defs[i].sec, section_defs[i].len)) 8478 - continue; 8479 - 8480 - sec = &section_defs[i]; 8481 - break; 8482 - } 8483 - 8484 - if (!sec) { 8485 - pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name); 8434 + if (!prog->sec_def || !prog->sec_def->is_attach_btf) { 8435 + pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", 8436 + prog->sec_name); 8486 8437 return -ESRCH; 8487 8438 } 8488 - attach_name = name + sec->len; 8439 + attach_name = prog->sec_name + prog->sec_def->len; 8489 8440 8490 8441 /* BPF program's BTF ID */ 8491 8442 if (attach_prog_fd) { ··· 8506 8483 enum bpf_attach_type *attach_type) 8507 8484 { 8508 8485 char *type_names; 8509 - int i; 8486 + const struct bpf_sec_def *sec_def; 8510 8487 8511 8488 if (!name) 8512 8489 return libbpf_err(-EINVAL); 8513 8490 8514 - for (i = 0; i < ARRAY_SIZE(section_defs); i++) { 8515 - if (strncmp(name, section_defs[i].sec, section_defs[i].len)) 8516 - continue; 8517 - if (!section_defs[i].is_attachable) 8518 - 
return libbpf_err(-EINVAL); 8519 - *attach_type = section_defs[i].expected_attach_type; 8520 - return 0; 8521 - } 8522 - pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); 8523 - type_names = libbpf_get_type_names(true); 8524 - if (type_names != NULL) { 8525 - pr_debug("attachable section(type) names are:%s\n", type_names); 8526 - free(type_names); 8491 + sec_def = find_sec_def(name); 8492 + if (!sec_def) { 8493 + pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); 8494 + type_names = libbpf_get_type_names(true); 8495 + if (type_names != NULL) { 8496 + pr_debug("attachable section(type) names are:%s\n", type_names); 8497 + free(type_names); 8498 + } 8499 + 8500 + return libbpf_err(-EINVAL); 8527 8501 } 8528 8502 8529 - return libbpf_err(-EINVAL); 8503 + if (!sec_def->is_attachable) 8504 + return libbpf_err(-EINVAL); 8505 + 8506 + *attach_type = sec_def->expected_attach_type; 8507 + return 0; 8530 8508 } 8531 8509 8532 8510 int bpf_map__fd(const struct bpf_map *map) ··· 9011 8987 return 0; 9012 8988 } 9013 8989 8990 + static int poke_kprobe_events(bool add, const char *name, bool retprobe, uint64_t offset) 8991 + { 8992 + int fd, ret = 0; 8993 + pid_t p = getpid(); 8994 + char cmd[260], probename[128], probefunc[128]; 8995 + const char *file = "/sys/kernel/debug/tracing/kprobe_events"; 8996 + 8997 + if (retprobe) 8998 + snprintf(probename, sizeof(probename), "kretprobes/%s_libbpf_%u", name, p); 8999 + else 9000 + snprintf(probename, sizeof(probename), "kprobes/%s_libbpf_%u", name, p); 9001 + 9002 + if (offset) 9003 + snprintf(probefunc, sizeof(probefunc), "%s+%zu", name, (size_t)offset); 9004 + 9005 + if (add) { 9006 + snprintf(cmd, sizeof(cmd), "%c:%s %s", 9007 + retprobe ? 'r' : 'p', 9008 + probename, 9009 + offset ? 
probefunc : name); 9010 + } else { 9011 + snprintf(cmd, sizeof(cmd), "-:%s", probename); 9012 + } 9013 + 9014 + fd = open(file, O_WRONLY | O_APPEND, 0); 9015 + if (!fd) 9016 + return -errno; 9017 + ret = write(fd, cmd, strlen(cmd)); 9018 + if (ret < 0) 9019 + ret = -errno; 9020 + close(fd); 9021 + 9022 + return ret; 9023 + } 9024 + 9025 + static inline int add_kprobe_event_legacy(const char *name, bool retprobe, uint64_t offset) 9026 + { 9027 + return poke_kprobe_events(true, name, retprobe, offset); 9028 + } 9029 + 9030 + static inline int remove_kprobe_event_legacy(const char *name, bool retprobe) 9031 + { 9032 + return poke_kprobe_events(false, name, retprobe, 0); 9033 + } 9034 + 9014 9035 struct bpf_link_perf { 9015 9036 struct bpf_link link; 9016 9037 int perf_event_fd; 9038 + /* legacy kprobe support: keep track of probe identifier and type */ 9039 + char *legacy_probe_name; 9040 + bool legacy_is_retprobe; 9017 9041 }; 9018 9042 9019 9043 static int bpf_link_perf_detach(struct bpf_link *link) ··· 9076 9004 close(perf_link->perf_event_fd); 9077 9005 close(link->fd); 9078 9006 9079 - return libbpf_err(err); 9007 + /* legacy kprobe needs to be removed after perf event fd closure */ 9008 + if (perf_link->legacy_probe_name) 9009 + err = remove_kprobe_event_legacy(perf_link->legacy_probe_name, 9010 + perf_link->legacy_is_retprobe); 9011 + 9012 + return err; 9080 9013 } 9081 9014 9082 9015 static void bpf_link_perf_dealloc(struct bpf_link *link) 9083 9016 { 9084 9017 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); 9085 9018 9019 + free(perf_link->legacy_probe_name); 9086 9020 free(perf_link); 9087 9021 } 9088 9022 9089 - struct bpf_link *bpf_program__attach_perf_event_opts(struct bpf_program *prog, int pfd, 9023 + struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, 9090 9024 const struct bpf_perf_event_opts *opts) 9091 9025 { 9092 9026 char errmsg[STRERR_BUFSIZE]; ··· 9167 9089 return 
libbpf_err_ptr(err); 9168 9090 } 9169 9091 9170 - struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog, int pfd) 9092 + struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd) 9171 9093 { 9172 9094 return bpf_program__attach_perf_event_opts(prog, pfd, NULL); 9173 9095 } ··· 9200 9122 } 9201 9123 fclose(f); 9202 9124 return ret; 9125 + } 9126 + 9127 + static int determine_kprobe_perf_type_legacy(const char *func_name, bool is_retprobe) 9128 + { 9129 + char file[192]; 9130 + 9131 + snprintf(file, sizeof(file), 9132 + "/sys/kernel/debug/tracing/events/%s/%s_libbpf_%d/id", 9133 + is_retprobe ? "kretprobes" : "kprobes", 9134 + func_name, getpid()); 9135 + 9136 + return parse_uint_from_file(file, "%d\n"); 9203 9137 } 9204 9138 9205 9139 static int determine_kprobe_perf_type(void) ··· 9296 9206 return pfd; 9297 9207 } 9298 9208 9209 + static int perf_event_kprobe_open_legacy(bool retprobe, const char *name, uint64_t offset, int pid) 9210 + { 9211 + struct perf_event_attr attr = {}; 9212 + char errmsg[STRERR_BUFSIZE]; 9213 + int type, pfd, err; 9214 + 9215 + err = add_kprobe_event_legacy(name, retprobe, offset); 9216 + if (err < 0) { 9217 + pr_warn("failed to add legacy kprobe event: %s\n", 9218 + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9219 + return err; 9220 + } 9221 + type = determine_kprobe_perf_type_legacy(name, retprobe); 9222 + if (type < 0) { 9223 + pr_warn("failed to determine legacy kprobe event id: %s\n", 9224 + libbpf_strerror_r(type, errmsg, sizeof(errmsg))); 9225 + return type; 9226 + } 9227 + attr.size = sizeof(attr); 9228 + attr.config = type; 9229 + attr.type = PERF_TYPE_TRACEPOINT; 9230 + 9231 + pfd = syscall(__NR_perf_event_open, &attr, 9232 + pid < 0 ? -1 : pid, /* pid */ 9233 + pid == -1 ? 
0 : -1, /* cpu */ 9234 + -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); 9235 + if (pfd < 0) { 9236 + err = -errno; 9237 + pr_warn("legacy kprobe perf_event_open() failed: %s\n", 9238 + libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9239 + return err; 9240 + } 9241 + return pfd; 9242 + } 9243 + 9299 9244 struct bpf_link * 9300 - bpf_program__attach_kprobe_opts(struct bpf_program *prog, 9245 + bpf_program__attach_kprobe_opts(const struct bpf_program *prog, 9301 9246 const char *func_name, 9302 9247 const struct bpf_kprobe_opts *opts) 9303 9248 { 9304 9249 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); 9305 9250 char errmsg[STRERR_BUFSIZE]; 9251 + char *legacy_probe = NULL; 9306 9252 struct bpf_link *link; 9307 9253 unsigned long offset; 9308 - bool retprobe; 9254 + bool retprobe, legacy; 9309 9255 int pfd, err; 9310 9256 9311 9257 if (!OPTS_VALID(opts, bpf_kprobe_opts)) ··· 9351 9225 offset = OPTS_GET(opts, offset, 0); 9352 9226 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); 9353 9227 9354 - pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name, 9355 - offset, -1 /* pid */, 0 /* ref_ctr_off */); 9228 + legacy = determine_kprobe_perf_type() < 0; 9229 + if (!legacy) { 9230 + pfd = perf_event_open_probe(false /* uprobe */, retprobe, 9231 + func_name, offset, 9232 + -1 /* pid */, 0 /* ref_ctr_off */); 9233 + } else { 9234 + legacy_probe = strdup(func_name); 9235 + if (!legacy_probe) 9236 + return libbpf_err_ptr(-ENOMEM); 9237 + 9238 + pfd = perf_event_kprobe_open_legacy(retprobe, func_name, 9239 + offset, -1 /* pid */); 9240 + } 9356 9241 if (pfd < 0) { 9357 9242 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n", 9358 9243 prog->name, retprobe ? 
"kretprobe" : "kprobe", func_name, ··· 9379 9242 libbpf_strerror_r(err, errmsg, sizeof(errmsg))); 9380 9243 return libbpf_err_ptr(err); 9381 9244 } 9245 + if (legacy) { 9246 + struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); 9247 + 9248 + perf_link->legacy_probe_name = legacy_probe; 9249 + perf_link->legacy_is_retprobe = retprobe; 9250 + } 9251 + 9382 9252 return link; 9383 9253 } 9384 9254 9385 - struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog, 9255 + struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog, 9386 9256 bool retprobe, 9387 9257 const char *func_name) 9388 9258 { ··· 9400 9256 return bpf_program__attach_kprobe_opts(prog, func_name, &opts); 9401 9257 } 9402 9258 9403 - static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec, 9404 - struct bpf_program *prog) 9259 + static struct bpf_link *attach_kprobe(const struct bpf_program *prog) 9405 9260 { 9406 9261 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts); 9407 9262 unsigned long offset = 0; ··· 9409 9266 char *func; 9410 9267 int n, err; 9411 9268 9412 - func_name = prog->sec_name + sec->len; 9413 - opts.retprobe = strcmp(sec->sec, "kretprobe/") == 0; 9269 + func_name = prog->sec_name + prog->sec_def->len; 9270 + opts.retprobe = strcmp(prog->sec_def->sec, "kretprobe/") == 0; 9414 9271 9415 9272 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset); 9416 9273 if (n < 1) { ··· 9432 9289 } 9433 9290 9434 9291 LIBBPF_API struct bpf_link * 9435 - bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid, 9292 + bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, 9436 9293 const char *binary_path, size_t func_offset, 9437 9294 const struct bpf_uprobe_opts *opts) 9438 9295 { ··· 9472 9329 return link; 9473 9330 } 9474 9331 9475 - struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog, 9332 + struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog, 9476 9333 bool 
retprobe, pid_t pid, 9477 9334 const char *binary_path, 9478 9335 size_t func_offset) ··· 9532 9389 return pfd; 9533 9390 } 9534 9391 9535 - struct bpf_link *bpf_program__attach_tracepoint_opts(struct bpf_program *prog, 9392 + struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, 9536 9393 const char *tp_category, 9537 9394 const char *tp_name, 9538 9395 const struct bpf_tracepoint_opts *opts) ··· 9566 9423 return link; 9567 9424 } 9568 9425 9569 - struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog, 9426 + struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog, 9570 9427 const char *tp_category, 9571 9428 const char *tp_name) 9572 9429 { 9573 9430 return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL); 9574 9431 } 9575 9432 9576 - static struct bpf_link *attach_tp(const struct bpf_sec_def *sec, 9577 - struct bpf_program *prog) 9433 + static struct bpf_link *attach_tp(const struct bpf_program *prog) 9578 9434 { 9579 9435 char *sec_name, *tp_cat, *tp_name; 9580 9436 struct bpf_link *link; ··· 9583 9441 return libbpf_err_ptr(-ENOMEM); 9584 9442 9585 9443 /* extract "tp/<category>/<name>" */ 9586 - tp_cat = sec_name + sec->len; 9444 + tp_cat = sec_name + prog->sec_def->len; 9587 9445 tp_name = strchr(tp_cat, '/'); 9588 9446 if (!tp_name) { 9589 9447 free(sec_name); ··· 9597 9455 return link; 9598 9456 } 9599 9457 9600 - struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog, 9458 + struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, 9601 9459 const char *tp_name) 9602 9460 { 9603 9461 char errmsg[STRERR_BUFSIZE]; ··· 9627 9485 return link; 9628 9486 } 9629 9487 9630 - static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec, 9631 - struct bpf_program *prog) 9488 + static struct bpf_link *attach_raw_tp(const struct bpf_program *prog) 9632 9489 { 9633 - const char *tp_name = prog->sec_name + sec->len; 9490 + 
const char *tp_name = prog->sec_name + prog->sec_def->len; 9634 9491 9635 9492 return bpf_program__attach_raw_tracepoint(prog, tp_name); 9636 9493 } 9637 9494 9638 9495 /* Common logic for all BPF program types that attach to a btf_id */ 9639 - static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog) 9496 + static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog) 9640 9497 { 9641 9498 char errmsg[STRERR_BUFSIZE]; 9642 9499 struct bpf_link *link; ··· 9664 9523 return (struct bpf_link *)link; 9665 9524 } 9666 9525 9667 - struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog) 9526 + struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog) 9668 9527 { 9669 9528 return bpf_program__attach_btf_id(prog); 9670 9529 } 9671 9530 9672 - struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog) 9531 + struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog) 9673 9532 { 9674 9533 return bpf_program__attach_btf_id(prog); 9675 9534 } 9676 9535 9677 - static struct bpf_link *attach_trace(const struct bpf_sec_def *sec, 9678 - struct bpf_program *prog) 9536 + static struct bpf_link *attach_trace(const struct bpf_program *prog) 9679 9537 { 9680 9538 return bpf_program__attach_trace(prog); 9681 9539 } 9682 9540 9683 - static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec, 9684 - struct bpf_program *prog) 9541 + static struct bpf_link *attach_lsm(const struct bpf_program *prog) 9685 9542 { 9686 9543 return bpf_program__attach_lsm(prog); 9687 9544 } 9688 9545 9689 9546 static struct bpf_link * 9690 - bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id, 9547 + bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id, 9691 9548 const char *target_name) 9692 9549 { 9693 9550 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts, ··· 9721 9582 } 9722 9583 9723 9584 struct bpf_link * 9724 - bpf_program__attach_cgroup(struct bpf_program 
*prog, int cgroup_fd) 9585 + bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd) 9725 9586 { 9726 9587 return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup"); 9727 9588 } 9728 9589 9729 9590 struct bpf_link * 9730 - bpf_program__attach_netns(struct bpf_program *prog, int netns_fd) 9591 + bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd) 9731 9592 { 9732 9593 return bpf_program__attach_fd(prog, netns_fd, 0, "netns"); 9733 9594 } 9734 9595 9735 - struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex) 9596 + struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex) 9736 9597 { 9737 9598 /* target_fd/target_ifindex use the same field in LINK_CREATE */ 9738 9599 return bpf_program__attach_fd(prog, ifindex, 0, "xdp"); 9739 9600 } 9740 9601 9741 - struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog, 9602 + struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, 9742 9603 int target_fd, 9743 9604 const char *attach_func_name) 9744 9605 { ··· 9771 9632 } 9772 9633 9773 9634 struct bpf_link * 9774 - bpf_program__attach_iter(struct bpf_program *prog, 9635 + bpf_program__attach_iter(const struct bpf_program *prog, 9775 9636 const struct bpf_iter_attach_opts *opts) 9776 9637 { 9777 9638 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); ··· 9810 9671 return link; 9811 9672 } 9812 9673 9813 - static struct bpf_link *attach_iter(const struct bpf_sec_def *sec, 9814 - struct bpf_program *prog) 9674 + static struct bpf_link *attach_iter(const struct bpf_program *prog) 9815 9675 { 9816 9676 return bpf_program__attach_iter(prog, NULL); 9817 9677 } 9818 9678 9819 - struct bpf_link *bpf_program__attach(struct bpf_program *prog) 9679 + struct bpf_link *bpf_program__attach(const struct bpf_program *prog) 9820 9680 { 9821 - const struct bpf_sec_def *sec_def; 9822 - 9823 - sec_def = find_sec_def(prog->sec_name); 9824 - if (!sec_def || 
!sec_def->attach_fn) 9681 + if (!prog->sec_def || !prog->sec_def->attach_fn) 9825 9682 return libbpf_err_ptr(-ESRCH); 9826 9683 9827 - return sec_def->attach_fn(sec_def, prog); 9684 + return prog->sec_def->attach_fn(prog); 9828 9685 } 9829 9686 9830 9687 static int bpf_link__detach_struct_ops(struct bpf_link *link) ··· 9833 9698 return 0; 9834 9699 } 9835 9700 9836 - struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map) 9701 + struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) 9837 9702 { 9838 9703 struct bpf_struct_ops *st_ops; 9839 9704 struct bpf_link *link; ··· 10646 10511 { 10647 10512 int btf_obj_fd = 0, btf_id = 0, err; 10648 10513 10649 - if (!prog || attach_prog_fd < 0 || !attach_func_name) 10514 + if (!prog || attach_prog_fd < 0) 10650 10515 return libbpf_err(-EINVAL); 10651 10516 10652 10517 if (prog->obj->loaded) 10653 10518 return libbpf_err(-EINVAL); 10519 + 10520 + if (attach_prog_fd && !attach_func_name) { 10521 + /* remember attach_prog_fd and let bpf_program__load() find 10522 + * BTF ID during the program load 10523 + */ 10524 + prog->attach_prog_fd = attach_prog_fd; 10525 + return 0; 10526 + } 10654 10527 10655 10528 if (attach_prog_fd) { 10656 10529 btf_id = libbpf_find_prog_btf_id(attach_func_name, ··· 10666 10523 if (btf_id < 0) 10667 10524 return libbpf_err(btf_id); 10668 10525 } else { 10526 + if (!attach_func_name) 10527 + return libbpf_err(-EINVAL); 10528 + 10669 10529 /* load btf_vmlinux, if not yet */ 10670 10530 err = bpf_object__load_vmlinux_btf(prog->obj, true); 10671 10531 if (err) ··· 10910 10764 for (i = 0; i < s->prog_cnt; i++) { 10911 10765 struct bpf_program *prog = *s->progs[i].prog; 10912 10766 struct bpf_link **link = s->progs[i].link; 10913 - const struct bpf_sec_def *sec_def; 10914 10767 10915 10768 if (!prog->load) 10916 10769 continue; 10917 10770 10918 - sec_def = find_sec_def(prog->sec_name); 10919 - if (!sec_def || !sec_def->attach_fn) 10771 + /* auto-attaching not supported for this 
program */ 10772 + if (!prog->sec_def || !prog->sec_def->attach_fn) 10920 10773 continue; 10921 10774 10922 - *link = sec_def->attach_fn(sec_def, prog); 10775 + *link = bpf_program__attach(prog); 10923 10776 err = libbpf_get_error(*link); 10924 10777 if (err) { 10925 10778 pr_warn("failed to auto-attach program '%s': %d\n",
+22 -19
tools/lib/bpf/libbpf.h
··· 83 83 * Non-relocatable instructions are replaced with invalid ones to 84 84 * prevent accidental errors. 85 85 * */ 86 + LIBBPF_DEPRECATED_SINCE(0, 6, "field has no effect") 86 87 bool relaxed_core_relocs; 87 88 /* maps that set the 'pinning' attribute in their definition will have 88 89 * their pin_path attribute set to a file in this directory, and be 89 90 * auto-pinned to that path on load; defaults to "/sys/fs/bpf". 90 91 */ 91 92 const char *pin_root_path; 93 + 94 + LIBBPF_DEPRECATED_SINCE(0, 7, "use bpf_program__set_attach_target() on each individual bpf_program") 92 95 __u32 attach_prog_fd; 93 96 /* Additional kernel config content that augments and overrides 94 97 * system Kconfig for CONFIG_xxx externs. ··· 246 243 LIBBPF_API int bpf_link__destroy(struct bpf_link *link); 247 244 248 245 LIBBPF_API struct bpf_link * 249 - bpf_program__attach(struct bpf_program *prog); 246 + bpf_program__attach(const struct bpf_program *prog); 250 247 251 248 struct bpf_perf_event_opts { 252 249 /* size of this struct, for forward/backward compatiblity */ ··· 257 254 #define bpf_perf_event_opts__last_field bpf_cookie 258 255 259 256 LIBBPF_API struct bpf_link * 260 - bpf_program__attach_perf_event(struct bpf_program *prog, int pfd); 257 + bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd); 261 258 262 259 LIBBPF_API struct bpf_link * 263 - bpf_program__attach_perf_event_opts(struct bpf_program *prog, int pfd, 260 + bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd, 264 261 const struct bpf_perf_event_opts *opts); 265 262 266 263 struct bpf_kprobe_opts { ··· 277 274 #define bpf_kprobe_opts__last_field retprobe 278 275 279 276 LIBBPF_API struct bpf_link * 280 - bpf_program__attach_kprobe(struct bpf_program *prog, bool retprobe, 277 + bpf_program__attach_kprobe(const struct bpf_program *prog, bool retprobe, 281 278 const char *func_name); 282 279 LIBBPF_API struct bpf_link * 283 - bpf_program__attach_kprobe_opts(struct 
bpf_program *prog, 280 + bpf_program__attach_kprobe_opts(const struct bpf_program *prog, 284 281 const char *func_name, 285 282 const struct bpf_kprobe_opts *opts); 286 283 ··· 300 297 #define bpf_uprobe_opts__last_field retprobe 301 298 302 299 LIBBPF_API struct bpf_link * 303 - bpf_program__attach_uprobe(struct bpf_program *prog, bool retprobe, 300 + bpf_program__attach_uprobe(const struct bpf_program *prog, bool retprobe, 304 301 pid_t pid, const char *binary_path, 305 302 size_t func_offset); 306 303 LIBBPF_API struct bpf_link * 307 - bpf_program__attach_uprobe_opts(struct bpf_program *prog, pid_t pid, 304 + bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid, 308 305 const char *binary_path, size_t func_offset, 309 306 const struct bpf_uprobe_opts *opts); 310 307 ··· 317 314 #define bpf_tracepoint_opts__last_field bpf_cookie 318 315 319 316 LIBBPF_API struct bpf_link * 320 - bpf_program__attach_tracepoint(struct bpf_program *prog, 317 + bpf_program__attach_tracepoint(const struct bpf_program *prog, 321 318 const char *tp_category, 322 319 const char *tp_name); 323 320 LIBBPF_API struct bpf_link * 324 - bpf_program__attach_tracepoint_opts(struct bpf_program *prog, 321 + bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, 325 322 const char *tp_category, 326 323 const char *tp_name, 327 324 const struct bpf_tracepoint_opts *opts); 328 325 329 326 LIBBPF_API struct bpf_link * 330 - bpf_program__attach_raw_tracepoint(struct bpf_program *prog, 327 + bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, 331 328 const char *tp_name); 332 329 LIBBPF_API struct bpf_link * 333 - bpf_program__attach_trace(struct bpf_program *prog); 330 + bpf_program__attach_trace(const struct bpf_program *prog); 334 331 LIBBPF_API struct bpf_link * 335 - bpf_program__attach_lsm(struct bpf_program *prog); 332 + bpf_program__attach_lsm(const struct bpf_program *prog); 336 333 LIBBPF_API struct bpf_link * 337 - 
bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd); 334 + bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd); 338 335 LIBBPF_API struct bpf_link * 339 - bpf_program__attach_netns(struct bpf_program *prog, int netns_fd); 336 + bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd); 340 337 LIBBPF_API struct bpf_link * 341 - bpf_program__attach_xdp(struct bpf_program *prog, int ifindex); 338 + bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex); 342 339 LIBBPF_API struct bpf_link * 343 - bpf_program__attach_freplace(struct bpf_program *prog, 340 + bpf_program__attach_freplace(const struct bpf_program *prog, 344 341 int target_fd, const char *attach_func_name); 345 342 346 343 struct bpf_map; 347 344 348 - LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map); 345 + LIBBPF_API struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map); 349 346 350 347 struct bpf_iter_attach_opts { 351 348 size_t sz; /* size of this struct for forward/backward compatibility */ ··· 355 352 #define bpf_iter_attach_opts__last_field link_info_len 356 353 357 354 LIBBPF_API struct bpf_link * 358 - bpf_program__attach_iter(struct bpf_program *prog, 355 + bpf_program__attach_iter(const struct bpf_program *prog, 359 356 const struct bpf_iter_attach_opts *opts); 360 357 361 358 struct bpf_insn; ··· 857 854 size_t sz; /* size of this struct, for forward/backward compatibility */ 858 855 859 856 const char *name; 860 - void *data; 857 + const void *data; 861 858 size_t data_sz; 862 859 863 860 struct bpf_object **obj;
+5
tools/lib/bpf/libbpf.map
··· 386 386 btf_dump__dump_type_data; 387 387 libbpf_set_strict_mode; 388 388 } LIBBPF_0.4.0; 389 + 390 + LIBBPF_0.6.0 { 391 + global: 392 + btf__add_tag; 393 + } LIBBPF_0.5.0;
+24
tools/lib/bpf/libbpf_common.h
··· 10 10 #define __LIBBPF_LIBBPF_COMMON_H 11 11 12 12 #include <string.h> 13 + #include "libbpf_version.h" 13 14 14 15 #ifndef LIBBPF_API 15 16 #define LIBBPF_API __attribute__((visibility("default"))) 16 17 #endif 17 18 18 19 #define LIBBPF_DEPRECATED(msg) __attribute__((deprecated(msg))) 20 + 21 + /* Mark a symbol as deprecated when libbpf version is >= {major}.{minor} */ 22 + #define LIBBPF_DEPRECATED_SINCE(major, minor, msg) \ 23 + __LIBBPF_MARK_DEPRECATED_ ## major ## _ ## minor \ 24 + (LIBBPF_DEPRECATED("libbpf v" # major "." # minor "+: " msg)) 25 + 26 + #define __LIBBPF_CURRENT_VERSION_GEQ(major, minor) \ 27 + (LIBBPF_MAJOR_VERSION > (major) || \ 28 + (LIBBPF_MAJOR_VERSION == (major) && LIBBPF_MINOR_VERSION >= (minor))) 29 + 30 + /* Add checks for other versions below when planning deprecation of API symbols 31 + * with the LIBBPF_DEPRECATED_SINCE macro. 32 + */ 33 + #if __LIBBPF_CURRENT_VERSION_GEQ(0, 6) 34 + #define __LIBBPF_MARK_DEPRECATED_0_6(X) X 35 + #else 36 + #define __LIBBPF_MARK_DEPRECATED_0_6(X) 37 + #endif 38 + #if __LIBBPF_CURRENT_VERSION_GEQ(0, 7) 39 + #define __LIBBPF_MARK_DEPRECATED_0_7(X) X 40 + #else 41 + #define __LIBBPF_MARK_DEPRECATED_0_7(X) 42 + #endif 19 43 20 44 /* Helper macro to declare and initialize libbpf options struct 21 45 *
+21 -6
tools/lib/bpf/libbpf_internal.h
··· 69 69 #define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size) 70 70 #define BTF_TYPE_FLOAT_ENC(name, sz) \ 71 71 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) 72 + #define BTF_TYPE_TAG_ENC(value, type, component_idx) \ 73 + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TAG, 0, 0), type), (component_idx) 72 74 73 75 #ifndef likely 74 76 #define likely(x) __builtin_expect(!!(x), 1) ··· 92 90 /* Symbol versioning is different between static and shared library. 93 91 * Properly versioned symbols are needed for shared library, but 94 92 * only the symbol of the new version is needed for static library. 93 + * Starting with GNU C 10, use symver attribute instead of .symver assembler 94 + * directive, which works better with GCC LTO builds. 95 95 */ 96 - #ifdef SHARED 97 - # define COMPAT_VERSION(internal_name, api_name, version) \ 96 + #if defined(SHARED) && defined(__GNUC__) && __GNUC__ >= 10 97 + 98 + #define DEFAULT_VERSION(internal_name, api_name, version) \ 99 + __attribute__((symver(#api_name "@@" #version))) 100 + #define COMPAT_VERSION(internal_name, api_name, version) \ 101 + __attribute__((symver(#api_name "@" #version))) 102 + 103 + #elif defined(SHARED) 104 + 105 + #define COMPAT_VERSION(internal_name, api_name, version) \ 98 106 asm(".symver " #internal_name "," #api_name "@" #version); 99 - # define DEFAULT_VERSION(internal_name, api_name, version) \ 107 + #define DEFAULT_VERSION(internal_name, api_name, version) \ 100 108 asm(".symver " #internal_name "," #api_name "@@" #version); 101 - #else 102 - # define COMPAT_VERSION(internal_name, api_name, version) 103 - # define DEFAULT_VERSION(internal_name, api_name, version) \ 109 + 110 + #else /* !SHARED */ 111 + 112 + #define COMPAT_VERSION(internal_name, api_name, version) 113 + #define DEFAULT_VERSION(internal_name, api_name, version) \ 104 114 extern typeof(internal_name) api_name \ 105 115 __attribute__((alias(#internal_name))); 116 + 106 117 #endif 107 118 108 119 extern void 
libbpf_print(enum libbpf_print_level level,
+9
tools/lib/bpf/libbpf_version.h
··· 1 + /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 + /* Copyright (C) 2021 Facebook */ 3 + #ifndef __LIBBPF_VERSION_H 4 + #define __LIBBPF_VERSION_H 5 + 6 + #define LIBBPF_MAJOR_VERSION 0 7 + #define LIBBPF_MINOR_VERSION 6 8 + 9 + #endif /* __LIBBPF_VERSION_H */
+2 -2
tools/lib/bpf/xsk.c
··· 281 281 return err; 282 282 } 283 283 284 + DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4) 284 285 int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, 285 286 __u64 size, struct xsk_ring_prod *fill, 286 287 struct xsk_ring_cons *comp, ··· 346 345 __u32 frame_headroom; 347 346 }; 348 347 348 + COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2) 349 349 int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area, 350 350 __u64 size, struct xsk_ring_prod *fill, 351 351 struct xsk_ring_cons *comp, ··· 360 358 return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp, 361 359 &config); 362 360 } 363 - COMPAT_VERSION(xsk_umem__create_v0_0_2, xsk_umem__create, LIBBPF_0.0.2) 364 - DEFAULT_VERSION(xsk_umem__create_v0_0_4, xsk_umem__create, LIBBPF_0.0.4) 365 361 366 362 static enum xsk_prog get_xsk_prog(void) 367 363 {
+3 -2
tools/testing/selftests/bpf/.gitignore
··· 9 9 FEATURE-DUMP.libbpf 10 10 fixdep 11 11 test_dev_cgroup 12 - /test_progs* 13 - !test_progs.h 12 + /test_progs 13 + /test_progs-no_alu32 14 + /test_progs-bpf_gcc 14 15 test_verifier_log 15 16 feature 16 17 test_sock
+2 -2
tools/testing/selftests/bpf/Makefile
··· 512 512 $(Q)$(CXX) $(CFLAGS) $(filter %.a %.o %.cpp,$^) $(LDLIBS) -o $@ 513 513 514 514 # Benchmark runner 515 - $(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h 515 + $(OUTPUT)/bench_%.o: benchs/bench_%.c bench.h $(BPFOBJ) 516 516 $(call msg,CC,,$@) 517 517 $(Q)$(CC) $(CFLAGS) -c $(filter %.c,$^) $(LDLIBS) -o $@ 518 518 $(OUTPUT)/bench_rename.o: $(OUTPUT)/test_overhead.skel.h 519 519 $(OUTPUT)/bench_trigger.o: $(OUTPUT)/trigger_bench.skel.h 520 520 $(OUTPUT)/bench_ringbufs.o: $(OUTPUT)/ringbuf_bench.skel.h \ 521 521 $(OUTPUT)/perfbuf_bench.skel.h 522 - $(OUTPUT)/bench.o: bench.h testing_helpers.h 522 + $(OUTPUT)/bench.o: bench.h testing_helpers.h $(BPFOBJ) 523 523 $(OUTPUT)/bench: LDLIBS += -lm 524 524 $(OUTPUT)/bench: $(OUTPUT)/bench.o $(OUTPUT)/testing_helpers.o \ 525 525 $(OUTPUT)/bench_count.o \
+14
tools/testing/selftests/bpf/README.rst
··· 201 201 202 202 __ https://reviews.llvm.org/D93563 203 203 204 + btf_tag test and Clang version 205 + ============================== 206 + 207 + The btf_tag selftest require LLVM support to recognize the btf_tag attribute. 208 + It was introduced in `Clang 14`__. 209 + 210 + Without it, the btf_tag selftest will be skipped and you will observe: 211 + 212 + .. code-block:: console 213 + 214 + #<test_num> btf_tag:SKIP 215 + 216 + __ https://reviews.llvm.org/D106614 217 + 204 218 Clang dependencies for static linking tests 205 219 =========================================== 206 220
+17 -2
tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c
··· 13 13 14 14 DEFINE_PER_CPU(int, bpf_testmod_ksym_percpu) = 123; 15 15 16 + noinline int bpf_testmod_loop_test(int n) 17 + { 18 + int i, sum = 0; 19 + 20 + /* the primary goal of this test is to test LBR. Create a lot of 21 + * branches in the function, so we can catch it easily. 22 + */ 23 + for (i = 0; i < n; i++) 24 + sum += i; 25 + return sum; 26 + } 27 + 16 28 noinline ssize_t 17 29 bpf_testmod_test_read(struct file *file, struct kobject *kobj, 18 30 struct bin_attribute *bin_attr, ··· 36 24 .len = len, 37 25 }; 38 26 39 - trace_bpf_testmod_test_read(current, &ctx); 27 + /* This is always true. Use the check to make sure the compiler 28 + * doesn't remove bpf_testmod_loop_test. 29 + */ 30 + if (bpf_testmod_loop_test(101) > 100) 31 + trace_bpf_testmod_test_read(current, &ctx); 40 32 41 33 return -EIO; /* always fail */ 42 34 } ··· 87 71 MODULE_AUTHOR("Andrii Nakryiko"); 88 72 MODULE_DESCRIPTION("BPF selftests module"); 89 73 MODULE_LICENSE("Dual BSD/GPL"); 90 -
+6 -1
tools/testing/selftests/bpf/btf_helpers.c
··· 24 24 [BTF_KIND_VAR] = "VAR", 25 25 [BTF_KIND_DATASEC] = "DATASEC", 26 26 [BTF_KIND_FLOAT] = "FLOAT", 27 + [BTF_KIND_TAG] = "TAG", 27 28 }; 28 29 29 30 static const char *btf_kind_str(__u16 kind) 30 31 { 31 - if (kind > BTF_KIND_DATASEC) 32 + if (kind > BTF_KIND_TAG) 32 33 return "UNKNOWN"; 33 34 return btf_kind_str_mapping[kind]; 34 35 } ··· 177 176 } 178 177 case BTF_KIND_FLOAT: 179 178 fprintf(out, " size=%u", t->size); 179 + break; 180 + case BTF_KIND_TAG: 181 + fprintf(out, " type_id=%u component_idx=%d", 182 + t->type, btf_tag(t)->component_idx); 180 183 break; 181 184 default: 182 185 break;
+2 -4
tools/testing/selftests/bpf/prog_tests/bpf_iter.c
··· 589 589 590 590 static void test_bpf_hash_map(void) 591 591 { 592 - __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; 592 + __u32 expected_key_a = 0, expected_key_b = 0; 593 593 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 594 594 struct bpf_iter_bpf_hash_map *skel; 595 595 int err, i, len, map_fd, iter_fd; ··· 638 638 val = i + 4; 639 639 expected_key_a += key.a; 640 640 expected_key_b += key.b; 641 - expected_key_c += key.c; 642 641 expected_val += val; 643 642 644 643 err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY); ··· 684 685 685 686 static void test_bpf_percpu_hash_map(void) 686 687 { 687 - __u32 expected_key_a = 0, expected_key_b = 0, expected_key_c = 0; 688 + __u32 expected_key_a = 0, expected_key_b = 0; 688 689 DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts); 689 690 struct bpf_iter_bpf_percpu_hash_map *skel; 690 691 int err, i, j, len, map_fd, iter_fd; ··· 721 722 key.c = i + 3; 722 723 expected_key_a += key.a; 723 724 expected_key_b += key.b; 724 - expected_key_c += key.c; 725 725 726 726 for (j = 0; j < bpf_num_possible_cpus(); j++) { 727 727 *(__u32 *)(val + j * 8) = i + j;
+422 -19
tools/testing/selftests/bpf/prog_tests/btf.c
··· 39 39 #define BTF_END_RAW 0xdeadbeef 40 40 #define NAME_TBD 0xdeadb33f 41 41 42 - #define NAME_NTH(N) (0xffff0000 | N) 43 - #define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xffff0000) 42 + #define NAME_NTH(N) (0xfffe0000 | N) 43 + #define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xfffe0000) 44 44 #define GET_NAME_NTH_IDX(X) (X & 0x0000ffff) 45 45 46 46 #define MAX_NR_RAW_U32 1024 ··· 3661 3661 .err_str = "Invalid type_size", 3662 3662 }, 3663 3663 3664 + { 3665 + .descr = "tag test #1, struct/member, well-formed", 3666 + .raw_types = { 3667 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3668 + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ 3669 + BTF_MEMBER_ENC(NAME_TBD, 1, 0), 3670 + BTF_MEMBER_ENC(NAME_TBD, 1, 32), 3671 + BTF_TAG_ENC(NAME_TBD, 2, -1), 3672 + BTF_TAG_ENC(NAME_TBD, 2, 0), 3673 + BTF_TAG_ENC(NAME_TBD, 2, 1), 3674 + BTF_END_RAW, 3675 + }, 3676 + BTF_STR_SEC("\0m1\0m2\0tag1\0tag2\0tag3"), 3677 + .map_type = BPF_MAP_TYPE_ARRAY, 3678 + .map_name = "tag_type_check_btf", 3679 + .key_size = sizeof(int), 3680 + .value_size = 8, 3681 + .key_type_id = 1, 3682 + .value_type_id = 2, 3683 + .max_entries = 1, 3684 + }, 3685 + { 3686 + .descr = "tag test #2, union/member, well-formed", 3687 + .raw_types = { 3688 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3689 + BTF_UNION_ENC(NAME_TBD, 2, 4), /* [2] */ 3690 + BTF_MEMBER_ENC(NAME_TBD, 1, 0), 3691 + BTF_MEMBER_ENC(NAME_TBD, 1, 0), 3692 + BTF_TAG_ENC(NAME_TBD, 2, -1), 3693 + BTF_TAG_ENC(NAME_TBD, 2, 0), 3694 + BTF_TAG_ENC(NAME_TBD, 2, 1), 3695 + BTF_END_RAW, 3696 + }, 3697 + BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), 3698 + .map_type = BPF_MAP_TYPE_ARRAY, 3699 + .map_name = "tag_type_check_btf", 3700 + .key_size = sizeof(int), 3701 + .value_size = 4, 3702 + .key_type_id = 1, 3703 + .value_type_id = 2, 3704 + .max_entries = 1, 3705 + }, 3706 + { 3707 + .descr = "tag test #3, variable, well-formed", 3708 + .raw_types = { 3709 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3710 + 
BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ 3711 + BTF_VAR_ENC(NAME_TBD, 1, 1), /* [3] */ 3712 + BTF_TAG_ENC(NAME_TBD, 2, -1), 3713 + BTF_TAG_ENC(NAME_TBD, 3, -1), 3714 + BTF_END_RAW, 3715 + }, 3716 + BTF_STR_SEC("\0local\0global\0tag1\0tag2"), 3717 + .map_type = BPF_MAP_TYPE_ARRAY, 3718 + .map_name = "tag_type_check_btf", 3719 + .key_size = sizeof(int), 3720 + .value_size = 4, 3721 + .key_type_id = 1, 3722 + .value_type_id = 1, 3723 + .max_entries = 1, 3724 + }, 3725 + { 3726 + .descr = "tag test #4, func/parameter, well-formed", 3727 + .raw_types = { 3728 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3729 + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ 3730 + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), 3731 + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), 3732 + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ 3733 + BTF_TAG_ENC(NAME_TBD, 3, -1), 3734 + BTF_TAG_ENC(NAME_TBD, 3, 0), 3735 + BTF_TAG_ENC(NAME_TBD, 3, 1), 3736 + BTF_END_RAW, 3737 + }, 3738 + BTF_STR_SEC("\0arg1\0arg2\0f\0tag1\0tag2\0tag3"), 3739 + .map_type = BPF_MAP_TYPE_ARRAY, 3740 + .map_name = "tag_type_check_btf", 3741 + .key_size = sizeof(int), 3742 + .value_size = 4, 3743 + .key_type_id = 1, 3744 + .value_type_id = 1, 3745 + .max_entries = 1, 3746 + }, 3747 + { 3748 + .descr = "tag test #5, invalid value", 3749 + .raw_types = { 3750 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3751 + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ 3752 + BTF_TAG_ENC(0, 2, -1), 3753 + BTF_END_RAW, 3754 + }, 3755 + BTF_STR_SEC("\0local\0tag"), 3756 + .map_type = BPF_MAP_TYPE_ARRAY, 3757 + .map_name = "tag_type_check_btf", 3758 + .key_size = sizeof(int), 3759 + .value_size = 4, 3760 + .key_type_id = 1, 3761 + .value_type_id = 1, 3762 + .max_entries = 1, 3763 + .btf_load_err = true, 3764 + .err_str = "Invalid value", 3765 + }, 3766 + { 3767 + .descr = "tag test #6, invalid target type", 3768 + .raw_types = { 3769 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3770 + BTF_TAG_ENC(NAME_TBD, 1, -1), 3771 + BTF_END_RAW, 3772 + }, 
3773 + BTF_STR_SEC("\0tag1"), 3774 + .map_type = BPF_MAP_TYPE_ARRAY, 3775 + .map_name = "tag_type_check_btf", 3776 + .key_size = sizeof(int), 3777 + .value_size = 4, 3778 + .key_type_id = 1, 3779 + .value_type_id = 1, 3780 + .max_entries = 1, 3781 + .btf_load_err = true, 3782 + .err_str = "Invalid type", 3783 + }, 3784 + { 3785 + .descr = "tag test #7, invalid vlen", 3786 + .raw_types = { 3787 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3788 + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ 3789 + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_TAG, 0, 1), 2), (0), 3790 + BTF_END_RAW, 3791 + }, 3792 + BTF_STR_SEC("\0local\0tag1"), 3793 + .map_type = BPF_MAP_TYPE_ARRAY, 3794 + .map_name = "tag_type_check_btf", 3795 + .key_size = sizeof(int), 3796 + .value_size = 4, 3797 + .key_type_id = 1, 3798 + .value_type_id = 1, 3799 + .max_entries = 1, 3800 + .btf_load_err = true, 3801 + .err_str = "vlen != 0", 3802 + }, 3803 + { 3804 + .descr = "tag test #8, invalid kflag", 3805 + .raw_types = { 3806 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3807 + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ 3808 + BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_TAG, 1, 0), 2), (-1), 3809 + BTF_END_RAW, 3810 + }, 3811 + BTF_STR_SEC("\0local\0tag1"), 3812 + .map_type = BPF_MAP_TYPE_ARRAY, 3813 + .map_name = "tag_type_check_btf", 3814 + .key_size = sizeof(int), 3815 + .value_size = 4, 3816 + .key_type_id = 1, 3817 + .value_type_id = 1, 3818 + .max_entries = 1, 3819 + .btf_load_err = true, 3820 + .err_str = "Invalid btf_info kind_flag", 3821 + }, 3822 + { 3823 + .descr = "tag test #9, var, invalid component_idx", 3824 + .raw_types = { 3825 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3826 + BTF_VAR_ENC(NAME_TBD, 1, 0), /* [2] */ 3827 + BTF_TAG_ENC(NAME_TBD, 2, 0), 3828 + BTF_END_RAW, 3829 + }, 3830 + BTF_STR_SEC("\0local\0tag"), 3831 + .map_type = BPF_MAP_TYPE_ARRAY, 3832 + .map_name = "tag_type_check_btf", 3833 + .key_size = sizeof(int), 3834 + .value_size = 4, 3835 + 
.key_type_id = 1, 3836 + .value_type_id = 1, 3837 + .max_entries = 1, 3838 + .btf_load_err = true, 3839 + .err_str = "Invalid component_idx", 3840 + }, 3841 + { 3842 + .descr = "tag test #10, struct member, invalid component_idx", 3843 + .raw_types = { 3844 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3845 + BTF_STRUCT_ENC(0, 2, 8), /* [2] */ 3846 + BTF_MEMBER_ENC(NAME_TBD, 1, 0), 3847 + BTF_MEMBER_ENC(NAME_TBD, 1, 32), 3848 + BTF_TAG_ENC(NAME_TBD, 2, 2), 3849 + BTF_END_RAW, 3850 + }, 3851 + BTF_STR_SEC("\0m1\0m2\0tag"), 3852 + .map_type = BPF_MAP_TYPE_ARRAY, 3853 + .map_name = "tag_type_check_btf", 3854 + .key_size = sizeof(int), 3855 + .value_size = 8, 3856 + .key_type_id = 1, 3857 + .value_type_id = 2, 3858 + .max_entries = 1, 3859 + .btf_load_err = true, 3860 + .err_str = "Invalid component_idx", 3861 + }, 3862 + { 3863 + .descr = "tag test #11, func parameter, invalid component_idx", 3864 + .raw_types = { 3865 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3866 + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ 3867 + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), 3868 + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), 3869 + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ 3870 + BTF_TAG_ENC(NAME_TBD, 3, 2), 3871 + BTF_END_RAW, 3872 + }, 3873 + BTF_STR_SEC("\0arg1\0arg2\0f\0tag"), 3874 + .map_type = BPF_MAP_TYPE_ARRAY, 3875 + .map_name = "tag_type_check_btf", 3876 + .key_size = sizeof(int), 3877 + .value_size = 4, 3878 + .key_type_id = 1, 3879 + .value_type_id = 1, 3880 + .max_entries = 1, 3881 + .btf_load_err = true, 3882 + .err_str = "Invalid component_idx", 3883 + }, 3884 + { 3885 + .descr = "tag test #12, < -1 component_idx", 3886 + .raw_types = { 3887 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 3888 + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ 3889 + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), 3890 + BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), 3891 + BTF_FUNC_ENC(NAME_TBD, 2), /* [3] */ 3892 + BTF_TAG_ENC(NAME_TBD, 3, -2), 3893 + BTF_END_RAW, 3894 + }, 3895 + 
BTF_STR_SEC("\0arg1\0arg2\0f\0tag"), 3896 + .map_type = BPF_MAP_TYPE_ARRAY, 3897 + .map_name = "tag_type_check_btf", 3898 + .key_size = sizeof(int), 3899 + .value_size = 4, 3900 + .key_type_id = 1, 3901 + .value_type_id = 1, 3902 + .max_entries = 1, 3903 + .btf_load_err = true, 3904 + .err_str = "Invalid component_idx", 3905 + }, 3906 + 3664 3907 }; /* struct btf_raw_test raw_tests[] */ 3665 3908 3666 3909 static const char *get_next_str(const char *start, const char *end) ··· 6664 6421 BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */ 6665 6422 BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */ 6666 6423 BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */ 6667 - BTF_MEMBER_ENC(NAME_NTH(8), 13, 672), /* float d; */ 6424 + BTF_MEMBER_ENC(NAME_NTH(8), 15, 672), /* float d; */ 6668 6425 /* ptr -> [3] struct s */ 6669 6426 BTF_PTR_ENC(3), /* [4] */ 6670 6427 /* ptr -> [6] const int */ 6671 6428 BTF_PTR_ENC(6), /* [5] */ 6672 6429 /* const -> [1] int */ 6673 6430 BTF_CONST_ENC(1), /* [6] */ 6431 + /* tag -> [3] struct s */ 6432 + BTF_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ 6433 + /* tag -> [3] struct s, member 1 */ 6434 + BTF_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ 6674 6435 6675 6436 /* full copy of the above */ 6676 - BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */ 6677 - BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */ 6678 - BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [9] */ 6679 - BTF_MEMBER_ENC(NAME_NTH(3), 10, 0), 6680 - BTF_MEMBER_ENC(NAME_NTH(4), 11, 64), 6681 - BTF_MEMBER_ENC(NAME_NTH(5), 8, 128), 6682 - BTF_MEMBER_ENC(NAME_NTH(6), 7, 640), 6683 - BTF_MEMBER_ENC(NAME_NTH(8), 13, 672), 6684 - BTF_PTR_ENC(9), /* [10] */ 6685 - BTF_PTR_ENC(12), /* [11] */ 6686 - BTF_CONST_ENC(7), /* [12] */ 6687 - BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [13] */ 6437 + BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [9] */ 6438 + BTF_TYPE_ARRAY_ENC(9, 9, 16), /* [10] */ 6439 + BTF_STRUCT_ENC(NAME_NTH(2), 5, 88), /* [11] */ 6440 + BTF_MEMBER_ENC(NAME_NTH(3), 12, 
0), 6441 + BTF_MEMBER_ENC(NAME_NTH(4), 13, 64), 6442 + BTF_MEMBER_ENC(NAME_NTH(5), 10, 128), 6443 + BTF_MEMBER_ENC(NAME_NTH(6), 9, 640), 6444 + BTF_MEMBER_ENC(NAME_NTH(8), 15, 672), 6445 + BTF_PTR_ENC(11), /* [12] */ 6446 + BTF_PTR_ENC(14), /* [13] */ 6447 + BTF_CONST_ENC(9), /* [14] */ 6448 + BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [15] */ 6449 + BTF_TAG_ENC(NAME_NTH(2), 11, -1), /* [16] */ 6450 + BTF_TAG_ENC(NAME_NTH(2), 11, 1), /* [17] */ 6688 6451 BTF_END_RAW, 6689 6452 }, 6690 6453 BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0float\0d"), ··· 6707 6458 BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */ 6708 6459 BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */ 6709 6460 BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */ 6710 - BTF_MEMBER_ENC(NAME_NTH(4), 7, 672), /* float d; */ 6461 + BTF_MEMBER_ENC(NAME_NTH(4), 9, 672), /* float d; */ 6711 6462 /* ptr -> [3] struct s */ 6712 6463 BTF_PTR_ENC(3), /* [4] */ 6713 6464 /* ptr -> [6] const int */ 6714 6465 BTF_PTR_ENC(6), /* [5] */ 6715 6466 /* const -> [1] int */ 6716 6467 BTF_CONST_ENC(1), /* [6] */ 6717 - BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [7] */ 6468 + BTF_TAG_ENC(NAME_NTH(2), 3, -1), /* [7] */ 6469 + BTF_TAG_ENC(NAME_NTH(2), 3, 1), /* [8] */ 6470 + BTF_TYPE_FLOAT_ENC(NAME_NTH(7), 4), /* [9] */ 6718 6471 BTF_END_RAW, 6719 6472 }, 6720 6473 BTF_STR_SEC("\0a\0b\0c\0d\0int\0float\0next\0s"), ··· 6841 6590 BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), 6842 6591 BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ 6843 6592 BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ 6593 + BTF_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */ 6594 + BTF_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */ 6844 6595 BTF_END_RAW, 6845 6596 }, 6846 - BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N"), 6597 + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P"), 6847 6598 }, 6848 6599 .expect = { 6849 6600 .raw_types = { ··· 6869 6616 BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), 6870 6617 BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ 6871 6618 
BTF_TYPE_FLOAT_ENC(NAME_TBD, 2), /* [14] float */ 6619 + BTF_TAG_ENC(NAME_TBD, 13, -1), /* [15] tag */ 6620 + BTF_TAG_ENC(NAME_TBD, 13, 1), /* [16] tag */ 6872 6621 BTF_END_RAW, 6873 6622 }, 6874 - BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N"), 6623 + BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M\0N\0O\0P"), 6875 6624 }, 6876 6625 .opts = { 6877 6626 .dont_resolve_fwds = false, ··· 7022 6767 .dedup_table_size = 1 7023 6768 }, 7024 6769 }, 6770 + { 6771 + .descr = "dedup: func/func_arg/var tags", 6772 + .input = { 6773 + .raw_types = { 6774 + /* int */ 6775 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 6776 + /* static int t */ 6777 + BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */ 6778 + /* void f(int a1, int a2) */ 6779 + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ 6780 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), 6781 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1), 6782 + BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */ 6783 + /* tag -> t */ 6784 + BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ 6785 + BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [6] */ 6786 + /* tag -> func */ 6787 + BTF_TAG_ENC(NAME_NTH(5), 4, -1), /* [7] */ 6788 + BTF_TAG_ENC(NAME_NTH(5), 4, -1), /* [8] */ 6789 + /* tag -> func arg a1 */ 6790 + BTF_TAG_ENC(NAME_NTH(5), 4, 1), /* [9] */ 6791 + BTF_TAG_ENC(NAME_NTH(5), 4, 1), /* [10] */ 6792 + BTF_END_RAW, 6793 + }, 6794 + BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"), 6795 + }, 6796 + .expect = { 6797 + .raw_types = { 6798 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 6799 + BTF_VAR_ENC(NAME_NTH(1), 1, 0), /* [2] */ 6800 + BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ 6801 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), 6802 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(3), 1), 6803 + BTF_FUNC_ENC(NAME_NTH(4), 2), /* [4] */ 6804 + BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ 6805 + BTF_TAG_ENC(NAME_NTH(5), 4, -1), /* [6] */ 6806 + BTF_TAG_ENC(NAME_NTH(5), 4, 1), /* [7] */ 6807 + BTF_END_RAW, 6808 + }, 6809 + BTF_STR_SEC("\0t\0a1\0a2\0f\0tag"), 6810 + }, 6811 + .opts = { 6812 + 
.dont_resolve_fwds = false, 6813 + }, 6814 + }, 6815 + { 6816 + .descr = "dedup: func/func_param tags", 6817 + .input = { 6818 + .raw_types = { 6819 + /* int */ 6820 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 6821 + /* void f(int a1, int a2) */ 6822 + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ 6823 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), 6824 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), 6825 + BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */ 6826 + /* void f(int a1, int a2) */ 6827 + BTF_FUNC_PROTO_ENC(0, 2), /* [4] */ 6828 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), 6829 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), 6830 + BTF_FUNC_ENC(NAME_NTH(3), 4), /* [5] */ 6831 + /* tag -> f: tag1, tag2 */ 6832 + BTF_TAG_ENC(NAME_NTH(4), 3, -1), /* [6] */ 6833 + BTF_TAG_ENC(NAME_NTH(5), 3, -1), /* [7] */ 6834 + /* tag -> f/a2: tag1, tag2 */ 6835 + BTF_TAG_ENC(NAME_NTH(4), 3, 1), /* [8] */ 6836 + BTF_TAG_ENC(NAME_NTH(5), 3, 1), /* [9] */ 6837 + /* tag -> f: tag1, tag3 */ 6838 + BTF_TAG_ENC(NAME_NTH(4), 5, -1), /* [10] */ 6839 + BTF_TAG_ENC(NAME_NTH(6), 5, -1), /* [11] */ 6840 + /* tag -> f/a2: tag1, tag3 */ 6841 + BTF_TAG_ENC(NAME_NTH(4), 5, 1), /* [12] */ 6842 + BTF_TAG_ENC(NAME_NTH(6), 5, 1), /* [13] */ 6843 + BTF_END_RAW, 6844 + }, 6845 + BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"), 6846 + }, 6847 + .expect = { 6848 + .raw_types = { 6849 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 6850 + BTF_FUNC_PROTO_ENC(0, 2), /* [2] */ 6851 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(1), 1), 6852 + BTF_FUNC_PROTO_ARG_ENC(NAME_NTH(2), 1), 6853 + BTF_FUNC_ENC(NAME_NTH(3), 2), /* [3] */ 6854 + BTF_TAG_ENC(NAME_NTH(4), 3, -1), /* [4] */ 6855 + BTF_TAG_ENC(NAME_NTH(5), 3, -1), /* [5] */ 6856 + BTF_TAG_ENC(NAME_NTH(6), 3, -1), /* [6] */ 6857 + BTF_TAG_ENC(NAME_NTH(4), 3, 1), /* [7] */ 6858 + BTF_TAG_ENC(NAME_NTH(5), 3, 1), /* [8] */ 6859 + BTF_TAG_ENC(NAME_NTH(6), 3, 1), /* [9] */ 6860 + BTF_END_RAW, 6861 + }, 6862 + BTF_STR_SEC("\0a1\0a2\0f\0tag1\0tag2\0tag3"), 6863 + }, 6864 + .opts = { 
6865 + .dont_resolve_fwds = false, 6866 + }, 6867 + }, 6868 + { 6869 + .descr = "dedup: struct/struct_member tags", 6870 + .input = { 6871 + .raw_types = { 6872 + /* int */ 6873 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 6874 + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */ 6875 + BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), 6876 + BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), 6877 + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [3] */ 6878 + BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), 6879 + BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), 6880 + /* tag -> t: tag1, tag2 */ 6881 + BTF_TAG_ENC(NAME_NTH(4), 2, -1), /* [4] */ 6882 + BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [5] */ 6883 + /* tag -> t/m2: tag1, tag2 */ 6884 + BTF_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ 6885 + BTF_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ 6886 + /* tag -> t: tag1, tag3 */ 6887 + BTF_TAG_ENC(NAME_NTH(4), 3, -1), /* [8] */ 6888 + BTF_TAG_ENC(NAME_NTH(6), 3, -1), /* [9] */ 6889 + /* tag -> t/m2: tag1, tag3 */ 6890 + BTF_TAG_ENC(NAME_NTH(4), 3, 1), /* [10] */ 6891 + BTF_TAG_ENC(NAME_NTH(6), 3, 1), /* [11] */ 6892 + BTF_END_RAW, 6893 + }, 6894 + BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), 6895 + }, 6896 + .expect = { 6897 + .raw_types = { 6898 + BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */ 6899 + BTF_STRUCT_ENC(NAME_NTH(1), 2, 8), /* [2] */ 6900 + BTF_MEMBER_ENC(NAME_NTH(2), 1, 0), 6901 + BTF_MEMBER_ENC(NAME_NTH(3), 1, 32), 6902 + BTF_TAG_ENC(NAME_NTH(4), 2, -1), /* [3] */ 6903 + BTF_TAG_ENC(NAME_NTH(5), 2, -1), /* [4] */ 6904 + BTF_TAG_ENC(NAME_NTH(6), 2, -1), /* [5] */ 6905 + BTF_TAG_ENC(NAME_NTH(4), 2, 1), /* [6] */ 6906 + BTF_TAG_ENC(NAME_NTH(5), 2, 1), /* [7] */ 6907 + BTF_TAG_ENC(NAME_NTH(6), 2, 1), /* [8] */ 6908 + BTF_END_RAW, 6909 + }, 6910 + BTF_STR_SEC("\0t\0m1\0m2\0tag1\0tag2\0tag3"), 6911 + }, 6912 + .opts = { 6913 + .dont_resolve_fwds = false, 6914 + }, 6915 + }, 7025 6916 7026 6917 }; 7027 6918 ··· 7202 6801 return base_size + sizeof(struct btf_var); 7203 6802 case BTF_KIND_DATASEC: 7204 6803 return base_size + 
vlen * sizeof(struct btf_var_secinfo); 6804 + case BTF_KIND_TAG: 6805 + return base_size + sizeof(struct btf_tag); 7205 6806 default: 7206 6807 fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind); 7207 6808 return -EINVAL;
+20
tools/testing/selftests/bpf/prog_tests/btf_tag.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2021 Facebook */ 3 + #include <test_progs.h> 4 + #include "tag.skel.h" 5 + 6 + void test_btf_tag(void) 7 + { 8 + struct tag *skel; 9 + 10 + skel = tag__open_and_load(); 11 + if (!ASSERT_OK_PTR(skel, "btf_tag")) 12 + return; 13 + 14 + if (skel->rodata->skip_tests) { 15 + printf("%s:SKIP: btf_tag attribute not supported", __func__); 16 + test__skip(); 17 + } 18 + 19 + tag__destroy(skel); 20 + }
+21
tools/testing/selftests/bpf/prog_tests/btf_write.c
··· 281 281 "[17] DATASEC 'datasec1' size=12 vlen=1\n" 282 282 "\ttype_id=1 offset=4 size=8", "raw_dump"); 283 283 284 + /* TAG */ 285 + id = btf__add_tag(btf, "tag1", 16, -1); 286 + ASSERT_EQ(id, 18, "tag_id"); 287 + t = btf__type_by_id(btf, 18); 288 + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag1", "tag_value"); 289 + ASSERT_EQ(btf_kind(t), BTF_KIND_TAG, "tag_kind"); 290 + ASSERT_EQ(t->type, 16, "tag_type"); 291 + ASSERT_EQ(btf_tag(t)->component_idx, -1, "tag_component_idx"); 292 + ASSERT_STREQ(btf_type_raw_dump(btf, 18), 293 + "[18] TAG 'tag1' type_id=16 component_idx=-1", "raw_dump"); 294 + 295 + id = btf__add_tag(btf, "tag2", 14, 1); 296 + ASSERT_EQ(id, 19, "tag_id"); 297 + t = btf__type_by_id(btf, 19); 298 + ASSERT_STREQ(btf__str_by_offset(btf, t->name_off), "tag2", "tag_value"); 299 + ASSERT_EQ(btf_kind(t), BTF_KIND_TAG, "tag_kind"); 300 + ASSERT_EQ(t->type, 14, "tag_type"); 301 + ASSERT_EQ(btf_tag(t)->component_idx, 1, "tag_component_idx"); 302 + ASSERT_STREQ(btf_type_raw_dump(btf, 19), 303 + "[19] TAG 'tag2' type_id=14 component_idx=1", "raw_dump"); 304 + 284 305 btf__free(btf); 285 306 }
+4 -13
tools/testing/selftests/bpf/prog_tests/core_reloc.c
··· 30 30 .output_len = sizeof(struct core_reloc_module_output), \ 31 31 .prog_sec_name = sec_name, \ 32 32 .raw_tp_name = tp_name, \ 33 - .trigger = trigger_module_test_read, \ 33 + .trigger = __trigger_module_test_read, \ 34 34 .needs_testmod = true, \ 35 35 } 36 36 ··· 249 249 #define SIZE_CASE_COMMON(name) \ 250 250 .case_name = #name, \ 251 251 .bpf_obj_file = "test_core_reloc_size.o", \ 252 - .btf_src_file = "btf__core_reloc_" #name ".o", \ 253 - .relaxed_core_relocs = true 252 + .btf_src_file = "btf__core_reloc_" #name ".o" 254 253 255 254 #define SIZE_OUTPUT_DATA(type) \ 256 255 STRUCT_TO_CHAR_PTR(core_reloc_size_output) { \ ··· 474 475 return 0; 475 476 } 476 477 477 - static int trigger_module_test_read(const struct core_reloc_test_case *test) 478 + static int __trigger_module_test_read(const struct core_reloc_test_case *test) 478 479 { 479 480 struct core_reloc_module_output *exp = (void *)test->output; 480 - int fd, err; 481 481 482 - fd = open("/sys/kernel/bpf_testmod", O_RDONLY); 483 - err = -errno; 484 - if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) 485 - return err; 486 - 487 - read(fd, NULL, exp->len); /* request expected number of bytes */ 488 - close(fd); 489 - 482 + trigger_module_test_read(exp->len); 490 483 return 0; 491 484 } 492 485
+26 -17
tools/testing/selftests/bpf/prog_tests/fexit_bpf2bpf.c
··· 60 60 struct bpf_object *obj = NULL, *tgt_obj; 61 61 __u32 retval, tgt_prog_id, info_len; 62 62 struct bpf_prog_info prog_info = {}; 63 - struct bpf_program **prog = NULL; 63 + struct bpf_program **prog = NULL, *p; 64 64 struct bpf_link **link = NULL; 65 65 int err, tgt_fd, i; 66 66 struct btf *btf; ··· 69 69 &tgt_obj, &tgt_fd); 70 70 if (!ASSERT_OK(err, "tgt_prog_load")) 71 71 return; 72 - DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, 73 - .attach_prog_fd = tgt_fd, 74 - ); 75 72 76 73 info_len = sizeof(prog_info); 77 74 err = bpf_obj_get_info_by_fd(tgt_fd, &prog_info, &info_len); ··· 86 89 if (!ASSERT_OK_PTR(prog, "prog_ptr")) 87 90 goto close_prog; 88 91 89 - obj = bpf_object__open_file(obj_file, &opts); 92 + obj = bpf_object__open_file(obj_file, NULL); 90 93 if (!ASSERT_OK_PTR(obj, "obj_open")) 91 94 goto close_prog; 95 + 96 + bpf_object__for_each_program(p, obj) { 97 + err = bpf_program__set_attach_target(p, tgt_fd, NULL); 98 + ASSERT_OK(err, "set_attach_target"); 99 + } 92 100 93 101 err = bpf_object__load(obj); 94 102 if (!ASSERT_OK(err, "obj_load")) ··· 272 270 struct bpf_link *freplace_link = NULL; 273 271 struct bpf_program *prog; 274 272 __u32 duration = 0; 275 - int err, pkt_fd; 273 + int err, pkt_fd, attach_prog_fd; 276 274 277 275 err = bpf_prog_load(tgt_name, BPF_PROG_TYPE_UNSPEC, 278 276 &pkt_obj, &pkt_fd); ··· 280 278 if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n", 281 279 tgt_name, err, errno)) 282 280 return; 283 - opts.attach_prog_fd = pkt_fd; 284 281 285 - freplace_obj = bpf_object__open_file(freplace_name, &opts); 282 + freplace_obj = bpf_object__open_file(freplace_name, NULL); 286 283 if (!ASSERT_OK_PTR(freplace_obj, "freplace_obj_open")) 287 284 goto out; 285 + 286 + prog = bpf_program__next(NULL, freplace_obj); 287 + err = bpf_program__set_attach_target(prog, pkt_fd, NULL); 288 + ASSERT_OK(err, "freplace__set_attach_target"); 288 289 289 290 err = bpf_object__load(freplace_obj); 290 291 if (CHECK(err, "freplace_obj_load", 
"err %d\n", err)) 291 292 goto out; 292 293 293 - prog = bpf_program__next(NULL, freplace_obj); 294 294 freplace_link = bpf_program__attach_trace(prog); 295 295 if (!ASSERT_OK_PTR(freplace_link, "freplace_attach_trace")) 296 296 goto out; 297 297 298 - opts.attach_prog_fd = bpf_program__fd(prog); 299 - fmod_obj = bpf_object__open_file(fmod_ret_name, &opts); 298 + fmod_obj = bpf_object__open_file(fmod_ret_name, NULL); 300 299 if (!ASSERT_OK_PTR(fmod_obj, "fmod_obj_open")) 301 300 goto out; 301 + 302 + attach_prog_fd = bpf_program__fd(prog); 303 + prog = bpf_program__next(NULL, fmod_obj); 304 + err = bpf_program__set_attach_target(prog, attach_prog_fd, NULL); 305 + ASSERT_OK(err, "fmod_ret_set_attach_target"); 302 306 303 307 err = bpf_object__load(fmod_obj); 304 308 if (CHECK(!err, "fmod_obj_load", "loading fmod_ret should fail\n")) ··· 330 322 } 331 323 332 324 static void test_obj_load_failure_common(const char *obj_file, 333 - const char *target_obj_file) 334 - 325 + const char *target_obj_file) 335 326 { 336 327 /* 337 328 * standalone test that asserts failure to load freplace prog 338 329 * because of invalid return code. 339 330 */ 340 331 struct bpf_object *obj = NULL, *pkt_obj; 332 + struct bpf_program *prog; 341 333 int err, pkt_fd; 342 334 __u32 duration = 0; 343 335 ··· 347 339 if (CHECK(err, "tgt_prog_load", "file %s err %d errno %d\n", 348 340 target_obj_file, err, errno)) 349 341 return; 350 - DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, 351 - .attach_prog_fd = pkt_fd, 352 - ); 353 342 354 - obj = bpf_object__open_file(obj_file, &opts); 343 + obj = bpf_object__open_file(obj_file, NULL); 355 344 if (!ASSERT_OK_PTR(obj, "obj_open")) 356 345 goto close_prog; 346 + 347 + prog = bpf_program__next(NULL, obj); 348 + err = bpf_program__set_attach_target(prog, pkt_fd, NULL); 349 + ASSERT_OK(err, "set_attach_target"); 357 350 358 351 /* It should fail to load the program */ 359 352 err = bpf_object__load(obj);
+100
tools/testing/selftests/bpf/prog_tests/get_branch_snapshot.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2021 Facebook */ 3 + #include <test_progs.h> 4 + #include "get_branch_snapshot.skel.h" 5 + 6 + static int *pfd_array; 7 + static int cpu_cnt; 8 + 9 + static int create_perf_events(void) 10 + { 11 + struct perf_event_attr attr = {0}; 12 + int cpu; 13 + 14 + /* create perf event */ 15 + attr.size = sizeof(attr); 16 + attr.type = PERF_TYPE_RAW; 17 + attr.config = 0x1b00; 18 + attr.sample_type = PERF_SAMPLE_BRANCH_STACK; 19 + attr.branch_sample_type = PERF_SAMPLE_BRANCH_KERNEL | 20 + PERF_SAMPLE_BRANCH_USER | PERF_SAMPLE_BRANCH_ANY; 21 + 22 + cpu_cnt = libbpf_num_possible_cpus(); 23 + pfd_array = malloc(sizeof(int) * cpu_cnt); 24 + if (!pfd_array) { 25 + cpu_cnt = 0; 26 + return 1; 27 + } 28 + 29 + for (cpu = 0; cpu < cpu_cnt; cpu++) { 30 + pfd_array[cpu] = syscall(__NR_perf_event_open, &attr, 31 + -1, cpu, -1, PERF_FLAG_FD_CLOEXEC); 32 + if (pfd_array[cpu] < 0) 33 + break; 34 + } 35 + 36 + return cpu == 0; 37 + } 38 + 39 + static void close_perf_events(void) 40 + { 41 + int cpu = 0; 42 + int fd; 43 + 44 + while (cpu++ < cpu_cnt) { 45 + fd = pfd_array[cpu]; 46 + if (fd < 0) 47 + break; 48 + close(fd); 49 + } 50 + free(pfd_array); 51 + } 52 + 53 + void test_get_branch_snapshot(void) 54 + { 55 + struct get_branch_snapshot *skel = NULL; 56 + int err; 57 + 58 + if (create_perf_events()) { 59 + test__skip(); /* system doesn't support LBR */ 60 + goto cleanup; 61 + } 62 + 63 + skel = get_branch_snapshot__open_and_load(); 64 + if (!ASSERT_OK_PTR(skel, "get_branch_snapshot__open_and_load")) 65 + goto cleanup; 66 + 67 + err = kallsyms_find("bpf_testmod_loop_test", &skel->bss->address_low); 68 + if (!ASSERT_OK(err, "kallsyms_find")) 69 + goto cleanup; 70 + 71 + err = kallsyms_find_next("bpf_testmod_loop_test", &skel->bss->address_high); 72 + if (!ASSERT_OK(err, "kallsyms_find_next")) 73 + goto cleanup; 74 + 75 + err = get_branch_snapshot__attach(skel); 76 + if (!ASSERT_OK(err, "get_branch_snapshot__attach")) 77 + 
goto cleanup; 78 + 79 + trigger_module_test_read(100); 80 + 81 + if (skel->bss->total_entries < 16) { 82 + /* too few entries for the hit/waste test */ 83 + test__skip(); 84 + goto cleanup; 85 + } 86 + 87 + ASSERT_GT(skel->bss->test1_hits, 6, "find_looptest_in_lbr"); 88 + 89 + /* Given we stop LBR in software, we will waste a few entries. 90 + * But we should try to waste as few as possible entries. We are at 91 + * about 7 on x86_64 systems. 92 + * Add a check for < 10 so that we get heads-up when something 93 + * changes and wastes too many entries. 94 + */ 95 + ASSERT_LT(skel->bss->wasted_entries, 10, "check_wasted_entries"); 96 + 97 + cleanup: 98 + get_branch_snapshot__destroy(skel); 99 + close_perf_events(); 100 + }
-39
tools/testing/selftests/bpf/prog_tests/module_attach.c
··· 6 6 7 7 static int duration; 8 8 9 - static int trigger_module_test_read(int read_sz) 10 - { 11 - int fd, err; 12 - 13 - fd = open("/sys/kernel/bpf_testmod", O_RDONLY); 14 - err = -errno; 15 - if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) 16 - return err; 17 - 18 - read(fd, NULL, read_sz); 19 - close(fd); 20 - 21 - return 0; 22 - } 23 - 24 - static int trigger_module_test_write(int write_sz) 25 - { 26 - int fd, err; 27 - char *buf = malloc(write_sz); 28 - 29 - if (!buf) 30 - return -ENOMEM; 31 - 32 - memset(buf, 'a', write_sz); 33 - buf[write_sz-1] = '\0'; 34 - 35 - fd = open("/sys/kernel/bpf_testmod", O_WRONLY); 36 - err = -errno; 37 - if (CHECK(fd < 0, "testmod_file_open", "failed: %d\n", err)) { 38 - free(buf); 39 - return err; 40 - } 41 - 42 - write(fd, buf, write_sz); 43 - close(fd); 44 - free(buf); 45 - return 0; 46 - } 47 - 48 9 static int delete_module(const char *name, int flags) 49 10 { 50 11 return syscall(__NR_delete_module, name, flags);
+6
tools/testing/selftests/bpf/prog_tests/skb_ctx.c
··· 11 11 .cb[3] = 4, 12 12 .cb[4] = 5, 13 13 .priority = 6, 14 + .ingress_ifindex = 11, 14 15 .ifindex = 1, 15 16 .tstamp = 7, 16 17 .wire_len = 100, 17 18 .gso_segs = 8, 18 19 .mark = 9, 19 20 .gso_size = 10, 21 + .hwtstamp = 11, 20 22 }; 21 23 struct bpf_prog_test_run_attr tattr = { 22 24 .data_in = &pkt_v4, ··· 99 97 "ctx_out_ifindex", 100 98 "skb->ifindex == %d, expected %d\n", 101 99 skb.ifindex, 1); 100 + CHECK_ATTR(skb.ingress_ifindex != 11, 101 + "ctx_out_ingress_ifindex", 102 + "skb->ingress_ifindex == %d, expected %d\n", 103 + skb.ingress_ifindex, 11); 102 104 CHECK_ATTR(skb.tstamp != 8, 103 105 "ctx_out_tstamp", 104 106 "skb->tstamp == %lld, expected %d\n",
+6
tools/testing/selftests/bpf/prog_tests/skeleton.c
··· 18 18 struct test_skeleton__data *data; 19 19 struct test_skeleton__rodata *rodata; 20 20 struct test_skeleton__kconfig *kcfg; 21 + const void *elf_bytes; 22 + size_t elf_bytes_sz = 0; 21 23 22 24 skel = test_skeleton__open(); 23 25 if (CHECK(!skel, "skel_open", "failed to open skeleton\n")) ··· 92 90 "got %d != exp %d\n", bss->bpf_syscall, kcfg->CONFIG_BPF_SYSCALL); 93 91 CHECK(bss->kern_ver != kcfg->LINUX_KERNEL_VERSION, "ext2", 94 92 "got %d != exp %d\n", bss->kern_ver, kcfg->LINUX_KERNEL_VERSION); 93 + 94 + elf_bytes = test_skeleton__elf_bytes(&elf_bytes_sz); 95 + ASSERT_OK_PTR(elf_bytes, "elf_bytes"); 96 + ASSERT_GE(elf_bytes_sz, 0, "elf_bytes_sz"); 95 97 96 98 cleanup: 97 99 test_skeleton__destroy(skel);
+20 -5
tools/testing/selftests/bpf/prog_tests/tailcalls.c
··· 219 219 bpf_object__close(obj); 220 220 } 221 221 222 - /* test_tailcall_3 checks that the count value of the tail call limit 223 - * enforcement matches with expectations. 224 - */ 225 - static void test_tailcall_3(void) 222 + static void test_tailcall_count(const char *which) 226 223 { 227 224 int err, map_fd, prog_fd, main_fd, data_fd, i, val; 228 225 struct bpf_map *prog_array, *data_map; ··· 228 231 __u32 retval, duration; 229 232 char buff[128] = {}; 230 233 231 - err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj, 234 + err = bpf_prog_load(which, BPF_PROG_TYPE_SCHED_CLS, &obj, 232 235 &prog_fd); 233 236 if (CHECK_FAIL(err)) 234 237 return; ··· 291 294 err, errno, retval); 292 295 out: 293 296 bpf_object__close(obj); 297 + } 298 + 299 + /* test_tailcall_3 checks that the count value of the tail call limit 300 + * enforcement matches with expectations. JIT uses direct jump. 301 + */ 302 + static void test_tailcall_3(void) 303 + { 304 + test_tailcall_count("tailcall3.o"); 305 + } 306 + 307 + /* test_tailcall_6 checks that the count value of the tail call limit 308 + * enforcement matches with expectations. JIT uses indirect jump. 309 + */ 310 + static void test_tailcall_6(void) 311 + { 312 + test_tailcall_count("tailcall6.o"); 294 313 } 295 314 296 315 /* test_tailcall_4 checks that the kernel properly selects indirect jump ··· 835 822 test_tailcall_4(); 836 823 if (test__start_subtest("tailcall_5")) 837 824 test_tailcall_5(); 825 + if (test__start_subtest("tailcall_6")) 826 + test_tailcall_6(); 838 827 if (test__start_subtest("tailcall_bpf2bpf_1")) 839 828 test_tailcall_bpf2bpf_1(); 840 829 if (test__start_subtest("tailcall_bpf2bpf_2"))
+1 -1
tools/testing/selftests/bpf/prog_tests/tc_redirect.c
··· 633 633 struct nstoken *nstoken = NULL; 634 634 int err; 635 635 int tunnel_pid = -1; 636 - int src_fd, target_fd; 636 + int src_fd, target_fd = -1; 637 637 int ifindex; 638 638 639 639 /* Start a L3 TUN/TAP tunnel between the src and dst namespaces.
+3 -9
tools/testing/selftests/bpf/progs/bpf_cubic.c
··· 169 169 ca->sample_cnt = 0; 170 170 } 171 171 172 - /* "struct_ops/" prefix is not a requirement 173 - * It will be recognized as BPF_PROG_TYPE_STRUCT_OPS 174 - * as long as it is used in one of the func ptr 175 - * under SEC(".struct_ops"). 176 - */ 172 + /* "struct_ops/" prefix is a requirement */ 177 173 SEC("struct_ops/bpf_cubic_init") 178 174 void BPF_PROG(bpf_cubic_init, struct sock *sk) 179 175 { ··· 184 188 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; 185 189 } 186 190 187 - /* No prefix in SEC will also work. 188 - * The remaining tcp-cubic functions have an easier way. 189 - */ 190 - SEC("no-sec-prefix-bictcp_cwnd_event") 191 + /* "struct_ops" prefix is a requirement */ 192 + SEC("struct_ops/bpf_cubic_cwnd_event") 191 193 void BPF_PROG(bpf_cubic_cwnd_event, struct sock *sk, enum tcp_ca_event event) 192 194 { 193 195 if (event == CA_EVENT_TX_START) {
+40
tools/testing/selftests/bpf/progs/get_branch_snapshot.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2021 Facebook */ 3 + #include "vmlinux.h" 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_tracing.h> 6 + 7 + char _license[] SEC("license") = "GPL"; 8 + 9 + __u64 test1_hits = 0; 10 + __u64 address_low = 0; 11 + __u64 address_high = 0; 12 + int wasted_entries = 0; 13 + long total_entries = 0; 14 + 15 + #define ENTRY_CNT 32 16 + struct perf_branch_entry entries[ENTRY_CNT] = {}; 17 + 18 + static inline bool in_range(__u64 val) 19 + { 20 + return (val >= address_low) && (val < address_high); 21 + } 22 + 23 + SEC("fexit/bpf_testmod_loop_test") 24 + int BPF_PROG(test1, int n, int ret) 25 + { 26 + long i; 27 + 28 + total_entries = bpf_get_branch_snapshot(entries, sizeof(entries), 0); 29 + total_entries /= sizeof(struct perf_branch_entry); 30 + 31 + for (i = 0; i < ENTRY_CNT; i++) { 32 + if (i >= total_entries) 33 + break; 34 + if (in_range(entries[i].from) && in_range(entries[i].to)) 35 + test1_hits++; 36 + else if (!test1_hits) 37 + wasted_entries++; 38 + } 39 + return 0; 40 + }
+49
tools/testing/selftests/bpf/progs/tag.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2021 Facebook */ 3 + #include "vmlinux.h" 4 + #include <bpf/bpf_helpers.h> 5 + #include <bpf/bpf_tracing.h> 6 + 7 + #ifndef __has_attribute 8 + #define __has_attribute(x) 0 9 + #endif 10 + 11 + #if __has_attribute(btf_tag) 12 + #define __tag1 __attribute__((btf_tag("tag1"))) 13 + #define __tag2 __attribute__((btf_tag("tag2"))) 14 + volatile const bool skip_tests __tag1 __tag2 = false; 15 + #else 16 + #define __tag1 17 + #define __tag2 18 + volatile const bool skip_tests = true; 19 + #endif 20 + 21 + struct key_t { 22 + int a; 23 + int b __tag1 __tag2; 24 + int c; 25 + } __tag1 __tag2; 26 + 27 + struct { 28 + __uint(type, BPF_MAP_TYPE_HASH); 29 + __uint(max_entries, 3); 30 + __type(key, struct key_t); 31 + __type(value, __u64); 32 + } hashmap1 SEC(".maps"); 33 + 34 + 35 + static __noinline int foo(int x __tag1 __tag2) __tag1 __tag2 36 + { 37 + struct key_t key; 38 + __u64 val = 1; 39 + 40 + key.a = key.b = key.c = x; 41 + bpf_map_update_elem(&hashmap1, &key, &val, 0); 42 + return 0; 43 + } 44 + 45 + SEC("fentry/bpf_fentry_test1") 46 + int BPF_PROG(sub, int x) 47 + { 48 + return foo(x); 49 + }
+34
tools/testing/selftests/bpf/progs/tailcall6.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/bpf.h> 3 + 4 + #include <bpf/bpf_helpers.h> 5 + 6 + struct { 7 + __uint(type, BPF_MAP_TYPE_PROG_ARRAY); 8 + __uint(max_entries, 1); 9 + __uint(key_size, sizeof(__u32)); 10 + __uint(value_size, sizeof(__u32)); 11 + } jmp_table SEC(".maps"); 12 + 13 + int count, which; 14 + 15 + SEC("classifier/0") 16 + int bpf_func_0(struct __sk_buff *skb) 17 + { 18 + count++; 19 + if (__builtin_constant_p(which)) 20 + __bpf_unreachable(); 21 + bpf_tail_call(skb, &jmp_table, which); 22 + return 1; 23 + } 24 + 25 + SEC("classifier") 26 + int entry(struct __sk_buff *skb) 27 + { 28 + if (__builtin_constant_p(which)) 29 + __bpf_unreachable(); 30 + bpf_tail_call(skb, &jmp_table, which); 31 + return 0; 32 + } 33 + 34 + char __license[] SEC("license") = "GPL";
+6
tools/testing/selftests/bpf/progs/test_skb_ctx.c
··· 25 25 return 1; 26 26 if (skb->gso_size != 10) 27 27 return 1; 28 + if (skb->ingress_ifindex != 11) 29 + return 1; 30 + if (skb->ifindex != 1) 31 + return 1; 32 + if (skb->hwtstamp != 11) 33 + return 1; 28 34 29 35 return 0; 30 36 }
+3
tools/testing/selftests/bpf/test_btf.h
··· 69 69 #define BTF_TYPE_FLOAT_ENC(name, sz) \ 70 70 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FLOAT, 0, 0), sz) 71 71 72 + #define BTF_TAG_ENC(value, type, component_idx) \ 73 + BTF_TYPE_ENC(value, BTF_INFO_ENC(BTF_KIND_TAG, 0, 0), type), (component_idx) 74 + 72 75 #endif /* _TEST_BTF_H */
+39
tools/testing/selftests/bpf/test_progs.c
··· 743 743 return chdir(flavor); 744 744 } 745 745 746 + int trigger_module_test_read(int read_sz) 747 + { 748 + int fd, err; 749 + 750 + fd = open("/sys/kernel/bpf_testmod", O_RDONLY); 751 + err = -errno; 752 + if (!ASSERT_GE(fd, 0, "testmod_file_open")) 753 + return err; 754 + 755 + read(fd, NULL, read_sz); 756 + close(fd); 757 + 758 + return 0; 759 + } 760 + 761 + int trigger_module_test_write(int write_sz) 762 + { 763 + int fd, err; 764 + char *buf = malloc(write_sz); 765 + 766 + if (!buf) 767 + return -ENOMEM; 768 + 769 + memset(buf, 'a', write_sz); 770 + buf[write_sz-1] = '\0'; 771 + 772 + fd = open("/sys/kernel/bpf_testmod", O_WRONLY); 773 + err = -errno; 774 + if (!ASSERT_GE(fd, 0, "testmod_file_open")) { 775 + free(buf); 776 + return err; 777 + } 778 + 779 + write(fd, buf, write_sz); 780 + close(fd); 781 + free(buf); 782 + return 0; 783 + } 784 + 746 785 #define MAX_BACKTRACE_SZ 128 747 786 void crash_handler(int signum) 748 787 {
+2
tools/testing/selftests/bpf/test_progs.h
··· 291 291 int compare_stack_ips(int smap_fd, int amap_fd, int stack_trace_len); 292 292 int extract_build_id(char *build_id, size_t size); 293 293 int kern_sync_rcu(void); 294 + int trigger_module_test_read(int read_sz); 295 + int trigger_module_test_write(int write_sz); 294 296 295 297 #ifdef __x86_64__ 296 298 #define SYS_NANOSLEEP_KPROBE_NAME "__x64_sys_nanosleep"
+37
tools/testing/selftests/bpf/trace_helpers.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 + #include <ctype.h> 2 3 #include <stdio.h> 3 4 #include <stdlib.h> 4 5 #include <string.h> ··· 109 108 if (strcmp(name, sym) == 0) { 110 109 *addr = value; 111 110 goto out; 111 + } 112 + } 113 + err = -ENOENT; 114 + 115 + out: 116 + fclose(f); 117 + return err; 118 + } 119 + 120 + /* find the address of the next symbol of the same type, this can be used 121 + * to determine the end of a function. 122 + */ 123 + int kallsyms_find_next(const char *sym, unsigned long long *addr) 124 + { 125 + char type, found_type, name[500]; 126 + unsigned long long value; 127 + bool found = false; 128 + int err = 0; 129 + FILE *f; 130 + 131 + f = fopen("/proc/kallsyms", "r"); 132 + if (!f) 133 + return -EINVAL; 134 + 135 + while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) { 136 + /* Different types of symbols in kernel modules are mixed 137 + * in /proc/kallsyms. Only return the next matching type. 138 + * Use tolower() for type so that 'T' matches 't'. 139 + */ 140 + if (found && found_type == tolower(type)) { 141 + *addr = value; 142 + goto out; 143 + } 144 + if (strcmp(name, sym) == 0) { 145 + found = true; 146 + found_type = tolower(type); 112 147 } 113 148 } 114 149 err = -ENOENT;
+5
tools/testing/selftests/bpf/trace_helpers.h
··· 16 16 /* open kallsyms and find addresses on the fly, faster than load + search. */ 17 17 int kallsyms_find(const char *sym, unsigned long long *addr); 18 18 19 + /* find the address of the next symbol, this can be used to determine the 20 + * end of a function 21 + */ 22 + int kallsyms_find_next(const char *sym, unsigned long long *addr); 23 + 19 24 void read_trace_pipe(void); 20 25 21 26 ssize_t get_uprobe_offset(const void *addr, ssize_t base);
+60
tools/testing/selftests/bpf/verifier/ctx_skb.c
··· 1058 1058 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1059 1059 }, 1060 1060 { 1061 + "padding after gso_size is not accessible", 1062 + .insns = { 1063 + BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 1064 + offsetofend(struct __sk_buff, gso_size)), 1065 + BPF_MOV64_IMM(BPF_REG_0, 0), 1066 + BPF_EXIT_INSN(), 1067 + }, 1068 + .result = REJECT, 1069 + .result_unpriv = REJECT, 1070 + .errstr = "invalid bpf_context access off=180 size=4", 1071 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1072 + }, 1073 + { 1074 + "read hwtstamp from CGROUP_SKB", 1075 + .insns = { 1076 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 1077 + offsetof(struct __sk_buff, hwtstamp)), 1078 + BPF_MOV64_IMM(BPF_REG_0, 0), 1079 + BPF_EXIT_INSN(), 1080 + }, 1081 + .result = ACCEPT, 1082 + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 1083 + }, 1084 + { 1085 + "read hwtstamp from CGROUP_SKB", 1086 + .insns = { 1087 + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 1088 + offsetof(struct __sk_buff, hwtstamp)), 1089 + BPF_MOV64_IMM(BPF_REG_0, 0), 1090 + BPF_EXIT_INSN(), 1091 + }, 1092 + .result = ACCEPT, 1093 + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 1094 + }, 1095 + { 1096 + "write hwtstamp from CGROUP_SKB", 1097 + .insns = { 1098 + BPF_MOV64_IMM(BPF_REG_0, 0), 1099 + BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 1100 + offsetof(struct __sk_buff, hwtstamp)), 1101 + BPF_MOV64_IMM(BPF_REG_0, 0), 1102 + BPF_EXIT_INSN(), 1103 + }, 1104 + .result = REJECT, 1105 + .result_unpriv = REJECT, 1106 + .errstr = "invalid bpf_context access off=184 size=8", 1107 + .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 1108 + }, 1109 + { 1110 + "read hwtstamp from CLS", 1111 + .insns = { 1112 + BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 1113 + offsetof(struct __sk_buff, hwtstamp)), 1114 + BPF_MOV64_IMM(BPF_REG_0, 0), 1115 + BPF_EXIT_INSN(), 1116 + }, 1117 + .result = ACCEPT, 1118 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 1119 + }, 1120 + { 1061 1121 "check wire_len is not readable by sockets", 1062 1122 .insns = { 1063 1123 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+19 -3
tools/testing/selftests/bpf/verifier/jit.c
··· 62 62 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), 63 63 BPF_MOV64_IMM(BPF_REG_0, 1), 64 64 BPF_EXIT_INSN(), 65 + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), 66 + BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 0xefefef), 67 + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), 68 + BPF_MOV64_IMM(BPF_REG_0, 1), 69 + BPF_EXIT_INSN(), 65 70 BPF_MOV32_REG(BPF_REG_2, BPF_REG_2), 66 71 BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), 67 72 BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1), ··· 78 73 BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), 79 74 BPF_MOV64_IMM(BPF_REG_0, 1), 80 75 BPF_EXIT_INSN(), 76 + BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL), 77 + BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0xefefef), 78 + BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2), 79 + BPF_MOV64_IMM(BPF_REG_0, 1), 80 + BPF_EXIT_INSN(), 81 + BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL), 82 + BPF_LD_IMM64(BPF_REG_2, 0x2ad4d4aaULL), 83 + BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 0x2b), 84 + BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2), 85 + BPF_MOV64_IMM(BPF_REG_0, 1), 86 + BPF_EXIT_INSN(), 81 87 BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL), 82 88 BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL), 83 - BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL), 84 - BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1), 85 - BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2), 89 + BPF_LD_IMM64(BPF_REG_5, 0xeeff0d413122ULL), 90 + BPF_ALU32_REG(BPF_MUL, BPF_REG_5, BPF_REG_1), 91 + BPF_JMP_REG(BPF_JEQ, BPF_REG_5, BPF_REG_0, 2), 86 92 BPF_MOV64_IMM(BPF_REG_0, 1), 87 93 BPF_EXIT_INSN(), 88 94 BPF_MOV64_IMM(BPF_REG_0, 2),
+575 -307
tools/testing/selftests/bpf/xdpxceiver.c
··· 19 19 * Virtual Ethernet interfaces. 20 20 * 21 21 * For each mode, the following tests are run: 22 - * a. nopoll - soft-irq processing 22 + * a. nopoll - soft-irq processing in run-to-completion mode 23 23 * b. poll - using poll() syscall 24 24 * c. Socket Teardown 25 25 * Create a Tx and a Rx socket, Tx from one socket, Rx on another. Destroy ··· 45 45 * Configure sockets at indexes 0 and 1, run a traffic on queue ids 0, 46 46 * then remove xsk sockets from queue 0 on both veth interfaces and 47 47 * finally run a traffic on queues ids 1 48 + * g. unaligned mode 49 + * h. tests for invalid and corner case Tx descriptors so that the correct ones 50 + * are discarded and let through, respectively. 51 + * i. 2K frame size tests 48 52 * 49 53 * Total tests: 12 50 54 * ··· 116 112 117 113 #define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__) 118 114 119 - #define print_ksft_result(void)\ 120 - (ksft_test_result_pass("PASS: %s %s %s%s%s%s\n", configured_mode ? "DRV" : "SKB",\ 121 - test_type == TEST_TYPE_POLL ? "POLL" : "NOPOLL",\ 122 - test_type == TEST_TYPE_TEARDOWN ? "Socket Teardown" : "",\ 123 - test_type == TEST_TYPE_BIDI ? "Bi-directional Sockets" : "",\ 124 - test_type == TEST_TYPE_STATS ? "Stats" : "",\ 125 - test_type == TEST_TYPE_BPF_RES ? "BPF RES" : "")) 115 + #define mode_string(test) (test)->ifobj_tx->xdp_flags & XDP_FLAGS_SKB_MODE ? 
"SKB" : "DRV" 116 + 117 + #define print_ksft_result(test) \ 118 + (ksft_test_result_pass("PASS: %s %s\n", mode_string(test), (test)->name)) 126 119 127 120 static void memset32_htonl(void *dest, u32 val, u32 size) 128 121 { ··· 236 235 udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE, IPPROTO_UDP, (u16 *)udp_hdr); 237 236 } 238 237 239 - static void xsk_configure_umem(struct ifobject *data, void *buffer, u64 size, int idx) 238 + static int xsk_configure_umem(struct xsk_umem_info *umem, void *buffer, u64 size) 240 239 { 241 240 struct xsk_umem_config cfg = { 242 241 .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS, 243 242 .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, 244 - .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE, 245 - .frame_headroom = frame_headroom, 243 + .frame_size = umem->frame_size, 244 + .frame_headroom = umem->frame_headroom, 246 245 .flags = XSK_UMEM__DEFAULT_FLAGS 247 246 }; 248 - struct xsk_umem_info *umem; 249 247 int ret; 250 248 251 - umem = calloc(1, sizeof(struct xsk_umem_info)); 252 - if (!umem) 253 - exit_with_error(errno); 249 + if (umem->unaligned_mode) 250 + cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; 254 251 255 252 ret = xsk_umem__create(&umem->umem, buffer, size, 256 253 &umem->fq, &umem->cq, &cfg); 257 254 if (ret) 258 - exit_with_error(-ret); 255 + return ret; 259 256 260 257 umem->buffer = buffer; 261 - 262 - data->umem_arr[idx] = umem; 258 + return 0; 263 259 } 264 260 265 - static void xsk_populate_fill_ring(struct xsk_umem_info *umem) 266 - { 267 - int ret, i; 268 - u32 idx = 0; 269 - 270 - ret = xsk_ring_prod__reserve(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx); 271 - if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS) 272 - exit_with_error(-ret); 273 - for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++) 274 - *xsk_ring_prod__fill_addr(&umem->fq, idx++) = i * XSK_UMEM__DEFAULT_FRAME_SIZE; 275 - xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS); 276 - } 277 - 278 - static int xsk_configure_socket(struct 
ifobject *ifobject, int idx) 261 + static int xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_info *umem, 262 + struct ifobject *ifobject, u32 qid) 279 263 { 280 264 struct xsk_socket_config cfg; 281 - struct xsk_socket_info *xsk; 282 265 struct xsk_ring_cons *rxr; 283 266 struct xsk_ring_prod *txr; 284 - int ret; 285 267 286 - xsk = calloc(1, sizeof(struct xsk_socket_info)); 287 - if (!xsk) 288 - exit_with_error(errno); 289 - 290 - xsk->umem = ifobject->umem; 291 - cfg.rx_size = rxqsize; 268 + xsk->umem = umem; 269 + cfg.rx_size = xsk->rxqsize; 292 270 cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; 293 271 cfg.libbpf_flags = 0; 294 - cfg.xdp_flags = xdp_flags; 295 - cfg.bind_flags = xdp_bind_flags; 272 + cfg.xdp_flags = ifobject->xdp_flags; 273 + cfg.bind_flags = ifobject->bind_flags; 296 274 297 - if (test_type != TEST_TYPE_BIDI) { 298 - rxr = (ifobject->fv.vector == rx) ? &xsk->rx : NULL; 299 - txr = (ifobject->fv.vector == tx) ? &xsk->tx : NULL; 300 - } else { 301 - rxr = &xsk->rx; 302 - txr = &xsk->tx; 303 - } 304 - 305 - ret = xsk_socket__create(&xsk->xsk, ifobject->ifname, idx, 306 - ifobject->umem->umem, rxr, txr, &cfg); 307 - if (ret) 308 - return 1; 309 - 310 - ifobject->xsk_arr[idx] = xsk; 311 - 312 - return 0; 275 + txr = ifobject->tx_on ? &xsk->tx : NULL; 276 + rxr = ifobject->rx_on ? 
&xsk->rx : NULL; 277 + return xsk_socket__create(&xsk->xsk, ifobject->ifname, qid, umem->umem, rxr, txr, &cfg); 313 278 } 314 279 315 280 static struct option long_options[] = { ··· 321 354 return nsfd; 322 355 } 323 356 324 - static int validate_interfaces(void) 357 + static bool validate_interface(struct ifobject *ifobj) 325 358 { 326 - bool ret = true; 327 - 328 - for (int i = 0; i < MAX_INTERFACES; i++) { 329 - if (!strcmp(ifdict[i]->ifname, "")) { 330 - ret = false; 331 - ksft_test_result_fail("ERROR: interfaces: -i <int>,<ns> -i <int>,<ns>."); 332 - } 333 - } 334 - return ret; 359 + if (!strcmp(ifobj->ifname, "")) 360 + return false; 361 + return true; 335 362 } 336 363 337 - static void parse_command_line(int argc, char **argv) 364 + static void parse_command_line(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx, int argc, 365 + char **argv) 338 366 { 339 - int option_index, interface_index = 0, c; 367 + struct ifobject *ifobj; 368 + u32 interface_nb = 0; 369 + int option_index, c; 340 370 341 371 opterr = 0; 342 372 343 373 for (;;) { 344 - c = getopt_long(argc, argv, "i:Dv", long_options, &option_index); 374 + char *sptr, *token; 345 375 376 + c = getopt_long(argc, argv, "i:Dv", long_options, &option_index); 346 377 if (c == -1) 347 378 break; 348 379 349 380 switch (c) { 350 381 case 'i': 351 - if (interface_index == MAX_INTERFACES) 382 + if (interface_nb == 0) 383 + ifobj = ifobj_tx; 384 + else if (interface_nb == 1) 385 + ifobj = ifobj_rx; 386 + else 352 387 break; 353 - char *sptr, *token; 354 388 355 389 sptr = strndupa(optarg, strlen(optarg)); 356 - memcpy(ifdict[interface_index]->ifname, 357 - strsep(&sptr, ","), MAX_INTERFACE_NAME_CHARS); 390 + memcpy(ifobj->ifname, strsep(&sptr, ","), MAX_INTERFACE_NAME_CHARS); 358 391 token = strsep(&sptr, ","); 359 392 if (token) 360 - memcpy(ifdict[interface_index]->nsname, token, 361 - MAX_INTERFACES_NAMESPACE_CHARS); 362 - interface_index++; 393 + memcpy(ifobj->nsname, token, 
MAX_INTERFACES_NAMESPACE_CHARS); 394 + interface_nb++; 363 395 break; 364 396 case 'D': 365 397 opt_pkt_dump = true; ··· 371 405 ksft_exit_xfail(); 372 406 } 373 407 } 408 + } 374 409 375 - if (!validate_interfaces()) { 376 - usage(basename(argv[0])); 377 - ksft_exit_xfail(); 410 + static void __test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 411 + struct ifobject *ifobj_rx) 412 + { 413 + u32 i, j; 414 + 415 + for (i = 0; i < MAX_INTERFACES; i++) { 416 + struct ifobject *ifobj = i ? ifobj_rx : ifobj_tx; 417 + 418 + ifobj->umem = &ifobj->umem_arr[0]; 419 + ifobj->xsk = &ifobj->xsk_arr[0]; 420 + ifobj->use_poll = false; 421 + ifobj->pkt_stream = test->pkt_stream_default; 422 + 423 + if (i == 0) { 424 + ifobj->rx_on = false; 425 + ifobj->tx_on = true; 426 + } else { 427 + ifobj->rx_on = true; 428 + ifobj->tx_on = false; 429 + } 430 + 431 + for (j = 0; j < MAX_SOCKETS; j++) { 432 + memset(&ifobj->umem_arr[j], 0, sizeof(ifobj->umem_arr[j])); 433 + memset(&ifobj->xsk_arr[j], 0, sizeof(ifobj->xsk_arr[j])); 434 + ifobj->umem_arr[j].num_frames = DEFAULT_UMEM_BUFFERS; 435 + ifobj->umem_arr[j].frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 436 + ifobj->xsk_arr[j].rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 437 + } 378 438 } 439 + 440 + test->ifobj_tx = ifobj_tx; 441 + test->ifobj_rx = ifobj_rx; 442 + test->current_step = 0; 443 + test->total_steps = 1; 444 + test->nb_sockets = 1; 445 + } 446 + 447 + static void test_spec_init(struct test_spec *test, struct ifobject *ifobj_tx, 448 + struct ifobject *ifobj_rx, enum test_mode mode) 449 + { 450 + struct pkt_stream *pkt_stream; 451 + u32 i; 452 + 453 + pkt_stream = test->pkt_stream_default; 454 + memset(test, 0, sizeof(*test)); 455 + test->pkt_stream_default = pkt_stream; 456 + 457 + for (i = 0; i < MAX_INTERFACES; i++) { 458 + struct ifobject *ifobj = i ? 
ifobj_rx : ifobj_tx; 459 + 460 + ifobj->xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; 461 + if (mode == TEST_MODE_SKB) 462 + ifobj->xdp_flags |= XDP_FLAGS_SKB_MODE; 463 + else 464 + ifobj->xdp_flags |= XDP_FLAGS_DRV_MODE; 465 + 466 + ifobj->bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY; 467 + } 468 + 469 + __test_spec_init(test, ifobj_tx, ifobj_rx); 470 + } 471 + 472 + static void test_spec_reset(struct test_spec *test) 473 + { 474 + __test_spec_init(test, test->ifobj_tx, test->ifobj_rx); 475 + } 476 + 477 + static void test_spec_set_name(struct test_spec *test, const char *name) 478 + { 479 + strncpy(test->name, name, MAX_TEST_NAME_SIZE); 379 480 } 380 481 381 482 static struct pkt *pkt_stream_get_pkt(struct pkt_stream *pkt_stream, u32 pkt_nb) ··· 453 420 return &pkt_stream->pkts[pkt_nb]; 454 421 } 455 422 456 - static struct pkt_stream *pkt_stream_generate(u32 nb_pkts, u32 pkt_len) 423 + static struct pkt *pkt_stream_get_next_rx_pkt(struct pkt_stream *pkt_stream) 424 + { 425 + while (pkt_stream->rx_pkt_nb < pkt_stream->nb_pkts) { 426 + if (pkt_stream->pkts[pkt_stream->rx_pkt_nb].valid) 427 + return &pkt_stream->pkts[pkt_stream->rx_pkt_nb++]; 428 + pkt_stream->rx_pkt_nb++; 429 + } 430 + return NULL; 431 + } 432 + 433 + static void pkt_stream_delete(struct pkt_stream *pkt_stream) 434 + { 435 + free(pkt_stream->pkts); 436 + free(pkt_stream); 437 + } 438 + 439 + static void pkt_stream_restore_default(struct test_spec *test) 440 + { 441 + if (test->ifobj_tx->pkt_stream != test->pkt_stream_default) { 442 + pkt_stream_delete(test->ifobj_tx->pkt_stream); 443 + test->ifobj_tx->pkt_stream = test->pkt_stream_default; 444 + } 445 + test->ifobj_rx->pkt_stream = test->pkt_stream_default; 446 + } 447 + 448 + static struct pkt_stream *__pkt_stream_alloc(u32 nb_pkts) 449 + { 450 + struct pkt_stream *pkt_stream; 451 + 452 + pkt_stream = calloc(1, sizeof(*pkt_stream)); 453 + if (!pkt_stream) 454 + return NULL; 455 + 456 + pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); 457 
+ if (!pkt_stream->pkts) { 458 + free(pkt_stream); 459 + return NULL; 460 + } 461 + 462 + pkt_stream->nb_pkts = nb_pkts; 463 + return pkt_stream; 464 + } 465 + 466 + static struct pkt_stream *pkt_stream_generate(struct xsk_umem_info *umem, u32 nb_pkts, u32 pkt_len) 457 467 { 458 468 struct pkt_stream *pkt_stream; 459 469 u32 i; 460 470 461 - pkt_stream = malloc(sizeof(*pkt_stream)); 471 + pkt_stream = __pkt_stream_alloc(nb_pkts); 462 472 if (!pkt_stream) 463 - exit_with_error(ENOMEM); 464 - 465 - pkt_stream->pkts = calloc(nb_pkts, sizeof(*pkt_stream->pkts)); 466 - if (!pkt_stream->pkts) 467 473 exit_with_error(ENOMEM); 468 474 469 475 pkt_stream->nb_pkts = nb_pkts; 470 476 for (i = 0; i < nb_pkts; i++) { 471 - pkt_stream->pkts[i].addr = (i % num_frames) * XSK_UMEM__DEFAULT_FRAME_SIZE; 477 + pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size + 478 + DEFAULT_OFFSET; 472 479 pkt_stream->pkts[i].len = pkt_len; 473 480 pkt_stream->pkts[i].payload = i; 481 + 482 + if (pkt_len > umem->frame_size) 483 + pkt_stream->pkts[i].valid = false; 484 + else 485 + pkt_stream->pkts[i].valid = true; 474 486 } 475 487 476 488 return pkt_stream; 489 + } 490 + 491 + static struct pkt_stream *pkt_stream_clone(struct xsk_umem_info *umem, 492 + struct pkt_stream *pkt_stream) 493 + { 494 + return pkt_stream_generate(umem, pkt_stream->nb_pkts, pkt_stream->pkts[0].len); 495 + } 496 + 497 + static void pkt_stream_replace(struct test_spec *test, u32 nb_pkts, u32 pkt_len) 498 + { 499 + struct pkt_stream *pkt_stream; 500 + 501 + pkt_stream = pkt_stream_generate(test->ifobj_tx->umem, nb_pkts, pkt_len); 502 + test->ifobj_tx->pkt_stream = pkt_stream; 503 + test->ifobj_rx->pkt_stream = pkt_stream; 504 + } 505 + 506 + static void pkt_stream_replace_half(struct test_spec *test, u32 pkt_len, u32 offset) 507 + { 508 + struct xsk_umem_info *umem = test->ifobj_tx->umem; 509 + struct pkt_stream *pkt_stream; 510 + u32 i; 511 + 512 + pkt_stream = pkt_stream_clone(umem, 
test->pkt_stream_default); 513 + for (i = 0; i < test->pkt_stream_default->nb_pkts; i += 2) { 514 + pkt_stream->pkts[i].addr = (i % umem->num_frames) * umem->frame_size + offset; 515 + pkt_stream->pkts[i].len = pkt_len; 516 + } 517 + 518 + test->ifobj_tx->pkt_stream = pkt_stream; 519 + test->ifobj_rx->pkt_stream = pkt_stream; 477 520 } 478 521 479 522 static struct pkt *pkt_generate(struct ifobject *ifobject, u32 pkt_nb) ··· 562 453 563 454 if (!pkt) 564 455 return NULL; 456 + if (!pkt->valid || pkt->len < PKT_SIZE) 457 + return pkt; 565 458 566 459 data = xsk_umem__get_data(ifobject->umem->buffer, pkt->addr); 567 460 udp_hdr = (struct udphdr *)(data + sizeof(struct ethhdr) + sizeof(struct iphdr)); ··· 576 465 gen_eth_hdr(ifobject, eth_hdr); 577 466 578 467 return pkt; 468 + } 469 + 470 + static void pkt_stream_generate_custom(struct test_spec *test, struct pkt *pkts, u32 nb_pkts) 471 + { 472 + struct pkt_stream *pkt_stream; 473 + u32 i; 474 + 475 + pkt_stream = __pkt_stream_alloc(nb_pkts); 476 + if (!pkt_stream) 477 + exit_with_error(ENOMEM); 478 + 479 + test->ifobj_tx->pkt_stream = pkt_stream; 480 + test->ifobj_rx->pkt_stream = pkt_stream; 481 + 482 + for (i = 0; i < nb_pkts; i++) { 483 + pkt_stream->pkts[i].addr = pkts[i].addr; 484 + pkt_stream->pkts[i].len = pkts[i].len; 485 + pkt_stream->pkts[i].payload = i; 486 + pkt_stream->pkts[i].valid = pkts[i].valid; 487 + } 579 488 } 580 489 581 490 static void pkt_dump(void *pkt, u32 len) ··· 635 504 fprintf(stdout, "---------------------------------------\n"); 636 505 } 637 506 638 - static bool is_pkt_valid(struct pkt *pkt, void *buffer, const struct xdp_desc *desc) 507 + static bool is_pkt_valid(struct pkt *pkt, void *buffer, u64 addr, u32 len) 639 508 { 640 - void *data = xsk_umem__get_data(buffer, desc->addr); 509 + void *data = xsk_umem__get_data(buffer, addr); 641 510 struct iphdr *iphdr = (struct iphdr *)(data + sizeof(struct ethhdr)); 642 511 643 512 if (!pkt) { ··· 645 514 return false; 646 515 } 647 516 517 
+ if (len < PKT_SIZE) { 518 + /*Do not try to verify packets that are smaller than minimum size. */ 519 + return true; 520 + } 521 + 522 + if (pkt->len != len) { 523 + ksft_test_result_fail 524 + ("ERROR: [%s] expected length [%d], got length [%d]\n", 525 + __func__, pkt->len, len); 526 + return false; 527 + } 528 + 648 529 if (iphdr->version == IP_PKT_VER && iphdr->tos == IP_PKT_TOS) { 649 530 u32 seqnum = ntohl(*((u32 *)(data + PKT_HDR_SIZE))); 650 531 651 - if (opt_pkt_dump && test_type != TEST_TYPE_STATS) 532 + if (opt_pkt_dump) 652 533 pkt_dump(data, PKT_SIZE); 653 - 654 - if (pkt->len != desc->len) { 655 - ksft_test_result_fail 656 - ("ERROR: [%s] expected length [%d], got length [%d]\n", 657 - __func__, pkt->len, desc->len); 658 - return false; 659 - } 660 534 661 535 if (pkt->payload != seqnum) { 662 536 ksft_test_result_fail ··· 694 558 unsigned int rcvd; 695 559 u32 idx; 696 560 697 - if (!xsk->outstanding_tx) 698 - return; 699 - 700 561 if (xsk_ring_prod__needs_wakeup(&xsk->tx)) 701 562 kick_tx(xsk); 702 563 703 564 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx); 704 565 if (rcvd) { 566 + if (rcvd > xsk->outstanding_tx) { 567 + u64 addr = *xsk_ring_cons__comp_addr(&xsk->umem->cq, idx + rcvd - 1); 568 + 569 + ksft_test_result_fail("ERROR: [%s] Too many packets completed\n", 570 + __func__); 571 + ksft_print_msg("Last completion address: %llx\n", addr); 572 + return; 573 + } 574 + 705 575 xsk_ring_cons__release(&xsk->umem->cq, rcvd); 706 576 xsk->outstanding_tx -= rcvd; 707 577 } ··· 716 574 static void receive_pkts(struct pkt_stream *pkt_stream, struct xsk_socket_info *xsk, 717 575 struct pollfd *fds) 718 576 { 719 - u32 idx_rx = 0, idx_fq = 0, rcvd, i, pkt_count = 0; 720 - struct pkt *pkt; 577 + struct pkt *pkt = pkt_stream_get_next_rx_pkt(pkt_stream); 578 + u32 idx_rx = 0, idx_fq = 0, rcvd, i; 721 579 int ret; 722 580 723 - pkt = pkt_stream_get_pkt(pkt_stream, pkt_count++); 724 581 while (pkt) { 725 582 rcvd = xsk_ring_cons__peek(&xsk->rx, 
BATCH_SIZE, &idx_rx); 726 583 if (!rcvd) { ··· 747 606 const struct xdp_desc *desc = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++); 748 607 u64 addr = desc->addr, orig; 749 608 609 + if (!pkt) { 610 + ksft_test_result_fail("ERROR: [%s] Received too many packets.\n", 611 + __func__); 612 + ksft_print_msg("Last packet has addr: %llx len: %u\n", 613 + addr, desc->len); 614 + return; 615 + } 616 + 750 617 orig = xsk_umem__extract_addr(addr); 751 618 addr = xsk_umem__add_offset_to_addr(addr); 752 - if (!is_pkt_valid(pkt, xsk->umem->buffer, desc)) 619 + if (!is_pkt_valid(pkt, xsk->umem->buffer, addr, desc->len)) 753 620 return; 754 621 755 622 *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig; 756 - pkt = pkt_stream_get_pkt(pkt_stream, pkt_count++); 623 + pkt = pkt_stream_get_next_rx_pkt(pkt_stream); 757 624 } 758 625 759 626 xsk_ring_prod__submit(&xsk->umem->fq, rcvd); ··· 772 623 static u32 __send_pkts(struct ifobject *ifobject, u32 pkt_nb) 773 624 { 774 625 struct xsk_socket_info *xsk = ifobject->xsk; 775 - u32 i, idx; 626 + u32 i, idx, valid_pkts = 0; 776 627 777 628 while (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) < BATCH_SIZE) 778 629 complete_pkts(xsk, BATCH_SIZE); ··· 787 638 tx_desc->addr = pkt->addr; 788 639 tx_desc->len = pkt->len; 789 640 pkt_nb++; 641 + if (pkt->valid) 642 + valid_pkts++; 790 643 } 791 644 792 645 xsk_ring_prod__submit(&xsk->tx, i); 793 - if (stat_test_type != STAT_TEST_TX_INVALID) 794 - xsk->outstanding_tx += i; 795 - else if (xsk_ring_prod__needs_wakeup(&xsk->tx)) 796 - kick_tx(xsk); 797 - complete_pkts(xsk, i); 646 + xsk->outstanding_tx += valid_pkts; 647 + complete_pkts(xsk, BATCH_SIZE); 798 648 799 649 return i; 800 650 } ··· 806 658 807 659 static void send_pkts(struct ifobject *ifobject) 808 660 { 809 - struct pollfd fds[MAX_SOCKS] = { }; 661 + struct pollfd fds = { }; 810 662 u32 pkt_cnt = 0; 811 663 812 - fds[0].fd = xsk_socket__fd(ifobject->xsk->xsk); 813 - fds[0].events = POLLOUT; 664 + fds.fd = 
xsk_socket__fd(ifobject->xsk->xsk); 665 + fds.events = POLLOUT; 814 666 815 667 while (pkt_cnt < ifobject->pkt_stream->nb_pkts) { 816 668 u32 sent; 817 669 818 - if (test_type == TEST_TYPE_POLL) { 670 + if (ifobject->use_poll) { 819 671 int ret; 820 672 821 - ret = poll(fds, 1, POLL_TMOUT); 673 + ret = poll(&fds, 1, POLL_TMOUT); 822 674 if (ret <= 0) 823 675 continue; 824 676 825 - if (!(fds[0].revents & POLLOUT)) 677 + if (!(fds.revents & POLLOUT)) 826 678 continue; 827 679 } 828 680 ··· 846 698 optlen = sizeof(stats); 847 699 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 848 700 if (err) { 849 - ksft_test_result_fail("ERROR: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", 701 + ksft_test_result_fail("ERROR Rx: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", 850 702 __func__, -err, strerror(-err)); 851 703 return true; 852 704 } ··· 887 739 optlen = sizeof(stats); 888 740 err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen); 889 741 if (err) { 890 - ksft_test_result_fail("ERROR: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", 742 + ksft_test_result_fail("ERROR Tx: [%s] getsockopt(XDP_STATISTICS) error %u %s\n", 891 743 __func__, -err, strerror(-err)); 892 744 return; 893 745 } ··· 899 751 __func__, stats.tx_invalid_descs, ifobject->pkt_stream->nb_pkts); 900 752 } 901 753 902 - static void thread_common_ops(struct ifobject *ifobject, void *bufs) 754 + static void thread_common_ops(struct test_spec *test, struct ifobject *ifobject) 903 755 { 904 - u64 umem_sz = num_frames * XSK_UMEM__DEFAULT_FRAME_SIZE; 905 756 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE; 906 - size_t mmap_sz = umem_sz; 907 - int ctr = 0; 908 - int ret; 757 + u32 i; 909 758 910 759 ifobject->ns_fd = switch_namespace(ifobject->nsname); 911 760 912 - if (test_type == TEST_TYPE_BPF_RES) 913 - mmap_sz *= 2; 761 + if (ifobject->umem->unaligned_mode) 762 + mmap_flags |= MAP_HUGETLB; 914 763 915 - bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, mmap_flags, 
-1, 0); 916 - if (bufs == MAP_FAILED) 917 - exit_with_error(errno); 764 + for (i = 0; i < test->nb_sockets; i++) { 765 + u64 umem_sz = ifobject->umem->num_frames * ifobject->umem->frame_size; 766 + u32 ctr = 0; 767 + void *bufs; 918 768 919 - while (ctr++ < SOCK_RECONF_CTR) { 920 - xsk_configure_umem(ifobject, bufs, umem_sz, 0); 921 - ifobject->umem = ifobject->umem_arr[0]; 922 - ret = xsk_configure_socket(ifobject, 0); 923 - if (!ret) 924 - break; 769 + bufs = mmap(NULL, umem_sz, PROT_READ | PROT_WRITE, mmap_flags, -1, 0); 770 + if (bufs == MAP_FAILED) 771 + exit_with_error(errno); 925 772 926 - /* Retry Create Socket if it fails as xsk_socket__create() is asynchronous */ 927 - usleep(USLEEP_MAX); 928 - if (ctr >= SOCK_RECONF_CTR) 929 - exit_with_error(-ret); 773 + while (ctr++ < SOCK_RECONF_CTR) { 774 + int ret; 775 + 776 + ret = xsk_configure_umem(&ifobject->umem_arr[i], bufs, umem_sz); 777 + if (ret) 778 + exit_with_error(-ret); 779 + 780 + ret = xsk_configure_socket(&ifobject->xsk_arr[i], &ifobject->umem_arr[i], 781 + ifobject, i); 782 + if (!ret) 783 + break; 784 + 785 + /* Retry if it fails as xsk_socket__create() is asynchronous */ 786 + if (ctr >= SOCK_RECONF_CTR) 787 + exit_with_error(-ret); 788 + usleep(USLEEP_MAX); 789 + } 930 790 } 931 791 932 - ifobject->umem = ifobject->umem_arr[0]; 933 - ifobject->xsk = ifobject->xsk_arr[0]; 934 - 935 - if (test_type == TEST_TYPE_BPF_RES) { 936 - xsk_configure_umem(ifobject, (u8 *)bufs + umem_sz, umem_sz, 1); 937 - ifobject->umem = ifobject->umem_arr[1]; 938 - ret = xsk_configure_socket(ifobject, 1); 939 - } 940 - 941 - ifobject->umem = ifobject->umem_arr[0]; 942 - ifobject->xsk = ifobject->xsk_arr[0]; 943 - print_verbose("Interface [%s] vector [%s]\n", 944 - ifobject->ifname, ifobject->fv.vector == tx ? 
"Tx" : "Rx"); 945 - } 946 - 947 - static bool testapp_is_test_two_stepped(void) 948 - { 949 - return (test_type != TEST_TYPE_BIDI && test_type != TEST_TYPE_BPF_RES) || second_step; 792 + ifobject->umem = &ifobject->umem_arr[0]; 793 + ifobject->xsk = &ifobject->xsk_arr[0]; 950 794 } 951 795 952 796 static void testapp_cleanup_xsk_res(struct ifobject *ifobj) 953 797 { 954 - if (testapp_is_test_two_stepped()) { 955 - xsk_socket__delete(ifobj->xsk->xsk); 956 - (void)xsk_umem__delete(ifobj->umem->umem); 957 - } 798 + print_verbose("Destroying socket\n"); 799 + xsk_socket__delete(ifobj->xsk->xsk); 800 + munmap(ifobj->umem->buffer, ifobj->umem->num_frames * ifobj->umem->frame_size); 801 + xsk_umem__delete(ifobj->umem->umem); 958 802 } 959 803 960 804 static void *worker_testapp_validate_tx(void *arg) 961 805 { 962 - struct ifobject *ifobject = (struct ifobject *)arg; 963 - void *bufs = NULL; 806 + struct test_spec *test = (struct test_spec *)arg; 807 + struct ifobject *ifobject = test->ifobj_tx; 964 808 965 - if (!second_step) 966 - thread_common_ops(ifobject, bufs); 809 + if (test->current_step == 1) 810 + thread_common_ops(test, ifobject); 967 811 968 812 print_verbose("Sending %d packets on interface %s\n", ifobject->pkt_stream->nb_pkts, 969 813 ifobject->ifname); ··· 964 824 if (stat_test_type == STAT_TEST_TX_INVALID) 965 825 tx_stats_validate(ifobject); 966 826 967 - testapp_cleanup_xsk_res(ifobject); 827 + if (test->total_steps == test->current_step) 828 + testapp_cleanup_xsk_res(ifobject); 968 829 pthread_exit(NULL); 830 + } 831 + 832 + static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream *pkt_stream) 833 + { 834 + u32 idx = 0, i; 835 + int ret; 836 + 837 + ret = xsk_ring_prod__reserve(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS, &idx); 838 + if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS) 839 + exit_with_error(ENOSPC); 840 + for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS; i++) { 841 + u64 addr; 842 + 843 + if 
(pkt_stream->use_addr_for_fill) { 844 + struct pkt *pkt = pkt_stream_get_pkt(pkt_stream, i); 845 + 846 + if (!pkt) 847 + break; 848 + addr = pkt->addr; 849 + } else { 850 + addr = (i % umem->num_frames) * umem->frame_size + DEFAULT_OFFSET; 851 + } 852 + 853 + *xsk_ring_prod__fill_addr(&umem->fq, idx++) = addr; 854 + } 855 + xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS); 969 856 } 970 857 971 858 static void *worker_testapp_validate_rx(void *arg) 972 859 { 973 - struct ifobject *ifobject = (struct ifobject *)arg; 974 - struct pollfd fds[MAX_SOCKS] = { }; 975 - void *bufs = NULL; 860 + struct test_spec *test = (struct test_spec *)arg; 861 + struct ifobject *ifobject = test->ifobj_rx; 862 + struct pollfd fds = { }; 976 863 977 - if (!second_step) 978 - thread_common_ops(ifobject, bufs); 864 + if (test->current_step == 1) 865 + thread_common_ops(test, ifobject); 979 866 980 - if (stat_test_type != STAT_TEST_RX_FILL_EMPTY) 981 - xsk_populate_fill_ring(ifobject->umem); 867 + xsk_populate_fill_ring(ifobject->umem, ifobject->pkt_stream); 982 868 983 - fds[0].fd = xsk_socket__fd(ifobject->xsk->xsk); 984 - fds[0].events = POLLIN; 869 + fds.fd = xsk_socket__fd(ifobject->xsk->xsk); 870 + fds.events = POLLIN; 985 871 986 872 pthread_barrier_wait(&barr); 987 873 ··· 1015 849 while (!rx_stats_are_valid(ifobject)) 1016 850 continue; 1017 851 else 1018 - receive_pkts(ifobject->pkt_stream, ifobject->xsk, fds); 852 + receive_pkts(ifobject->pkt_stream, ifobject->xsk, &fds); 1019 853 1020 - if (test_type == TEST_TYPE_TEARDOWN) 1021 - print_verbose("Destroying socket\n"); 1022 - 1023 - testapp_cleanup_xsk_res(ifobject); 854 + if (test->total_steps == test->current_step) 855 + testapp_cleanup_xsk_res(ifobject); 1024 856 pthread_exit(NULL); 1025 857 } 1026 858 1027 - static void testapp_validate(void) 859 + static void testapp_validate_traffic(struct test_spec *test) 1028 860 { 1029 - bool bidi = test_type == TEST_TYPE_BIDI; 1030 - bool bpf = test_type == 
TEST_TYPE_BPF_RES; 1031 - struct pkt_stream *pkt_stream; 861 + struct ifobject *ifobj_tx = test->ifobj_tx; 862 + struct ifobject *ifobj_rx = test->ifobj_rx; 863 + pthread_t t0, t1; 1032 864 1033 865 if (pthread_barrier_init(&barr, NULL, 2)) 1034 866 exit_with_error(errno); 1035 867 1036 - if (stat_test_type == STAT_TEST_TX_INVALID) 1037 - pkt_stream = pkt_stream_generate(DEFAULT_PKT_CNT, XSK_UMEM__INVALID_FRAME_SIZE); 1038 - else 1039 - pkt_stream = pkt_stream_generate(DEFAULT_PKT_CNT, PKT_SIZE); 1040 - ifdict_tx->pkt_stream = pkt_stream; 1041 - ifdict_rx->pkt_stream = pkt_stream; 868 + test->current_step++; 1042 869 1043 870 /*Spawn RX thread */ 1044 - pthread_create(&t0, NULL, ifdict_rx->func_ptr, ifdict_rx); 871 + pthread_create(&t0, NULL, ifobj_rx->func_ptr, test); 1045 872 1046 873 pthread_barrier_wait(&barr); 1047 874 if (pthread_barrier_destroy(&barr)) 1048 875 exit_with_error(errno); 1049 876 1050 877 /*Spawn TX thread */ 1051 - pthread_create(&t1, NULL, ifdict_tx->func_ptr, ifdict_tx); 878 + pthread_create(&t1, NULL, ifobj_tx->func_ptr, test); 1052 879 1053 880 pthread_join(t1, NULL); 1054 881 pthread_join(t0, NULL); 1055 - 1056 - if (!(test_type == TEST_TYPE_TEARDOWN) && !bidi && !bpf && !(test_type == TEST_TYPE_STATS)) 1057 - print_ksft_result(); 1058 882 } 1059 883 1060 - static void testapp_teardown(void) 884 + static void testapp_teardown(struct test_spec *test) 1061 885 { 1062 886 int i; 1063 887 888 + test_spec_set_name(test, "TEARDOWN"); 1064 889 for (i = 0; i < MAX_TEARDOWN_ITER; i++) { 1065 - print_verbose("Creating socket\n"); 1066 - testapp_validate(); 890 + testapp_validate_traffic(test); 891 + test_spec_reset(test); 1067 892 } 1068 - 1069 - print_ksft_result(); 1070 893 } 1071 894 1072 - static void swap_vectors(struct ifobject *ifobj1, struct ifobject *ifobj2) 895 + static void swap_directions(struct ifobject **ifobj1, struct ifobject **ifobj2) 1073 896 { 1074 - void *(*tmp_func_ptr)(void *) = ifobj1->func_ptr; 1075 - enum fvector tmp_vector 
= ifobj1->fv.vector; 897 + thread_func_t tmp_func_ptr = (*ifobj1)->func_ptr; 898 + struct ifobject *tmp_ifobj = (*ifobj1); 1076 899 1077 - ifobj1->func_ptr = ifobj2->func_ptr; 1078 - ifobj1->fv.vector = ifobj2->fv.vector; 900 + (*ifobj1)->func_ptr = (*ifobj2)->func_ptr; 901 + (*ifobj2)->func_ptr = tmp_func_ptr; 1079 902 1080 - ifobj2->func_ptr = tmp_func_ptr; 1081 - ifobj2->fv.vector = tmp_vector; 1082 - 1083 - ifdict_tx = ifobj1; 1084 - ifdict_rx = ifobj2; 903 + *ifobj1 = *ifobj2; 904 + *ifobj2 = tmp_ifobj; 1085 905 } 1086 906 1087 - static void testapp_bidi(void) 907 + static void testapp_bidi(struct test_spec *test) 1088 908 { 1089 - for (int i = 0; i < MAX_BIDI_ITER; i++) { 1090 - print_verbose("Creating socket\n"); 1091 - testapp_validate(); 1092 - if (!second_step) { 1093 - print_verbose("Switching Tx/Rx vectors\n"); 1094 - swap_vectors(ifdict[1], ifdict[0]); 1095 - } 1096 - second_step = true; 1097 - } 909 + test_spec_set_name(test, "BIDIRECTIONAL"); 910 + test->ifobj_tx->rx_on = true; 911 + test->ifobj_rx->tx_on = true; 912 + test->total_steps = 2; 913 + testapp_validate_traffic(test); 1098 914 1099 - swap_vectors(ifdict[0], ifdict[1]); 915 + print_verbose("Switching Tx/Rx vectors\n"); 916 + swap_directions(&test->ifobj_rx, &test->ifobj_tx); 917 + testapp_validate_traffic(test); 1100 918 1101 - print_ksft_result(); 919 + swap_directions(&test->ifobj_rx, &test->ifobj_tx); 1102 920 } 1103 921 1104 - static void swap_xsk_res(void) 922 + static void swap_xsk_resources(struct ifobject *ifobj_tx, struct ifobject *ifobj_rx) 1105 923 { 1106 - xsk_socket__delete(ifdict_tx->xsk->xsk); 1107 - xsk_umem__delete(ifdict_tx->umem->umem); 1108 - xsk_socket__delete(ifdict_rx->xsk->xsk); 1109 - xsk_umem__delete(ifdict_rx->umem->umem); 1110 - ifdict_tx->umem = ifdict_tx->umem_arr[1]; 1111 - ifdict_tx->xsk = ifdict_tx->xsk_arr[1]; 1112 - ifdict_rx->umem = ifdict_rx->umem_arr[1]; 1113 - ifdict_rx->xsk = ifdict_rx->xsk_arr[1]; 924 + xsk_socket__delete(ifobj_tx->xsk->xsk); 925 + 
xsk_umem__delete(ifobj_tx->umem->umem); 926 + xsk_socket__delete(ifobj_rx->xsk->xsk); 927 + xsk_umem__delete(ifobj_rx->umem->umem); 928 + ifobj_tx->umem = &ifobj_tx->umem_arr[1]; 929 + ifobj_tx->xsk = &ifobj_tx->xsk_arr[1]; 930 + ifobj_rx->umem = &ifobj_rx->umem_arr[1]; 931 + ifobj_rx->xsk = &ifobj_rx->xsk_arr[1]; 1114 932 } 1115 933 1116 - static void testapp_bpf_res(void) 934 + static void testapp_bpf_res(struct test_spec *test) 935 + { 936 + test_spec_set_name(test, "BPF_RES"); 937 + test->total_steps = 2; 938 + test->nb_sockets = 2; 939 + testapp_validate_traffic(test); 940 + 941 + swap_xsk_resources(test->ifobj_tx, test->ifobj_rx); 942 + testapp_validate_traffic(test); 943 + } 944 + 945 + static void testapp_stats(struct test_spec *test) 1117 946 { 1118 947 int i; 1119 948 1120 - for (i = 0; i < MAX_BPF_ITER; i++) { 1121 - print_verbose("Creating socket\n"); 1122 - testapp_validate(); 1123 - if (!second_step) 1124 - swap_xsk_res(); 1125 - second_step = true; 1126 - } 1127 - 1128 - print_ksft_result(); 1129 - } 1130 - 1131 - static void testapp_stats(void) 1132 - { 1133 - for (int i = 0; i < STAT_TEST_TYPE_MAX; i++) { 949 + for (i = 0; i < STAT_TEST_TYPE_MAX; i++) { 950 + test_spec_reset(test); 1134 951 stat_test_type = i; 1135 - 1136 - /* reset defaults */ 1137 - rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 1138 - frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM; 1139 952 1140 953 switch (stat_test_type) { 1141 954 case STAT_TEST_RX_DROPPED: 1142 - frame_headroom = XSK_UMEM__DEFAULT_FRAME_SIZE - 1143 - XDP_PACKET_HEADROOM - 1; 955 + test_spec_set_name(test, "STAT_RX_DROPPED"); 956 + test->ifobj_rx->umem->frame_headroom = test->ifobj_rx->umem->frame_size - 957 + XDP_PACKET_HEADROOM - 1; 958 + testapp_validate_traffic(test); 1144 959 break; 1145 960 case STAT_TEST_RX_FULL: 1146 - rxqsize = RX_FULL_RXQSIZE; 961 + test_spec_set_name(test, "STAT_RX_FULL"); 962 + test->ifobj_rx->xsk->rxqsize = RX_FULL_RXQSIZE; 963 + testapp_validate_traffic(test); 1147 964 break; 
1148 965 case STAT_TEST_TX_INVALID: 1149 - continue; 966 + test_spec_set_name(test, "STAT_TX_INVALID"); 967 + pkt_stream_replace(test, DEFAULT_PKT_CNT, XSK_UMEM__INVALID_FRAME_SIZE); 968 + testapp_validate_traffic(test); 969 + 970 + pkt_stream_restore_default(test); 971 + break; 972 + case STAT_TEST_RX_FILL_EMPTY: 973 + test_spec_set_name(test, "STAT_RX_FILL_EMPTY"); 974 + test->ifobj_rx->pkt_stream = pkt_stream_generate(test->ifobj_rx->umem, 0, 975 + MIN_PKT_SIZE); 976 + if (!test->ifobj_rx->pkt_stream) 977 + exit_with_error(ENOMEM); 978 + test->ifobj_rx->pkt_stream->use_addr_for_fill = true; 979 + testapp_validate_traffic(test); 980 + 981 + pkt_stream_restore_default(test); 982 + break; 1150 983 default: 1151 984 break; 1152 985 } 1153 - testapp_validate(); 1154 986 } 1155 987 1156 - print_ksft_result(); 988 + /* To only see the whole stat set being completed unless an individual test fails. */ 989 + test_spec_set_name(test, "STATS"); 1157 990 } 1158 991 1159 - static void init_iface(struct ifobject *ifobj, const char *dst_mac, 1160 - const char *src_mac, const char *dst_ip, 1161 - const char *src_ip, const u16 dst_port, 1162 - const u16 src_port, enum fvector vector) 992 + /* Simple test */ 993 + static bool hugepages_present(struct ifobject *ifobject) 994 + { 995 + const size_t mmap_sz = 2 * ifobject->umem->num_frames * ifobject->umem->frame_size; 996 + void *bufs; 997 + 998 + bufs = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE, 999 + MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_HUGETLB, -1, 0); 1000 + if (bufs == MAP_FAILED) 1001 + return false; 1002 + 1003 + munmap(bufs, mmap_sz); 1004 + return true; 1005 + } 1006 + 1007 + static bool testapp_unaligned(struct test_spec *test) 1008 + { 1009 + if (!hugepages_present(test->ifobj_tx)) { 1010 + ksft_test_result_skip("No 2M huge pages present.\n"); 1011 + return false; 1012 + } 1013 + 1014 + test_spec_set_name(test, "UNALIGNED_MODE"); 1015 + test->ifobj_tx->umem->unaligned_mode = true; 1016 + 
test->ifobj_rx->umem->unaligned_mode = true; 1017 + /* Let half of the packets straddle a buffer boundrary */ 1018 + pkt_stream_replace_half(test, PKT_SIZE, test->ifobj_tx->umem->frame_size - 32); 1019 + test->ifobj_rx->pkt_stream->use_addr_for_fill = true; 1020 + testapp_validate_traffic(test); 1021 + 1022 + pkt_stream_restore_default(test); 1023 + return true; 1024 + } 1025 + 1026 + static void testapp_invalid_desc(struct test_spec *test) 1027 + { 1028 + struct pkt pkts[] = { 1029 + /* Zero packet length at address zero allowed */ 1030 + {0, 0, 0, true}, 1031 + /* Zero packet length allowed */ 1032 + {0x1000, 0, 0, true}, 1033 + /* Straddling the start of umem */ 1034 + {-2, PKT_SIZE, 0, false}, 1035 + /* Packet too large */ 1036 + {0x2000, XSK_UMEM__INVALID_FRAME_SIZE, 0, false}, 1037 + /* After umem ends */ 1038 + {UMEM_SIZE, PKT_SIZE, 0, false}, 1039 + /* Straddle the end of umem */ 1040 + {UMEM_SIZE - PKT_SIZE / 2, PKT_SIZE, 0, false}, 1041 + /* Straddle a page boundrary */ 1042 + {0x3000 - PKT_SIZE / 2, PKT_SIZE, 0, false}, 1043 + /* Straddle a 2K boundrary */ 1044 + {0x3800 - PKT_SIZE / 2, PKT_SIZE, 0, true}, 1045 + /* Valid packet for synch so that something is received */ 1046 + {0x4000, PKT_SIZE, 0, true}}; 1047 + 1048 + if (test->ifobj_tx->umem->unaligned_mode) { 1049 + /* Crossing a page boundrary allowed */ 1050 + pkts[6].valid = true; 1051 + } 1052 + if (test->ifobj_tx->umem->frame_size == XSK_UMEM__DEFAULT_FRAME_SIZE / 2) { 1053 + /* Crossing a 2K frame size boundrary not allowed */ 1054 + pkts[7].valid = false; 1055 + } 1056 + 1057 + pkt_stream_generate_custom(test, pkts, ARRAY_SIZE(pkts)); 1058 + testapp_validate_traffic(test); 1059 + pkt_stream_restore_default(test); 1060 + } 1061 + 1062 + static void init_iface(struct ifobject *ifobj, const char *dst_mac, const char *src_mac, 1063 + const char *dst_ip, const char *src_ip, const u16 dst_port, 1064 + const u16 src_port, thread_func_t func_ptr) 1163 1065 { 1164 1066 struct in_addr ip; 1165 1067 ··· 
1243 1009 ifobj->dst_port = dst_port; 1244 1010 ifobj->src_port = src_port; 1245 1011 1246 - if (vector == tx) { 1247 - ifobj->fv.vector = tx; 1248 - ifobj->func_ptr = worker_testapp_validate_tx; 1249 - ifdict_tx = ifobj; 1250 - } else { 1251 - ifobj->fv.vector = rx; 1252 - ifobj->func_ptr = worker_testapp_validate_rx; 1253 - ifdict_rx = ifobj; 1254 - } 1012 + ifobj->func_ptr = func_ptr; 1255 1013 } 1256 1014 1257 - static void run_pkt_test(int mode, int type) 1015 + static void run_pkt_test(struct test_spec *test, enum test_mode mode, enum test_type type) 1258 1016 { 1259 1017 test_type = type; 1260 1018 1261 1019 /* reset defaults after potential previous test */ 1262 - xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; 1263 - second_step = 0; 1264 1020 stat_test_type = -1; 1265 - rxqsize = XSK_RING_CONS__DEFAULT_NUM_DESCS; 1266 - frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM; 1267 - 1268 - configured_mode = mode; 1269 - 1270 - switch (mode) { 1271 - case (TEST_MODE_SKB): 1272 - xdp_flags |= XDP_FLAGS_SKB_MODE; 1273 - break; 1274 - case (TEST_MODE_DRV): 1275 - xdp_flags |= XDP_FLAGS_DRV_MODE; 1276 - break; 1277 - default: 1278 - break; 1279 - } 1280 1021 1281 1022 switch (test_type) { 1282 1023 case TEST_TYPE_STATS: 1283 - testapp_stats(); 1024 + testapp_stats(test); 1284 1025 break; 1285 1026 case TEST_TYPE_TEARDOWN: 1286 - testapp_teardown(); 1027 + testapp_teardown(test); 1287 1028 break; 1288 1029 case TEST_TYPE_BIDI: 1289 - testapp_bidi(); 1030 + testapp_bidi(test); 1290 1031 break; 1291 1032 case TEST_TYPE_BPF_RES: 1292 - testapp_bpf_res(); 1033 + testapp_bpf_res(test); 1034 + break; 1035 + case TEST_TYPE_RUN_TO_COMPLETION: 1036 + test_spec_set_name(test, "RUN_TO_COMPLETION"); 1037 + testapp_validate_traffic(test); 1038 + break; 1039 + case TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME: 1040 + test_spec_set_name(test, "RUN_TO_COMPLETION_2K_FRAME_SIZE"); 1041 + test->ifobj_tx->umem->frame_size = 2048; 1042 + test->ifobj_rx->umem->frame_size = 2048; 1043 + 
pkt_stream_replace(test, DEFAULT_PKT_CNT, MIN_PKT_SIZE); 1044 + testapp_validate_traffic(test); 1045 + 1046 + pkt_stream_restore_default(test); 1047 + break; 1048 + case TEST_TYPE_POLL: 1049 + test->ifobj_tx->use_poll = true; 1050 + test->ifobj_rx->use_poll = true; 1051 + test_spec_set_name(test, "POLL"); 1052 + testapp_validate_traffic(test); 1053 + break; 1054 + case TEST_TYPE_ALIGNED_INV_DESC: 1055 + test_spec_set_name(test, "ALIGNED_INV_DESC"); 1056 + testapp_invalid_desc(test); 1057 + break; 1058 + case TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME: 1059 + test_spec_set_name(test, "ALIGNED_INV_DESC_2K_FRAME_SIZE"); 1060 + test->ifobj_tx->umem->frame_size = 2048; 1061 + test->ifobj_rx->umem->frame_size = 2048; 1062 + testapp_invalid_desc(test); 1063 + break; 1064 + case TEST_TYPE_UNALIGNED_INV_DESC: 1065 + test_spec_set_name(test, "UNALIGNED_INV_DESC"); 1066 + test->ifobj_tx->umem->unaligned_mode = true; 1067 + test->ifobj_rx->umem->unaligned_mode = true; 1068 + testapp_invalid_desc(test); 1069 + break; 1070 + case TEST_TYPE_UNALIGNED: 1071 + if (!testapp_unaligned(test)) 1072 + return; 1293 1073 break; 1294 1074 default: 1295 - testapp_validate(); 1296 1075 break; 1297 1076 } 1077 + 1078 + print_ksft_result(test); 1298 1079 } 1299 1080 1300 1081 static struct ifobject *ifobject_create(void) ··· 1320 1071 if (!ifobj) 1321 1072 return NULL; 1322 1073 1323 - ifobj->xsk_arr = calloc(2, sizeof(struct xsk_socket_info *)); 1074 + ifobj->xsk_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->xsk_arr)); 1324 1075 if (!ifobj->xsk_arr) 1325 1076 goto out_xsk_arr; 1326 1077 1327 - ifobj->umem_arr = calloc(2, sizeof(struct xsk_umem_info *)); 1078 + ifobj->umem_arr = calloc(MAX_SOCKETS, sizeof(*ifobj->umem_arr)); 1328 1079 if (!ifobj->umem_arr) 1329 1080 goto out_umem_arr; 1330 1081 ··· 1347 1098 int main(int argc, char **argv) 1348 1099 { 1349 1100 struct rlimit _rlim = { RLIM_INFINITY, RLIM_INFINITY }; 1350 - int i, j; 1101 + struct pkt_stream *pkt_stream_default; 1102 + struct ifobject 
*ifobj_tx, *ifobj_rx; 1103 + struct test_spec test; 1104 + u32 i, j; 1351 1105 1352 1106 if (setrlimit(RLIMIT_MEMLOCK, &_rlim)) 1353 1107 exit_with_error(errno); 1354 1108 1355 - for (i = 0; i < MAX_INTERFACES; i++) { 1356 - ifdict[i] = ifobject_create(); 1357 - if (!ifdict[i]) 1358 - exit_with_error(ENOMEM); 1359 - } 1109 + ifobj_tx = ifobject_create(); 1110 + if (!ifobj_tx) 1111 + exit_with_error(ENOMEM); 1112 + ifobj_rx = ifobject_create(); 1113 + if (!ifobj_rx) 1114 + exit_with_error(ENOMEM); 1360 1115 1361 1116 setlocale(LC_ALL, ""); 1362 1117 1363 - parse_command_line(argc, argv); 1118 + parse_command_line(ifobj_tx, ifobj_rx, argc, argv); 1364 1119 1365 - init_iface(ifdict[tx], MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, tx); 1366 - init_iface(ifdict[rx], MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, rx); 1120 + if (!validate_interface(ifobj_tx) || !validate_interface(ifobj_rx)) { 1121 + usage(basename(argv[0])); 1122 + ksft_exit_xfail(); 1123 + } 1124 + 1125 + init_iface(ifobj_tx, MAC1, MAC2, IP1, IP2, UDP_PORT1, UDP_PORT2, 1126 + worker_testapp_validate_tx); 1127 + init_iface(ifobj_rx, MAC2, MAC1, IP2, IP1, UDP_PORT2, UDP_PORT1, 1128 + worker_testapp_validate_rx); 1129 + 1130 + test_spec_init(&test, ifobj_tx, ifobj_rx, 0); 1131 + pkt_stream_default = pkt_stream_generate(ifobj_tx->umem, DEFAULT_PKT_CNT, PKT_SIZE); 1132 + if (!pkt_stream_default) 1133 + exit_with_error(ENOMEM); 1134 + test.pkt_stream_default = pkt_stream_default; 1367 1135 1368 1136 ksft_set_plan(TEST_MODE_MAX * TEST_TYPE_MAX); 1369 1137 1370 1138 for (i = 0; i < TEST_MODE_MAX; i++) 1371 1139 for (j = 0; j < TEST_TYPE_MAX; j++) { 1372 - run_pkt_test(i, j); 1140 + test_spec_init(&test, ifobj_tx, ifobj_rx, i); 1141 + run_pkt_test(&test, i, j); 1373 1142 usleep(USLEEP_MAX); 1374 1143 } 1375 1144 1376 - for (i = 0; i < MAX_INTERFACES; i++) 1377 - ifobject_delete(ifdict[i]); 1145 + pkt_stream_delete(pkt_stream_default); 1146 + ifobject_delete(ifobj_tx); 1147 + ifobject_delete(ifobj_rx); 1378 1148 
1379 1149 ksft_exit_pass(); 1380 1150 return 0;
+38 -28
tools/testing/selftests/bpf/xdpxceiver.h
··· 20 20 #define MAX_INTERFACES 2 21 21 #define MAX_INTERFACE_NAME_CHARS 7 22 22 #define MAX_INTERFACES_NAMESPACE_CHARS 10 23 - #define MAX_SOCKS 1 23 + #define MAX_SOCKETS 2 24 + #define MAX_TEST_NAME_SIZE 32 24 25 #define MAX_TEARDOWN_ITER 10 25 - #define MAX_BIDI_ITER 2 26 - #define MAX_BPF_ITER 2 27 26 #define PKT_HDR_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \ 28 27 sizeof(struct udphdr)) 29 28 #define MIN_PKT_SIZE 64 ··· 38 39 #define BATCH_SIZE 8 39 40 #define POLL_TMOUT 1000 40 41 #define DEFAULT_PKT_CNT (4 * 1024) 42 + #define DEFAULT_UMEM_BUFFERS (DEFAULT_PKT_CNT / 4) 43 + #define UMEM_SIZE (DEFAULT_UMEM_BUFFERS * XSK_UMEM__DEFAULT_FRAME_SIZE) 41 44 #define RX_FULL_RXQSIZE 32 45 + #define DEFAULT_OFFSET 256 42 46 #define XSK_UMEM__INVALID_FRAME_SIZE (XSK_UMEM__DEFAULT_FRAME_SIZE + 1) 43 47 44 48 #define print_verbose(x...) do { if (opt_verbose) ksft_print_msg(x); } while (0) ··· 53 51 }; 54 52 55 53 enum test_type { 56 - TEST_TYPE_NOPOLL, 54 + TEST_TYPE_RUN_TO_COMPLETION, 55 + TEST_TYPE_RUN_TO_COMPLETION_2K_FRAME, 57 56 TEST_TYPE_POLL, 57 + TEST_TYPE_UNALIGNED, 58 + TEST_TYPE_ALIGNED_INV_DESC, 59 + TEST_TYPE_ALIGNED_INV_DESC_2K_FRAME, 60 + TEST_TYPE_UNALIGNED_INV_DESC, 58 61 TEST_TYPE_TEARDOWN, 59 62 TEST_TYPE_BIDI, 60 63 TEST_TYPE_STATS, ··· 75 68 STAT_TEST_TYPE_MAX 76 69 }; 77 70 78 - static int configured_mode; 79 71 static bool opt_pkt_dump; 80 - static u32 num_frames = DEFAULT_PKT_CNT / 4; 81 - static bool second_step; 82 72 static int test_type; 83 73 84 74 static bool opt_verbose; 85 - 86 - static u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST; 87 - static u32 xdp_bind_flags = XDP_USE_NEED_WAKEUP | XDP_COPY; 88 75 static int stat_test_type; 89 - static u32 rxqsize; 90 - static u32 frame_headroom; 91 76 92 77 struct xsk_umem_info { 93 78 struct xsk_ring_prod fq; 94 79 struct xsk_ring_cons cq; 95 80 struct xsk_umem *umem; 81 + u32 num_frames; 82 + u32 frame_headroom; 96 83 void *buffer; 84 + u32 frame_size; 85 + bool unaligned_mode; 97 86 }; 
98 87 99 88 struct xsk_socket_info { ··· 98 95 struct xsk_umem_info *umem; 99 96 struct xsk_socket *xsk; 100 97 u32 outstanding_tx; 101 - }; 102 - 103 - struct flow_vector { 104 - enum fvector { 105 - tx, 106 - rx, 107 - } vector; 98 + u32 rxqsize; 108 99 }; 109 100 110 101 struct pkt { 111 102 u64 addr; 112 103 u32 len; 113 104 u32 payload; 105 + bool valid; 114 106 }; 115 107 116 108 struct pkt_stream { 117 109 u32 nb_pkts; 110 + u32 rx_pkt_nb; 118 111 struct pkt *pkts; 112 + bool use_addr_for_fill; 119 113 }; 114 + 115 + typedef void *(*thread_func_t)(void *arg); 120 116 121 117 struct ifobject { 122 118 char ifname[MAX_INTERFACE_NAME_CHARS]; 123 119 char nsname[MAX_INTERFACES_NAMESPACE_CHARS]; 124 120 struct xsk_socket_info *xsk; 125 - struct xsk_socket_info **xsk_arr; 126 - struct xsk_umem_info **umem_arr; 121 + struct xsk_socket_info *xsk_arr; 127 122 struct xsk_umem_info *umem; 128 - void *(*func_ptr)(void *arg); 129 - struct flow_vector fv; 123 + struct xsk_umem_info *umem_arr; 124 + thread_func_t func_ptr; 130 125 struct pkt_stream *pkt_stream; 131 126 int ns_fd; 132 127 u32 dst_ip; 133 128 u32 src_ip; 129 + u32 xdp_flags; 130 + u32 bind_flags; 134 131 u16 src_port; 135 132 u16 dst_port; 133 + bool tx_on; 134 + bool rx_on; 135 + bool use_poll; 136 136 u8 dst_mac[ETH_ALEN]; 137 137 u8 src_mac[ETH_ALEN]; 138 138 }; 139 139 140 - static struct ifobject *ifdict[MAX_INTERFACES]; 141 - static struct ifobject *ifdict_rx; 142 - static struct ifobject *ifdict_tx; 140 + struct test_spec { 141 + struct ifobject *ifobj_tx; 142 + struct ifobject *ifobj_rx; 143 + struct pkt_stream *pkt_stream_default; 144 + u16 total_steps; 145 + u16 current_step; 146 + u16 nb_sockets; 147 + char name[MAX_TEST_NAME_SIZE]; 148 + }; 143 149 144 - /*threads*/ 145 150 pthread_barrier_t barr; 146 - pthread_t t0, t1; 147 151 148 152 #endif /* XDPXCEIVER_H */