Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: Update tests for new ct zone opts for nf_conntrack kfuncs

Add a test for allocating and looking up a ct entry in a
non-default ct zone using the kfuncs bpf_{xdp,skb}_ct_alloc
and bpf_{xdp,skb}_ct_lookup.

Add negative tests for looking up a ct entry in a different
ct zone from the one where it was allocated, and for looking
it up with a different zone direction.

Update the reserved-field test for the old struct definition to
also test for ct_zone_id being set when the opts size isn't
NF_BPF_CT_OPTS_SZ (16).

Signed-off-by: Brad Cowie <brad@faucet.nz>
Link: https://lore.kernel.org/r/20240522050712.732558-2-brad@faucet.nz
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>

authored by

Brad Cowie and committed by
Martin KaFai Lau
a87f34e7 ece4b296

+116
+1
tools/testing/selftests/bpf/config
··· 80 80 CONFIG_NETKIT=y 81 81 CONFIG_NF_CONNTRACK=y 82 82 CONFIG_NF_CONNTRACK_MARK=y 83 + CONFIG_NF_CONNTRACK_ZONES=y 83 84 CONFIG_NF_DEFRAG_IPV4=y 84 85 CONFIG_NF_DEFRAG_IPV6=y 85 86 CONFIG_NF_NAT=y
+7
tools/testing/selftests/bpf/prog_tests/bpf_nf.c
··· 104 104 105 105 ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple"); 106 106 ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0"); 107 + ASSERT_EQ(skel->bss->test_einval_reserved_new, -EINVAL, "Test EINVAL for reserved in new struct not set to 0"); 107 108 ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1"); 108 109 ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ"); 109 110 ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP"); ··· 123 122 ASSERT_EQ(skel->bss->test_exist_lookup_mark, 43, "Test existing connection lookup ctmark"); 124 123 ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting"); 125 124 ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting"); 125 + ASSERT_EQ(skel->data->test_ct_zone_id_alloc_entry, 0, "Test for alloc new entry in specified ct zone"); 126 + ASSERT_EQ(skel->data->test_ct_zone_id_insert_entry, 0, "Test for insert new entry in specified ct zone"); 127 + ASSERT_EQ(skel->data->test_ct_zone_id_succ_lookup, 0, "Test for successful lookup in specified ct_zone"); 128 + ASSERT_EQ(skel->bss->test_ct_zone_dir_enoent_lookup, -ENOENT, "Test ENOENT for lookup with wrong ct zone dir"); 129 + ASSERT_EQ(skel->bss->test_ct_zone_id_enoent_lookup, -ENOENT, "Test ENOENT for lookup in wrong ct zone"); 130 + 126 131 end: 127 132 if (client_fd != -1) 128 133 close(client_fd);
+108
tools/testing/selftests/bpf/progs/test_bpf_nf.c
··· 9 9 #define EINVAL 22 10 10 #define ENOENT 2 11 11 12 + #define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL) 13 + #define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY) 14 + 12 15 extern unsigned long CONFIG_HZ __kconfig; 13 16 14 17 int test_einval_bpf_tuple = 0; 15 18 int test_einval_reserved = 0; 19 + int test_einval_reserved_new = 0; 16 20 int test_einval_netns_id = 0; 17 21 int test_einval_len_opts = 0; 18 22 int test_eproto_l4proto = 0; ··· 26 22 int test_alloc_entry = -EINVAL; 27 23 int test_insert_entry = -EAFNOSUPPORT; 28 24 int test_succ_lookup = -ENOENT; 25 + int test_ct_zone_id_alloc_entry = -EINVAL; 26 + int test_ct_zone_id_insert_entry = -EAFNOSUPPORT; 27 + int test_ct_zone_id_succ_lookup = -ENOENT; 28 + int test_ct_zone_dir_enoent_lookup = 0; 29 + int test_ct_zone_id_enoent_lookup = 0; 29 30 u32 test_delta_timeout = 0; 30 31 u32 test_status = 0; 31 32 u32 test_insert_lookup_mark = 0; ··· 54 45 s32 netns_id; 55 46 s32 error; 56 47 u8 l4proto; 48 + u8 dir; 49 + u8 reserved[2]; 50 + }; 51 + 52 + struct bpf_ct_opts___new { 53 + s32 netns_id; 54 + s32 error; 55 + u8 l4proto; 56 + u8 dir; 57 + u16 ct_zone_id; 58 + u8 ct_zone_dir; 57 59 u8 reserved[3]; 58 60 } __attribute__((preserve_access_index)); 59 61 ··· 240 220 } 241 221 } 242 222 223 + static __always_inline void 224 + nf_ct_opts_new_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, 225 + struct bpf_ct_opts___new *, u32), 226 + struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32, 227 + struct bpf_ct_opts___new *, u32), 228 + void *ctx) 229 + { 230 + struct bpf_ct_opts___new opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 }; 231 + struct bpf_sock_tuple bpf_tuple; 232 + struct nf_conn *ct; 233 + 234 + __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4)); 235 + 236 + opts_def.reserved[0] = 1; 237 + ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, 238 + sizeof(opts_def)); 239 + opts_def.reserved[0] = 0; 240 + if (ct) 241 + bpf_ct_release(ct); 
242 + else 243 + test_einval_reserved_new = opts_def.error; 244 + 245 + bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */ 246 + bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */ 247 + bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */ 248 + bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */ 249 + 250 + /* use non-default ct zone */ 251 + opts_def.ct_zone_id = 10; 252 + opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG; 253 + ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, 254 + sizeof(opts_def)); 255 + if (ct) { 256 + __u16 sport = bpf_get_prandom_u32(); 257 + __u16 dport = bpf_get_prandom_u32(); 258 + union nf_inet_addr saddr = {}; 259 + union nf_inet_addr daddr = {}; 260 + struct nf_conn *ct_ins; 261 + 262 + bpf_ct_set_timeout(ct, 10000); 263 + 264 + /* snat */ 265 + saddr.ip = bpf_get_prandom_u32(); 266 + bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local); 267 + /* dnat */ 268 + daddr.ip = bpf_get_prandom_u32(); 269 + bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local); 270 + 271 + ct_ins = bpf_ct_insert_entry(ct); 272 + if (ct_ins) { 273 + struct nf_conn *ct_lk; 274 + 275 + /* entry should exist in same ct zone we inserted it */ 276 + ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), 277 + &opts_def, sizeof(opts_def)); 278 + if (ct_lk) { 279 + bpf_ct_release(ct_lk); 280 + test_ct_zone_id_succ_lookup = 0; 281 + } 282 + 283 + /* entry should not exist with wrong direction */ 284 + opts_def.ct_zone_dir = NF_CT_ZONE_DIR_REPL; 285 + ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), 286 + &opts_def, sizeof(opts_def)); 287 + opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG; 288 + if (ct_lk) 289 + bpf_ct_release(ct_lk); 290 + else 291 + test_ct_zone_dir_enoent_lookup = opts_def.error; 292 + 293 + /* entry should not exist in default ct zone */ 294 + opts_def.ct_zone_id = 0; 295 + ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), 296 + &opts_def, sizeof(opts_def)); 297 + if 
(ct_lk) 298 + bpf_ct_release(ct_lk); 299 + else 300 + test_ct_zone_id_enoent_lookup = opts_def.error; 301 + 302 + bpf_ct_release(ct_ins); 303 + test_ct_zone_id_insert_entry = 0; 304 + } 305 + test_ct_zone_id_alloc_entry = 0; 306 + } 307 + } 308 + 243 309 SEC("xdp") 244 310 int nf_xdp_ct_test(struct xdp_md *ctx) 245 311 { 246 312 nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx); 313 + nf_ct_opts_new_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx); 247 314 return 0; 248 315 } 249 316 ··· 338 231 int nf_skb_ct_test(struct __sk_buff *ctx) 339 232 { 340 233 nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx); 234 + nf_ct_opts_new_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx); 341 235 return 0; 342 236 } 343 237