Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

selftests/bpf: run flow dissector tests in skb-less mode

Export last_dissection map from flow dissector and use a known place in
tun driver to trigger BPF flow dissection.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>

Authored by Stanislav Fomichev and committed by Daniel Borkmann
0905beec c9cb2c1e

+165 -34
+1 -1
tools/testing/selftests/bpf/flow_dissector_load.c
··· 26 26 struct bpf_object *obj; 27 27 28 28 ret = bpf_flow_load(&obj, cfg_path_name, cfg_section_name, 29 - cfg_map_name, &prog_fd); 29 + cfg_map_name, NULL, &prog_fd, NULL); 30 30 if (ret) 31 31 error(1, 0, "bpf_flow_load %s", cfg_path_name); 32 32
+14 -2
tools/testing/selftests/bpf/flow_dissector_load.h
··· 9 9 const char *path, 10 10 const char *section_name, 11 11 const char *map_name, 12 - int *prog_fd) 12 + const char *keys_map_name, 13 + int *prog_fd, 14 + int *keys_fd) 13 15 { 14 16 struct bpf_program *prog, *main_prog; 15 - struct bpf_map *prog_array; 17 + struct bpf_map *prog_array, *keys; 16 18 int prog_array_fd; 17 19 int ret, fd, i; 18 20 ··· 38 36 prog_array_fd = bpf_map__fd(prog_array); 39 37 if (prog_array_fd < 0) 40 38 return ret; 39 + 40 + if (keys_map_name && keys_fd) { 41 + keys = bpf_object__find_map_by_name(*obj, keys_map_name); 42 + if (!keys) 43 + return -1; 44 + 45 + *keys_fd = bpf_map__fd(keys); 46 + if (*keys_fd < 0) 47 + return -1; 48 + } 41 49 42 50 i = 0; 43 51 bpf_object__for_each_program(prog, *obj) {
+100 -2
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <test_progs.h> 3 + #include <error.h> 4 + #include <linux/if.h> 5 + #include <linux/if_tun.h> 3 6 4 7 #define CHECK_FLOW_KEYS(desc, got, expected) \ 5 8 CHECK_ATTR(memcmp(&got, &expected, sizeof(got)) != 0, \ ··· 143 140 }, 144 141 }; 145 142 143 + static int create_tap(const char *ifname) 144 + { 145 + struct ifreq ifr = { 146 + .ifr_flags = IFF_TAP | IFF_NO_PI | IFF_NAPI | IFF_NAPI_FRAGS, 147 + }; 148 + int fd, ret; 149 + 150 + strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); 151 + 152 + fd = open("/dev/net/tun", O_RDWR); 153 + if (fd < 0) 154 + return -1; 155 + 156 + ret = ioctl(fd, TUNSETIFF, &ifr); 157 + if (ret) 158 + return -1; 159 + 160 + return fd; 161 + } 162 + 163 + static int tx_tap(int fd, void *pkt, size_t len) 164 + { 165 + struct iovec iov[] = { 166 + { 167 + .iov_len = len, 168 + .iov_base = pkt, 169 + }, 170 + }; 171 + return writev(fd, iov, ARRAY_SIZE(iov)); 172 + } 173 + 174 + static int ifup(const char *ifname) 175 + { 176 + struct ifreq ifr = {}; 177 + int sk, ret; 178 + 179 + strncpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name)); 180 + 181 + sk = socket(PF_INET, SOCK_DGRAM, 0); 182 + if (sk < 0) 183 + return -1; 184 + 185 + ret = ioctl(sk, SIOCGIFFLAGS, &ifr); 186 + if (ret) { 187 + close(sk); 188 + return -1; 189 + } 190 + 191 + ifr.ifr_flags |= IFF_UP; 192 + ret = ioctl(sk, SIOCSIFFLAGS, &ifr); 193 + if (ret) { 194 + close(sk); 195 + return -1; 196 + } 197 + 198 + close(sk); 199 + return 0; 200 + } 201 + 146 202 void test_flow_dissector(void) 147 203 { 204 + int i, err, prog_fd, keys_fd = -1, tap_fd; 148 205 struct bpf_object *obj; 149 - int i, err, prog_fd; 206 + __u32 duration = 0; 150 207 151 208 err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector", 152 - "jmp_table", &prog_fd); 209 + "jmp_table", "last_dissection", &prog_fd, &keys_fd); 153 210 if (err) { 154 211 error_cnt++; 155 212 return; ··· 231 168 "err %d errno %d retval %d duration %d size %u/%lu\n", 232 169 err, 
errno, tattr.retval, tattr.duration, 233 170 tattr.data_size_out, sizeof(flow_keys)); 171 + CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); 172 + } 173 + 174 + /* Do the same tests but for skb-less flow dissector. 175 + * We use a known path in the net/tun driver that calls 176 + * eth_get_headlen and we manually export bpf_flow_keys 177 + * via BPF map in this case. 178 + * 179 + * Note, that since eth_get_headlen operates on a L2 level, 180 + * we adjust exported nhoff/thoff by ETH_HLEN. 181 + */ 182 + 183 + err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); 184 + CHECK(err, "bpf_prog_attach", "err %d errno %d", err, errno); 185 + 186 + tap_fd = create_tap("tap0"); 187 + CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d", tap_fd, errno); 188 + err = ifup("tap0"); 189 + CHECK(err, "ifup", "err %d errno %d", err, errno); 190 + 191 + for (i = 0; i < ARRAY_SIZE(tests); i++) { 192 + struct bpf_flow_keys flow_keys = {}; 193 + struct bpf_prog_test_run_attr tattr = {}; 194 + __u32 key = 0; 195 + 196 + err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt)); 197 + CHECK(err < 0, "tx_tap", "err %d errno %d", err, errno); 198 + 199 + err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); 200 + CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err); 201 + 202 + flow_keys.nhoff -= ETH_HLEN; 203 + flow_keys.thoff -= ETH_HLEN; 204 + 205 + CHECK_ATTR(err, tests[i].name, "skb-less err %d\n", err); 234 206 CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); 235 207 } 236 208
+50 -29
tools/testing/selftests/bpf/progs/bpf_flow.c
··· 64 64 .max_entries = 8 65 65 }; 66 66 67 + struct bpf_map_def SEC("maps") last_dissection = { 68 + .type = BPF_MAP_TYPE_ARRAY, 69 + .key_size = sizeof(__u32), 70 + .value_size = sizeof(struct bpf_flow_keys), 71 + .max_entries = 1, 72 + }; 73 + 74 + static __always_inline int export_flow_keys(struct bpf_flow_keys *keys, 75 + int ret) 76 + { 77 + struct bpf_flow_keys *val; 78 + __u32 key = 0; 79 + 80 + val = bpf_map_lookup_elem(&last_dissection, &key); 81 + if (val) 82 + memcpy(val, keys, sizeof(*val)); 83 + return ret; 84 + } 85 + 67 86 static __always_inline void *bpf_flow_dissect_get_header(struct __sk_buff *skb, 68 87 __u16 hdr_size, 69 88 void *buffer) ··· 128 109 break; 129 110 default: 130 111 /* Protocol not supported */ 131 - return BPF_DROP; 112 + return export_flow_keys(keys, BPF_DROP); 132 113 } 133 114 134 - return BPF_DROP; 115 + return export_flow_keys(keys, BPF_DROP); 135 116 } 136 117 137 118 SEC("flow_dissector") ··· 158 139 case IPPROTO_ICMP: 159 140 icmp = bpf_flow_dissect_get_header(skb, sizeof(*icmp), &_icmp); 160 141 if (!icmp) 161 - return BPF_DROP; 162 - return BPF_OK; 142 + return export_flow_keys(keys, BPF_DROP); 143 + return export_flow_keys(keys, BPF_OK); 163 144 case IPPROTO_IPIP: 164 145 keys->is_encap = true; 165 146 return parse_eth_proto(skb, bpf_htons(ETH_P_IP)); ··· 169 150 case IPPROTO_GRE: 170 151 gre = bpf_flow_dissect_get_header(skb, sizeof(*gre), &_gre); 171 152 if (!gre) 172 - return BPF_DROP; 153 + return export_flow_keys(keys, BPF_DROP); 173 154 174 155 if (bpf_htons(gre->flags & GRE_VERSION)) 175 156 /* Only inspect standard GRE packets with version 0 */ 176 - return BPF_OK; 157 + return export_flow_keys(keys, BPF_OK); 177 158 178 159 keys->thoff += sizeof(*gre); /* Step over GRE Flags and Proto */ 179 160 if (GRE_IS_CSUM(gre->flags)) ··· 189 170 eth = bpf_flow_dissect_get_header(skb, sizeof(*eth), 190 171 &_eth); 191 172 if (!eth) 192 - return BPF_DROP; 173 + return export_flow_keys(keys, BPF_DROP); 193 174 194 175 
keys->thoff += sizeof(*eth); 195 176 ··· 200 181 case IPPROTO_TCP: 201 182 tcp = bpf_flow_dissect_get_header(skb, sizeof(*tcp), &_tcp); 202 183 if (!tcp) 203 - return BPF_DROP; 184 + return export_flow_keys(keys, BPF_DROP); 204 185 205 186 if (tcp->doff < 5) 206 - return BPF_DROP; 187 + return export_flow_keys(keys, BPF_DROP); 207 188 208 189 if ((__u8 *)tcp + (tcp->doff << 2) > data_end) 209 - return BPF_DROP; 190 + return export_flow_keys(keys, BPF_DROP); 210 191 211 192 keys->sport = tcp->source; 212 193 keys->dport = tcp->dest; 213 - return BPF_OK; 194 + return export_flow_keys(keys, BPF_OK); 214 195 case IPPROTO_UDP: 215 196 case IPPROTO_UDPLITE: 216 197 udp = bpf_flow_dissect_get_header(skb, sizeof(*udp), &_udp); 217 198 if (!udp) 218 - return BPF_DROP; 199 + return export_flow_keys(keys, BPF_DROP); 219 200 220 201 keys->sport = udp->source; 221 202 keys->dport = udp->dest; 222 - return BPF_OK; 203 + return export_flow_keys(keys, BPF_OK); 223 204 default: 224 - return BPF_DROP; 205 + return export_flow_keys(keys, BPF_DROP); 225 206 } 226 207 227 - return BPF_DROP; 208 + return export_flow_keys(keys, BPF_DROP); 228 209 } 229 210 230 211 static __always_inline int parse_ipv6_proto(struct __sk_buff *skb, __u8 nexthdr) ··· 244 225 return parse_ip_proto(skb, nexthdr); 245 226 } 246 227 247 - return BPF_DROP; 228 + return export_flow_keys(keys, BPF_DROP); 248 229 } 249 230 250 231 PROG(IP)(struct __sk_buff *skb) ··· 257 238 258 239 iph = bpf_flow_dissect_get_header(skb, sizeof(*iph), &_iph); 259 240 if (!iph) 260 - return BPF_DROP; 241 + return export_flow_keys(keys, BPF_DROP); 261 242 262 243 /* IP header cannot be smaller than 20 bytes */ 263 244 if (iph->ihl < 5) 264 - return BPF_DROP; 245 + return export_flow_keys(keys, BPF_DROP); 265 246 266 247 keys->addr_proto = ETH_P_IP; 267 248 keys->ipv4_src = iph->saddr; ··· 269 250 270 251 keys->thoff += iph->ihl << 2; 271 252 if (data + keys->thoff > data_end) 272 - return BPF_DROP; 253 + return export_flow_keys(keys, 
BPF_DROP); 273 254 274 255 if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) { 275 256 keys->is_frag = true; ··· 283 264 } 284 265 285 266 if (done) 286 - return BPF_OK; 267 + return export_flow_keys(keys, BPF_OK); 287 268 288 269 return parse_ip_proto(skb, iph->protocol); 289 270 } ··· 295 276 296 277 ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h); 297 278 if (!ip6h) 298 - return BPF_DROP; 279 + return export_flow_keys(keys, BPF_DROP); 299 280 300 281 keys->addr_proto = ETH_P_IPV6; 301 282 memcpy(&keys->ipv6_src, &ip6h->saddr, 2*sizeof(ip6h->saddr)); ··· 307 288 308 289 PROG(IPV6OP)(struct __sk_buff *skb) 309 290 { 291 + struct bpf_flow_keys *keys = skb->flow_keys; 310 292 struct ipv6_opt_hdr *ip6h, _ip6h; 311 293 312 294 ip6h = bpf_flow_dissect_get_header(skb, sizeof(*ip6h), &_ip6h); 313 295 if (!ip6h) 314 - return BPF_DROP; 296 + return export_flow_keys(keys, BPF_DROP); 315 297 316 298 /* hlen is in 8-octets and does not include the first 8 bytes 317 299 * of the header ··· 329 309 330 310 fragh = bpf_flow_dissect_get_header(skb, sizeof(*fragh), &_fragh); 331 311 if (!fragh) 332 - return BPF_DROP; 312 + return export_flow_keys(keys, BPF_DROP); 333 313 334 314 keys->thoff += sizeof(*fragh); 335 315 keys->is_frag = true; ··· 341 321 342 322 PROG(MPLS)(struct __sk_buff *skb) 343 323 { 324 + struct bpf_flow_keys *keys = skb->flow_keys; 344 325 struct mpls_label *mpls, _mpls; 345 326 346 327 mpls = bpf_flow_dissect_get_header(skb, sizeof(*mpls), &_mpls); 347 328 if (!mpls) 348 - return BPF_DROP; 329 + return export_flow_keys(keys, BPF_DROP); 349 330 350 - return BPF_OK; 331 + return export_flow_keys(keys, BPF_OK); 351 332 } 352 333 353 334 PROG(VLAN)(struct __sk_buff *skb) ··· 360 339 if (keys->n_proto == bpf_htons(ETH_P_8021AD)) { 361 340 vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); 362 341 if (!vlan) 363 - return BPF_DROP; 342 + return export_flow_keys(keys, BPF_DROP); 364 343 365 344 if (vlan->h_vlan_encapsulated_proto != 
bpf_htons(ETH_P_8021Q)) 366 - return BPF_DROP; 345 + return export_flow_keys(keys, BPF_DROP); 367 346 368 347 keys->nhoff += sizeof(*vlan); 369 348 keys->thoff += sizeof(*vlan); ··· 371 350 372 351 vlan = bpf_flow_dissect_get_header(skb, sizeof(*vlan), &_vlan); 373 352 if (!vlan) 374 - return BPF_DROP; 353 + return export_flow_keys(keys, BPF_DROP); 375 354 376 355 keys->nhoff += sizeof(*vlan); 377 356 keys->thoff += sizeof(*vlan); 378 357 /* Only allow 8021AD + 8021Q double tagging and no triple tagging.*/ 379 358 if (vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021AD) || 380 359 vlan->h_vlan_encapsulated_proto == bpf_htons(ETH_P_8021Q)) 381 - return BPF_DROP; 360 + return export_flow_keys(keys, BPF_DROP); 382 361 383 362 keys->n_proto = vlan->h_vlan_encapsulated_proto; 384 363 return parse_eth_proto(skb, vlan->h_vlan_encapsulated_proto);