Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
The following pull request contains BPF updates for your *net-next* tree.

The main changes are:

1) Add the ability to use unaligned chunks in the AF_XDP umem. By
relaxing where the chunks can be placed, it allows using an
arbitrary buffer size and placing a chunk wherever there is a free
address in the umem. This helps with more seamless DPDK AF_XDP driver
integration. Support for i40e, ixgbe and mlx5e, from Kevin and
Maxim.

2) Addition of a wakeup flag for AF_XDP tx and fill rings so that the
application can wake up the kernel for rx/tx processing, which
avoids busy-spinning in the kernel and is useful when the app and
driver are located on the same core. Support for i40e, ixgbe and
mlx5e, from Magnus and Maxim.

3) bpftool fixes for printf()-like functions so the compiler can actually
enforce checks, bpftool build system improvements for custom output
directories, and addition of the 'bpftool map freeze' command, from Quentin.

4) Support attaching/detaching XDP programs from the 'bpftool net' command,
from Daniel.

5) Automatic xskmap cleanup when an AF_XDP socket is released, and several
barrier/{read,write}_once fixes in AF_XDP code, from Björn.

6) Relicensing of bpf_helpers.h/bpf_endian.h for future libbpf
inclusion, as well as libbpf versioning improvements, from Andrii.

7) Several new BPF kselftests for verifier precision tracking, from Alexei.

8) Several BPF kselftest fixes wrt endianness so they run on s390x, from Ilya.

9) And more BPF kselftest improvements all over the place, from Stanislav.

10) Add a simple BPF map op cache to the nfp driver to batch dumps, from Jakub.

11) AF_XDP socket umem mapping improvements for 32-bit archs, from Ivan.

12) Add BPF-to-BPF call and BTF line info support for s390x JIT, from Yauheni.

13) Small optimization in the arm64 JIT to spare one insn for BPF_MOD, from Jerin.

14) Fix an error check in the bpf_tcp_gen_syncookie() helper, from Petar.

15) Various minor fixes and cleanups, from Nathan, Masahiro, Masanari,
Peter, Wei, Yue.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+3492 -701
+6 -4
Documentation/networking/af_xdp.rst
··· 153 153 154 154 Frames passed to the kernel are used for the ingress path (RX rings). 155 155 156 - The user application produces UMEM addrs to this ring. Note that the 157 - kernel will mask the incoming addr. E.g. for a chunk size of 2k, the 158 - log2(2048) LSB of the addr will be masked off, meaning that 2048, 2050 159 - and 3000 refers to the same chunk. 156 + The user application produces UMEM addrs to this ring. Note that, if 157 + running the application with aligned chunk mode, the kernel will mask 158 + the incoming addr. E.g. for a chunk size of 2k, the log2(2048) LSB of 159 + the addr will be masked off, meaning that 2048, 2050 and 3000 refers 160 + to the same chunk. If the user application is run in the unaligned 161 + chunks mode, then the incoming addr will be left untouched. 160 162 161 163 162 164 UMEM Completion Ring
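To make the aligned-chunk masking described above concrete, here is a small, purely illustrative C sketch of the arithmetic; the chunk size and the sample addresses 2048, 2050 and 3000 come from the documentation text, everything else (variable names, the standalone program) is made up for illustration:

/* Aligned chunk mode: the kernel masks off the log2(chunk_size) low bits
 * of the producer addr, so several addrs resolve to the same chunk.
 * In unaligned chunks mode the addr is left untouched.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t chunk_size = 2048;
	uint64_t chunk_mask = ~(chunk_size - 1);	/* clears the low 11 bits */
	uint64_t addrs[] = { 2048, 2050, 3000 };

	for (int i = 0; i < 3; i++)
		printf("addr %llu -> chunk %llu\n",
		       (unsigned long long)addrs[i],
		       (unsigned long long)(addrs[i] & chunk_mask));

	return 0;	/* all three addrs print chunk 2048 */
}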
+3
arch/arm64/net/bpf_jit.h
··· 171 171 /* Rd = Ra + Rn * Rm */ 172 172 #define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \ 173 173 A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD) 174 + /* Rd = Ra - Rn * Rm */ 175 + #define A64_MSUB(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \ 176 + A64_VARIANT(sf), AARCH64_INSN_DATA3_MSUB) 174 177 /* Rd = Rn * Rm */ 175 178 #define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm) 176 179
+2 -4
arch/arm64/net/bpf_jit_comp.c
··· 409 409 break; 410 410 case BPF_MOD: 411 411 emit(A64_UDIV(is64, tmp, dst, src), ctx); 412 - emit(A64_MUL(is64, tmp, tmp, src), ctx); 413 - emit(A64_SUB(is64, dst, dst, tmp), ctx); 412 + emit(A64_MSUB(is64, dst, dst, tmp, src), ctx); 414 413 break; 415 414 } 416 415 break; ··· 515 516 case BPF_ALU64 | BPF_MOD | BPF_K: 516 517 emit_a64_mov_i(is64, tmp2, imm, ctx); 517 518 emit(A64_UDIV(is64, tmp, dst, tmp2), ctx); 518 - emit(A64_MUL(is64, tmp, tmp, tmp2), ctx); 519 - emit(A64_SUB(is64, dst, dst, tmp), ctx); 519 + emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx); 520 520 break; 521 521 case BPF_ALU | BPF_LSH | BPF_K: 522 522 case BPF_ALU64 | BPF_LSH | BPF_K:
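The conversion relies on the remainder identity dst % src == dst - (dst / src) * src; with A64_MSUB (Rd = Ra - Rn * Rm) the multiply and subtract collapse into one instruction after the UDIV, saving one insn per BPF_MOD. A tiny illustrative check of that identity in ordinary userspace C (not JIT code):

/* BPF_MOD lowering used by the arm64 JIT above:
 *   tmp = dst / src;         A64_UDIV
 *   dst = dst - tmp * src;   A64_MSUB (previously A64_MUL + A64_SUB)
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dst = 1000003, src = 7;
	uint64_t tmp = dst / src;
	uint64_t rem = dst - tmp * src;

	assert(rem == dst % src);	/* both are 4 */
	return 0;
}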
+56 -11
arch/s390/net/bpf_jit_comp.c
··· 502 502 * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of 503 503 * stack space for the large switch statement. 504 504 */ 505 - static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i) 505 + static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, 506 + int i, bool extra_pass) 506 507 { 507 508 struct bpf_insn *insn = &fp->insnsi[i]; 508 509 int jmp_off, last, insn_count = 1; ··· 1012 1011 */ 1013 1012 case BPF_JMP | BPF_CALL: 1014 1013 { 1015 - /* 1016 - * b0 = (__bpf_call_base + imm)(b1, b2, b3, b4, b5) 1017 - */ 1018 - const u64 func = (u64)__bpf_call_base + imm; 1014 + u64 func; 1015 + bool func_addr_fixed; 1016 + int ret; 1017 + 1018 + ret = bpf_jit_get_func_addr(fp, insn, extra_pass, 1019 + &func, &func_addr_fixed); 1020 + if (ret < 0) 1021 + return -1; 1019 1022 1020 1023 REG_SET_SEEN(BPF_REG_5); 1021 1024 jit->seen |= SEEN_FUNC; ··· 1288 1283 /* 1289 1284 * Compile eBPF program into s390x code 1290 1285 */ 1291 - static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp) 1286 + static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, 1287 + bool extra_pass) 1292 1288 { 1293 1289 int i, insn_count; 1294 1290 ··· 1298 1292 1299 1293 bpf_jit_prologue(jit, fp->aux->stack_depth); 1300 1294 for (i = 0; i < fp->len; i += insn_count) { 1301 - insn_count = bpf_jit_insn(jit, fp, i); 1295 + insn_count = bpf_jit_insn(jit, fp, i, extra_pass); 1302 1296 if (insn_count < 0) 1303 1297 return -1; 1304 1298 /* Next instruction address */ ··· 1317 1311 return true; 1318 1312 } 1319 1313 1314 + struct s390_jit_data { 1315 + struct bpf_binary_header *header; 1316 + struct bpf_jit ctx; 1317 + int pass; 1318 + }; 1319 + 1320 1320 /* 1321 1321 * Compile eBPF program "fp" 1322 1322 */ ··· 1330 1318 { 1331 1319 struct bpf_prog *tmp, *orig_fp = fp; 1332 1320 struct bpf_binary_header *header; 1321 + struct s390_jit_data *jit_data; 1333 1322 bool tmp_blinded = false; 1323 + bool extra_pass = false; 1334 1324 struct bpf_jit jit; 1335 1325 int pass; 1336 1326 ··· 1351 1337 fp = tmp; 1352 1338 } 1353 1339 1340 + jit_data = fp->aux->jit_data; 1341 + if (!jit_data) { 1342 + jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); 1343 + if (!jit_data) { 1344 + fp = orig_fp; 1345 + goto out; 1346 + } 1347 + fp->aux->jit_data = jit_data; 1348 + } 1349 + if (jit_data->ctx.addrs) { 1350 + jit = jit_data->ctx; 1351 + header = jit_data->header; 1352 + extra_pass = true; 1353 + pass = jit_data->pass + 1; 1354 + goto skip_init_ctx; 1355 + } 1356 + 1354 1357 memset(&jit, 0, sizeof(jit)); 1355 1358 jit.addrs = kcalloc(fp->len + 1, sizeof(*jit.addrs), GFP_KERNEL); 1356 1359 if (jit.addrs == NULL) { ··· 1380 1349 * - 3: Calculate program size and addrs arrray 1381 1350 */ 1382 1351 for (pass = 1; pass <= 3; pass++) { 1383 - if (bpf_jit_prog(&jit, fp)) { 1352 + if (bpf_jit_prog(&jit, fp, extra_pass)) { 1384 1353 fp = orig_fp; 1385 1354 goto free_addrs; 1386 1355 } ··· 1392 1361 fp = orig_fp; 1393 1362 goto free_addrs; 1394 1363 } 1364 + 1395 1365 header = bpf_jit_binary_alloc(jit.size, &jit.prg_buf, 2, jit_fill_hole); 1396 1366 if (!header) { 1397 1367 fp = orig_fp; 1398 1368 goto free_addrs; 1399 1369 } 1400 - if (bpf_jit_prog(&jit, fp)) { 1370 + skip_init_ctx: 1371 + if (bpf_jit_prog(&jit, fp, extra_pass)) { 1401 1372 bpf_jit_binary_free(header); 1402 1373 fp = orig_fp; 1403 1374 goto free_addrs; ··· 1408 1375 bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf); 1409 1376 print_fn_code(jit.prg_buf, jit.size_prg); 1410 1377 } 1411 - 
bpf_jit_binary_lock_ro(header); 1378 + if (!fp->is_func || extra_pass) { 1379 + bpf_jit_binary_lock_ro(header); 1380 + } else { 1381 + jit_data->header = header; 1382 + jit_data->ctx = jit; 1383 + jit_data->pass = pass; 1384 + } 1412 1385 fp->bpf_func = (void *) jit.prg_buf; 1413 1386 fp->jited = 1; 1414 1387 fp->jited_len = jit.size; 1388 + 1389 + if (!fp->is_func || extra_pass) { 1390 + bpf_prog_fill_jited_linfo(fp, jit.addrs + 1); 1415 1391 free_addrs: 1416 - kfree(jit.addrs); 1392 + kfree(jit.addrs); 1393 + kfree(jit_data); 1394 + fp->aux->jit_data = NULL; 1395 + } 1417 1396 out: 1418 1397 if (tmp_blinded) 1419 1398 bpf_jit_prog_release_other(fp, fp == orig_fp ?
+3 -2
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 12530 12530 if (need_reset && prog) 12531 12531 for (i = 0; i < vsi->num_queue_pairs; i++) 12532 12532 if (vsi->xdp_rings[i]->xsk_umem) 12533 - (void)i40e_xsk_async_xmit(vsi->netdev, i); 12533 + (void)i40e_xsk_wakeup(vsi->netdev, i, 12534 + XDP_WAKEUP_RX); 12534 12535 12535 12536 return 0; 12536 12537 } ··· 12853 12852 .ndo_bridge_setlink = i40e_ndo_bridge_setlink, 12854 12853 .ndo_bpf = i40e_xdp, 12855 12854 .ndo_xdp_xmit = i40e_xdp_xmit, 12856 - .ndo_xsk_async_xmit = i40e_xsk_async_xmit, 12855 + .ndo_xsk_wakeup = i40e_xsk_wakeup, 12857 12856 .ndo_dfwd_add_station = i40e_fwd_add, 12858 12857 .ndo_dfwd_del_station = i40e_fwd_del, 12859 12858 };
+35 -17
drivers/net/ethernet/intel/i40e/i40e_xsk.c
··· 116 116 return err; 117 117 118 118 /* Kick start the NAPI context so that receiving will start */ 119 - err = i40e_xsk_async_xmit(vsi->netdev, qid); 119 + err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX); 120 120 if (err) 121 121 return err; 122 122 } ··· 190 190 **/ 191 191 static int i40e_run_xdp_zc(struct i40e_ring *rx_ring, struct xdp_buff *xdp) 192 192 { 193 + struct xdp_umem *umem = rx_ring->xsk_umem; 193 194 int err, result = I40E_XDP_PASS; 195 + u64 offset = umem->headroom; 194 196 struct i40e_ring *xdp_ring; 195 197 struct bpf_prog *xdp_prog; 196 198 u32 act; ··· 203 201 */ 204 202 xdp_prog = READ_ONCE(rx_ring->xdp_prog); 205 203 act = bpf_prog_run_xdp(xdp_prog, xdp); 206 - xdp->handle += xdp->data - xdp->data_hard_start; 204 + offset += xdp->data - xdp->data_hard_start; 205 + 206 + xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); 207 + 207 208 switch (act) { 208 209 case XDP_PASS: 209 210 break; ··· 267 262 bi->addr = xdp_umem_get_data(umem, handle); 268 263 bi->addr += hr; 269 264 270 - bi->handle = handle + umem->headroom; 265 + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); 271 266 272 267 xsk_umem_discard_addr(umem); 273 268 return true; ··· 304 299 bi->addr = xdp_umem_get_data(umem, handle); 305 300 bi->addr += hr; 306 301 307 - bi->handle = handle + umem->headroom; 302 + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); 308 303 309 304 xsk_umem_discard_addr_rq(umem); 310 305 return true; ··· 425 420 struct i40e_rx_buffer *old_bi) 426 421 { 427 422 struct i40e_rx_buffer *new_bi = &rx_ring->rx_bi[rx_ring->next_to_alloc]; 428 - unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; 429 - u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; 430 423 u16 nta = rx_ring->next_to_alloc; 431 424 432 425 /* update, and store next to alloc */ ··· 432 429 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 433 430 434 431 /* transfer page from old buffer to new buffer */ 435 - new_bi->dma = old_bi->dma & mask; 436 - new_bi->dma += hr; 437 - 438 - new_bi->addr = (void *)((unsigned long)old_bi->addr & mask); 439 - new_bi->addr += hr; 440 - 441 - new_bi->handle = old_bi->handle & mask; 442 - new_bi->handle += rx_ring->xsk_umem->headroom; 432 + new_bi->dma = old_bi->dma; 433 + new_bi->addr = old_bi->addr; 434 + new_bi->handle = old_bi->handle; 443 435 444 436 old_bi->addr = NULL; 445 437 } ··· 469 471 bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); 470 472 bi->addr += hr; 471 473 472 - bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; 474 + bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, 475 + rx_ring->xsk_umem->headroom); 473 476 } 474 477 475 478 /** ··· 625 626 626 627 i40e_finalize_xdp_rx(rx_ring, xdp_xmit); 627 628 i40e_update_rx_stats(rx_ring, total_rx_bytes, total_rx_packets); 629 + 630 + if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { 631 + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) 632 + xsk_set_rx_need_wakeup(rx_ring->xsk_umem); 633 + else 634 + xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); 635 + 636 + return (int)total_rx_packets; 637 + } 628 638 return failure ? 
budget : (int)total_rx_packets; 629 639 } 630 640 ··· 689 681 i40e_xdp_ring_update_tail(xdp_ring); 690 682 691 683 xsk_umem_consume_tx_done(xdp_ring->xsk_umem); 684 + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) 685 + xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); 692 686 } 693 687 694 688 return !!budget && work_done; ··· 769 759 i40e_update_tx_stats(tx_ring, completed_frames, total_bytes); 770 760 771 761 out_xmit: 762 + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { 763 + if (tx_ring->next_to_clean == tx_ring->next_to_use) 764 + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); 765 + else 766 + xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); 767 + } 768 + 772 769 xmit_done = i40e_xmit_zc(tx_ring, budget); 773 770 774 771 return work_done && xmit_done; 775 772 } 776 773 777 774 /** 778 - * i40e_xsk_async_xmit - Implements the ndo_xsk_async_xmit 775 + * i40e_xsk_wakeup - Implements the ndo_xsk_wakeup 779 776 * @dev: the netdevice 780 777 * @queue_id: queue id to wake up 778 + * @flags: ignored in our case since we have Rx and Tx in the same NAPI. 781 779 * 782 780 * Returns <0 for errors, 0 otherwise. 783 781 **/ 784 - int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id) 782 + int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags) 785 783 { 786 784 struct i40e_netdev_priv *np = netdev_priv(dev); 787 785 struct i40e_vsi *vsi = np->vsi;
+1 -1
drivers/net/ethernet/intel/i40e/i40e_xsk.h
··· 18 18 19 19 bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, 20 20 struct i40e_ring *tx_ring, int napi_budget); 21 - int i40e_xsk_async_xmit(struct net_device *dev, u32 queue_id); 21 + int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); 22 22 23 23 #endif /* _I40E_XSK_H_ */
+3 -2
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 10260 10260 if (need_reset && prog) 10261 10261 for (i = 0; i < adapter->num_rx_queues; i++) 10262 10262 if (adapter->xdp_ring[i]->xsk_umem) 10263 - (void)ixgbe_xsk_async_xmit(adapter->netdev, i); 10263 + (void)ixgbe_xsk_wakeup(adapter->netdev, i, 10264 + XDP_WAKEUP_RX); 10264 10265 10265 10266 return 0; 10266 10267 } ··· 10380 10379 .ndo_features_check = ixgbe_features_check, 10381 10380 .ndo_bpf = ixgbe_xdp, 10382 10381 .ndo_xdp_xmit = ixgbe_xdp_xmit, 10383 - .ndo_xsk_async_xmit = ixgbe_xsk_async_xmit, 10382 + .ndo_xsk_wakeup = ixgbe_xsk_wakeup, 10384 10383 }; 10385 10384 10386 10385 static void ixgbe_disable_txr_hw(struct ixgbe_adapter *adapter,
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
··· 42 42 void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring); 43 43 bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector, 44 44 struct ixgbe_ring *tx_ring, int napi_budget); 45 - int ixgbe_xsk_async_xmit(struct net_device *dev, u32 queue_id); 45 + int ixgbe_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); 46 46 void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring); 47 47 48 48 #endif /* #define _IXGBE_TXRX_COMMON_H_ */
+33 -16
drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
··· 100 100 ixgbe_txrx_ring_enable(adapter, qid); 101 101 102 102 /* Kick start the NAPI context so that receiving will start */ 103 - err = ixgbe_xsk_async_xmit(adapter->netdev, qid); 103 + err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); 104 104 if (err) 105 105 return err; 106 106 } ··· 143 143 struct ixgbe_ring *rx_ring, 144 144 struct xdp_buff *xdp) 145 145 { 146 + struct xdp_umem *umem = rx_ring->xsk_umem; 146 147 int err, result = IXGBE_XDP_PASS; 148 + u64 offset = umem->headroom; 147 149 struct bpf_prog *xdp_prog; 148 150 struct xdp_frame *xdpf; 149 151 u32 act; ··· 153 151 rcu_read_lock(); 154 152 xdp_prog = READ_ONCE(rx_ring->xdp_prog); 155 153 act = bpf_prog_run_xdp(xdp_prog, xdp); 156 - xdp->handle += xdp->data - xdp->data_hard_start; 154 + offset += xdp->data - xdp->data_hard_start; 155 + 156 + xdp->handle = xsk_umem_adjust_offset(umem, xdp->handle, offset); 157 + 157 158 switch (act) { 158 159 case XDP_PASS: 159 160 break; ··· 206 201 static void ixgbe_reuse_rx_buffer_zc(struct ixgbe_ring *rx_ring, 207 202 struct ixgbe_rx_buffer *obi) 208 203 { 209 - unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask; 210 - u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM; 211 204 u16 nta = rx_ring->next_to_alloc; 212 205 struct ixgbe_rx_buffer *nbi; 213 206 ··· 215 212 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; 216 213 217 214 /* transfer page from old buffer to new buffer */ 218 - nbi->dma = obi->dma & mask; 219 - nbi->dma += hr; 220 - 221 - nbi->addr = (void *)((unsigned long)obi->addr & mask); 222 - nbi->addr += hr; 223 - 224 - nbi->handle = obi->handle & mask; 225 - nbi->handle += rx_ring->xsk_umem->headroom; 215 + nbi->dma = obi->dma; 216 + nbi->addr = obi->addr; 217 + nbi->handle = obi->handle; 226 218 227 219 obi->addr = NULL; 228 220 obi->skb = NULL; ··· 248 250 bi->addr = xdp_umem_get_data(rx_ring->xsk_umem, handle); 249 251 bi->addr += hr; 250 252 251 - bi->handle = (u64)handle + rx_ring->xsk_umem->headroom; 253 + bi->handle = xsk_umem_adjust_offset(rx_ring->xsk_umem, (u64)handle, 254 + rx_ring->xsk_umem->headroom); 252 255 } 253 256 254 257 static bool ixgbe_alloc_buffer_zc(struct ixgbe_ring *rx_ring, ··· 275 276 bi->addr = xdp_umem_get_data(umem, handle); 276 277 bi->addr += hr; 277 278 278 - bi->handle = handle + umem->headroom; 279 + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); 279 280 280 281 xsk_umem_discard_addr(umem); 281 282 return true; ··· 302 303 bi->addr = xdp_umem_get_data(umem, handle); 303 304 bi->addr += hr; 304 305 305 - bi->handle = handle + umem->headroom; 306 + bi->handle = xsk_umem_adjust_offset(umem, handle, umem->headroom); 306 307 307 308 xsk_umem_discard_addr_rq(umem); 308 309 return true; ··· 546 547 q_vector->rx.total_packets += total_rx_packets; 547 548 q_vector->rx.total_bytes += total_rx_bytes; 548 549 550 + if (xsk_umem_uses_need_wakeup(rx_ring->xsk_umem)) { 551 + if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) 552 + xsk_set_rx_need_wakeup(rx_ring->xsk_umem); 553 + else 554 + xsk_clear_rx_need_wakeup(rx_ring->xsk_umem); 555 + 556 + return (int)total_rx_packets; 557 + } 549 558 return failure ? 
budget : (int)total_rx_packets; 550 559 } 551 560 ··· 622 615 if (tx_desc) { 623 616 ixgbe_xdp_ring_update_tail(xdp_ring); 624 617 xsk_umem_consume_tx_done(xdp_ring->xsk_umem); 618 + if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) 619 + xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem); 625 620 } 626 621 627 622 return !!budget && work_done; ··· 697 688 if (xsk_frames) 698 689 xsk_umem_complete_tx(umem, xsk_frames); 699 690 691 + if (xsk_umem_uses_need_wakeup(tx_ring->xsk_umem)) { 692 + if (tx_ring->next_to_clean == tx_ring->next_to_use) 693 + xsk_set_tx_need_wakeup(tx_ring->xsk_umem); 694 + else 695 + xsk_clear_tx_need_wakeup(tx_ring->xsk_umem); 696 + } 697 + 700 698 xmit_done = ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); 699 + 701 700 return budget > 0 && xmit_done; 702 701 } 703 702 704 - int ixgbe_xsk_async_xmit(struct net_device *dev, u32 qid) 703 + int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) 705 704 { 706 705 struct ixgbe_adapter *adapter = netdev_priv(dev); 707 706 struct ixgbe_ring *ring;
+19 -4
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
··· 25 25 return headroom; 26 26 } 27 27 28 - u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, 29 - struct mlx5e_xsk_param *xsk) 28 + u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params, 29 + struct mlx5e_xsk_param *xsk) 30 30 { 31 31 u32 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); 32 32 u16 linear_rq_headroom = mlx5e_get_linear_rq_headroom(params, xsk); 33 - u32 frag_sz = linear_rq_headroom + hw_mtu; 33 + 34 + return linear_rq_headroom + hw_mtu; 35 + } 36 + 37 + u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, 38 + struct mlx5e_xsk_param *xsk) 39 + { 40 + u32 frag_sz = mlx5e_rx_get_min_frag_sz(params, xsk); 34 41 35 42 /* AF_XDP doesn't build SKBs in place. */ 36 43 if (!xsk) 37 44 frag_sz = MLX5_SKB_FRAG_SZ(frag_sz); 38 45 39 - /* XDP in mlx5e doesn't support multiple packets per page. */ 46 + /* XDP in mlx5e doesn't support multiple packets per page. AF_XDP is a 47 + * special case. It can run with frames smaller than a page, as it 48 + * doesn't allocate pages dynamically. However, here we pretend that 49 + * fragments are page-sized: it allows to treat XSK frames like pages 50 + * by redirecting alloc and free operations to XSK rings and by using 51 + * the fact there are no multiple packets per "page" (which is a frame). 52 + * The latter is important, because frames may come in a random order, 53 + * and we will have trouble assemblying a real page of multiple frames. 54 + */ 40 55 if (mlx5e_rx_is_xdp(params, xsk)) 41 56 frag_sz = max_t(u32, frag_sz, PAGE_SIZE); 42 57
+2
drivers/net/ethernet/mellanox/mlx5/core/en/params.h
··· 76 76 77 77 u16 mlx5e_get_linear_rq_headroom(struct mlx5e_params *params, 78 78 struct mlx5e_xsk_param *xsk); 79 + u32 mlx5e_rx_get_min_frag_sz(struct mlx5e_params *params, 80 + struct mlx5e_xsk_param *xsk); 79 81 u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params, 80 82 struct mlx5e_xsk_param *xsk); 81 83 u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params,
+6 -2
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
··· 122 122 void *va, u16 *rx_headroom, u32 *len, bool xsk) 123 123 { 124 124 struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); 125 + struct xdp_umem *umem = rq->umem; 125 126 struct xdp_buff xdp; 126 127 u32 act; 127 128 int err; ··· 139 138 xdp.rxq = &rq->xdp_rxq; 140 139 141 140 act = bpf_prog_run_xdp(prog, &xdp); 142 - if (xsk) 143 - xdp.handle += xdp.data - xdp.data_hard_start; 141 + if (xsk) { 142 + u64 off = xdp.data - xdp.data_hard_start; 143 + 144 + xdp.handle = xsk_umem_adjust_offset(umem, xdp.handle, off); 145 + } 144 146 switch (act) { 145 147 case XDP_PASS: 146 148 *rx_headroom = xdp.data - xdp.data_hard_start;
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c
··· 24 24 if (!xsk_umem_peek_addr_rq(umem, &handle)) 25 25 return -ENOMEM; 26 26 27 - dma_info->xsk.handle = handle + rq->buff.umem_headroom; 27 + dma_info->xsk.handle = xsk_umem_adjust_offset(umem, handle, 28 + rq->buff.umem_headroom); 28 29 dma_info->xsk.data = xdp_umem_get_data(umem, dma_info->xsk.handle); 29 30 30 31 /* No need to add headroom to the DMA address. In striding RQ case, we ··· 105 104 106 105 /* head_offset is not used in this function, because di->xsk.data and 107 106 * di->addr point directly to the necessary place. Furthermore, in the 108 - * current implementation, one page = one packet = one frame, so 107 + * current implementation, UMR pages are mapped to XSK frames, so 109 108 * head_offset should always be 0. 110 109 */ 111 110 WARN_ON_ONCE(head_offset);
+14
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.h
··· 5 5 #define __MLX5_EN_XSK_RX_H__ 6 6 7 7 #include "en.h" 8 + #include <net/xdp_sock.h> 8 9 9 10 /* RX data path */ 10 11 ··· 24 23 struct mlx5_cqe64 *cqe, 25 24 struct mlx5e_wqe_frag_info *wi, 26 25 u32 cqe_bcnt); 26 + 27 + static inline bool mlx5e_xsk_update_rx_wakeup(struct mlx5e_rq *rq, bool alloc_err) 28 + { 29 + if (!xsk_umem_uses_need_wakeup(rq->umem)) 30 + return alloc_err; 31 + 32 + if (unlikely(alloc_err)) 33 + xsk_set_rx_need_wakeup(rq->umem); 34 + else 35 + xsk_clear_rx_need_wakeup(rq->umem); 36 + 37 + return false; 38 + } 27 39 28 40 #endif /* __MLX5_EN_XSK_RX_H__ */
+10 -5
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c
··· 4 4 #include "setup.h" 5 5 #include "en/params.h" 6 6 7 + /* It matches XDP_UMEM_MIN_CHUNK_SIZE, but as this constant is private and may 8 + * change unexpectedly, and mlx5e has a minimum valid stride size for striding 9 + * RQ, keep this check in the driver. 10 + */ 11 + #define MLX5E_MIN_XSK_CHUNK_SIZE 2048 12 + 7 13 bool mlx5e_validate_xsk_param(struct mlx5e_params *params, 8 14 struct mlx5e_xsk_param *xsk, 9 15 struct mlx5_core_dev *mdev) 10 16 { 11 - /* AF_XDP doesn't support frames larger than PAGE_SIZE, and the current 12 - * mlx5e XDP implementation doesn't support multiple packets per page. 13 - */ 14 - if (xsk->chunk_size != PAGE_SIZE) 17 + /* AF_XDP doesn't support frames larger than PAGE_SIZE. */ 18 + if (xsk->chunk_size > PAGE_SIZE || 19 + xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) 15 20 return false; 16 21 17 22 /* Current MTU and XSK headroom don't allow packets to fit the frames. */ 18 - if (mlx5e_rx_get_linear_frag_sz(params, xsk) > xsk->chunk_size) 23 + if (mlx5e_rx_get_min_frag_sz(params, xsk) > xsk->chunk_size) 19 24 return false; 20 25 21 26 /* frag_sz is different for regular and XSK RQs, so ensure that linear
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.c
··· 7 7 #include "en/params.h" 8 8 #include <net/xdp_sock.h> 9 9 10 - int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid) 10 + int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags) 11 11 { 12 12 struct mlx5e_priv *priv = netdev_priv(dev); 13 13 struct mlx5e_params *params = &priv->channels.params;
+13 -1
drivers/net/ethernet/mellanox/mlx5/core/en/xsk/tx.h
··· 5 5 #define __MLX5_EN_XSK_TX_H__ 6 6 7 7 #include "en.h" 8 + #include <net/xdp_sock.h> 8 9 9 10 /* TX data path */ 10 11 11 - int mlx5e_xsk_async_xmit(struct net_device *dev, u32 qid); 12 + int mlx5e_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags); 12 13 13 14 bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget); 15 + 16 + static inline void mlx5e_xsk_update_tx_wakeup(struct mlx5e_xdpsq *sq) 17 + { 18 + if (!xsk_umem_uses_need_wakeup(sq->umem)) 19 + return; 20 + 21 + if (sq->pc != sq->cc) 22 + xsk_clear_tx_need_wakeup(sq->umem); 23 + else 24 + xsk_set_tx_need_wakeup(sq->umem); 25 + } 14 26 15 27 #endif /* __MLX5_EN_XSK_TX_H__ */
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4580 4580 .ndo_tx_timeout = mlx5e_tx_timeout, 4581 4581 .ndo_bpf = mlx5e_xdp, 4582 4582 .ndo_xdp_xmit = mlx5e_xdp_xmit, 4583 - .ndo_xsk_async_xmit = mlx5e_xsk_async_xmit, 4583 + .ndo_xsk_wakeup = mlx5e_xsk_wakeup, 4584 4584 #ifdef CONFIG_MLX5_EN_ARFS 4585 4585 .ndo_rx_flow_steer = mlx5e_rx_flow_steer, 4586 4586 #endif
+5 -2
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 695 695 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk; 696 696 rq->mpwqe.actual_wq_head = head; 697 697 698 - /* If XSK Fill Ring doesn't have enough frames, busy poll by 699 - * rescheduling the NAPI poll. 698 + /* If XSK Fill Ring doesn't have enough frames, report the error, so 699 + * that one of the actions can be performed: 700 + * 1. If need_wakeup is used, signal that the application has to kick 701 + * the driver when it refills the Fill Ring. 702 + * 2. Otherwise, busy poll by rescheduling the NAPI poll. 700 703 */ 701 704 if (unlikely(alloc_err == -ENOMEM && rq->umem)) 702 705 return true;
+25 -2
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
··· 33 33 #include <linux/irq.h> 34 34 #include "en.h" 35 35 #include "en/xdp.h" 36 + #include "en/xsk/rx.h" 36 37 #include "en/xsk/tx.h" 37 38 38 39 static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) ··· 82 81 mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl); 83 82 } 84 83 84 + static bool mlx5e_napi_xsk_post(struct mlx5e_xdpsq *xsksq, struct mlx5e_rq *xskrq) 85 + { 86 + bool busy_xsk = false, xsk_rx_alloc_err; 87 + 88 + /* Handle the race between the application querying need_wakeup and the 89 + * driver setting it: 90 + * 1. Update need_wakeup both before and after the TX. If it goes to 91 + * "yes", it can only happen with the first update. 92 + * 2. If the application queried need_wakeup before we set it, the 93 + * packets will be transmitted anyway, even w/o a wakeup. 94 + * 3. Give a chance to clear need_wakeup after new packets were queued 95 + * for TX. 96 + */ 97 + mlx5e_xsk_update_tx_wakeup(xsksq); 98 + busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET); 99 + mlx5e_xsk_update_tx_wakeup(xsksq); 100 + 101 + xsk_rx_alloc_err = xskrq->post_wqes(xskrq); 102 + busy_xsk |= mlx5e_xsk_update_rx_wakeup(xskrq, xsk_rx_alloc_err); 103 + 104 + return busy_xsk; 105 + } 106 + 85 107 int mlx5e_napi_poll(struct napi_struct *napi, int budget) 86 108 { 87 109 struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, ··· 146 122 if (xsk_open) { 147 123 mlx5e_poll_ico_cq(&c->xskicosq.cq); 148 124 busy |= mlx5e_poll_xdpsq_cq(&xsksq->cq); 149 - busy_xsk |= mlx5e_xsk_tx(xsksq, MLX5E_TX_XSK_POLL_BUDGET); 150 - busy_xsk |= xskrq->post_wqes(xskrq); 125 + busy_xsk |= mlx5e_napi_xsk_post(xsksq, xskrq); 151 126 } 152 127 153 128 busy |= busy_xsk;
+176 -11
drivers/net/ethernet/netronome/nfp/bpf/cmsg.c
··· 6 6 #include <linux/bug.h> 7 7 #include <linux/jiffies.h> 8 8 #include <linux/skbuff.h> 9 + #include <linux/timekeeping.h> 9 10 10 11 #include "../ccm.h" 11 12 #include "../nfp_app.h" ··· 176 175 return &reply->data[bpf->cmsg_key_sz * (n + 1) + bpf->cmsg_val_sz * n]; 177 176 } 178 177 178 + static bool nfp_bpf_ctrl_op_cache_invalidate(enum nfp_ccm_type op) 179 + { 180 + return op == NFP_CCM_TYPE_BPF_MAP_UPDATE || 181 + op == NFP_CCM_TYPE_BPF_MAP_DELETE; 182 + } 183 + 184 + static bool nfp_bpf_ctrl_op_cache_capable(enum nfp_ccm_type op) 185 + { 186 + return op == NFP_CCM_TYPE_BPF_MAP_LOOKUP || 187 + op == NFP_CCM_TYPE_BPF_MAP_GETNEXT; 188 + } 189 + 190 + static bool nfp_bpf_ctrl_op_cache_fill(enum nfp_ccm_type op) 191 + { 192 + return op == NFP_CCM_TYPE_BPF_MAP_GETFIRST || 193 + op == NFP_CCM_TYPE_BPF_MAP_GETNEXT; 194 + } 195 + 196 + static unsigned int 197 + nfp_bpf_ctrl_op_cache_get(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op, 198 + const u8 *key, u8 *out_key, u8 *out_value, 199 + u32 *cache_gen) 200 + { 201 + struct bpf_map *map = &nfp_map->offmap->map; 202 + struct nfp_app_bpf *bpf = nfp_map->bpf; 203 + unsigned int i, count, n_entries; 204 + struct cmsg_reply_map_op *reply; 205 + 206 + n_entries = nfp_bpf_ctrl_op_cache_fill(op) ? bpf->cmsg_cache_cnt : 1; 207 + 208 + spin_lock(&nfp_map->cache_lock); 209 + *cache_gen = nfp_map->cache_gen; 210 + if (nfp_map->cache_blockers) 211 + n_entries = 1; 212 + 213 + if (nfp_bpf_ctrl_op_cache_invalidate(op)) 214 + goto exit_block; 215 + if (!nfp_bpf_ctrl_op_cache_capable(op)) 216 + goto exit_unlock; 217 + 218 + if (!nfp_map->cache) 219 + goto exit_unlock; 220 + if (nfp_map->cache_to < ktime_get_ns()) 221 + goto exit_invalidate; 222 + 223 + reply = (void *)nfp_map->cache->data; 224 + count = be32_to_cpu(reply->count); 225 + 226 + for (i = 0; i < count; i++) { 227 + void *cached_key; 228 + 229 + cached_key = nfp_bpf_ctrl_reply_key(bpf, reply, i); 230 + if (memcmp(cached_key, key, map->key_size)) 231 + continue; 232 + 233 + if (op == NFP_CCM_TYPE_BPF_MAP_LOOKUP) 234 + memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, i), 235 + map->value_size); 236 + if (op == NFP_CCM_TYPE_BPF_MAP_GETNEXT) { 237 + if (i + 1 == count) 238 + break; 239 + 240 + memcpy(out_key, 241 + nfp_bpf_ctrl_reply_key(bpf, reply, i + 1), 242 + map->key_size); 243 + } 244 + 245 + n_entries = 0; 246 + goto exit_unlock; 247 + } 248 + goto exit_unlock; 249 + 250 + exit_block: 251 + nfp_map->cache_blockers++; 252 + exit_invalidate: 253 + dev_consume_skb_any(nfp_map->cache); 254 + nfp_map->cache = NULL; 255 + exit_unlock: 256 + spin_unlock(&nfp_map->cache_lock); 257 + return n_entries; 258 + } 259 + 260 + static void 261 + nfp_bpf_ctrl_op_cache_put(struct nfp_bpf_map *nfp_map, enum nfp_ccm_type op, 262 + struct sk_buff *skb, u32 cache_gen) 263 + { 264 + bool blocker, filler; 265 + 266 + blocker = nfp_bpf_ctrl_op_cache_invalidate(op); 267 + filler = nfp_bpf_ctrl_op_cache_fill(op); 268 + if (blocker || filler) { 269 + u64 to = 0; 270 + 271 + if (filler) 272 + to = ktime_get_ns() + NFP_BPF_MAP_CACHE_TIME_NS; 273 + 274 + spin_lock(&nfp_map->cache_lock); 275 + if (blocker) { 276 + nfp_map->cache_blockers--; 277 + nfp_map->cache_gen++; 278 + } 279 + if (filler && !nfp_map->cache_blockers && 280 + nfp_map->cache_gen == cache_gen) { 281 + nfp_map->cache_to = to; 282 + swap(nfp_map->cache, skb); 283 + } 284 + spin_unlock(&nfp_map->cache_lock); 285 + } 286 + 287 + dev_consume_skb_any(skb); 288 + } 289 + 179 290 static int 180 291 nfp_bpf_ctrl_entry_op(struct bpf_offloaded_map *offmap, 
enum nfp_ccm_type op, 181 292 u8 *key, u8 *value, u64 flags, u8 *out_key, u8 *out_value) 182 293 { 183 294 struct nfp_bpf_map *nfp_map = offmap->dev_priv; 295 + unsigned int n_entries, reply_entries, count; 184 296 struct nfp_app_bpf *bpf = nfp_map->bpf; 185 297 struct bpf_map *map = &offmap->map; 186 298 struct cmsg_reply_map_op *reply; 187 299 struct cmsg_req_map_op *req; 188 300 struct sk_buff *skb; 301 + u32 cache_gen; 189 302 int err; 190 303 191 304 /* FW messages have no space for more than 32 bits of flags */ 192 305 if (flags >> 32) 193 306 return -EOPNOTSUPP; 194 307 308 + /* Handle op cache */ 309 + n_entries = nfp_bpf_ctrl_op_cache_get(nfp_map, op, key, out_key, 310 + out_value, &cache_gen); 311 + if (!n_entries) 312 + return 0; 313 + 195 314 skb = nfp_bpf_cmsg_map_req_alloc(bpf, 1); 196 - if (!skb) 197 - return -ENOMEM; 315 + if (!skb) { 316 + err = -ENOMEM; 317 + goto err_cache_put; 318 + } 198 319 199 320 req = (void *)skb->data; 200 321 req->tid = cpu_to_be32(nfp_map->tid); 201 - req->count = cpu_to_be32(1); 322 + req->count = cpu_to_be32(n_entries); 202 323 req->flags = cpu_to_be32(flags); 203 324 204 325 /* Copy inputs */ ··· 330 207 memcpy(nfp_bpf_ctrl_req_val(bpf, req, 0), value, 331 208 map->value_size); 332 209 333 - skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 334 - nfp_bpf_cmsg_map_reply_size(bpf, 1)); 335 - if (IS_ERR(skb)) 336 - return PTR_ERR(skb); 210 + skb = nfp_ccm_communicate(&bpf->ccm, skb, op, 0); 211 + if (IS_ERR(skb)) { 212 + err = PTR_ERR(skb); 213 + goto err_cache_put; 214 + } 215 + 216 + if (skb->len < sizeof(*reply)) { 217 + cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d!\n", 218 + op, skb->len); 219 + err = -EIO; 220 + goto err_free; 221 + } 337 222 338 223 reply = (void *)skb->data; 224 + count = be32_to_cpu(reply->count); 339 225 err = nfp_bpf_ctrl_rc_to_errno(bpf, &reply->reply_hdr); 226 + /* FW responds with message sized to hold the good entries, 227 + * plus one extra entry if there was an error. 
228 + */ 229 + reply_entries = count + !!err; 230 + if (n_entries > 1 && count) 231 + err = 0; 340 232 if (err) 341 233 goto err_free; 234 + 235 + if (skb->len != nfp_bpf_cmsg_map_reply_size(bpf, reply_entries)) { 236 + cmsg_warn(bpf, "cmsg drop - type 0x%02x too short %d for %d entries!\n", 237 + op, skb->len, reply_entries); 238 + err = -EIO; 239 + goto err_free; 240 + } 342 241 343 242 /* Copy outputs */ 344 243 if (out_key) ··· 370 225 memcpy(out_value, nfp_bpf_ctrl_reply_val(bpf, reply, 0), 371 226 map->value_size); 372 227 373 - dev_consume_skb_any(skb); 228 + nfp_bpf_ctrl_op_cache_put(nfp_map, op, skb, cache_gen); 374 229 375 230 return 0; 376 231 err_free: 377 232 dev_kfree_skb_any(skb); 233 + err_cache_put: 234 + nfp_bpf_ctrl_op_cache_put(nfp_map, op, NULL, cache_gen); 378 235 return err; 379 236 } 380 237 ··· 414 267 key, NULL, 0, next_key, NULL); 415 268 } 416 269 270 + unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf) 271 + { 272 + return max(nfp_bpf_cmsg_map_req_size(bpf, 1), 273 + nfp_bpf_cmsg_map_reply_size(bpf, 1)); 274 + } 275 + 417 276 unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf) 418 277 { 419 - return max3((unsigned int)NFP_NET_DEFAULT_MTU, 420 - nfp_bpf_cmsg_map_req_size(bpf, 1), 421 - nfp_bpf_cmsg_map_reply_size(bpf, 1)); 278 + return max3(NFP_NET_DEFAULT_MTU, 279 + nfp_bpf_cmsg_map_req_size(bpf, NFP_BPF_MAP_CACHE_CNT), 280 + nfp_bpf_cmsg_map_reply_size(bpf, NFP_BPF_MAP_CACHE_CNT)); 281 + } 282 + 283 + unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf) 284 + { 285 + unsigned int mtu, req_max, reply_max, entry_sz; 286 + 287 + mtu = bpf->app->ctrl->dp.mtu; 288 + entry_sz = bpf->cmsg_key_sz + bpf->cmsg_val_sz; 289 + req_max = (mtu - sizeof(struct cmsg_req_map_op)) / entry_sz; 290 + reply_max = (mtu - sizeof(struct cmsg_reply_map_op)) / entry_sz; 291 + 292 + return min3(req_max, reply_max, NFP_BPF_MAP_CACHE_CNT); 422 293 } 423 294 424 295 void nfp_bpf_ctrl_msg_rx(struct nfp_app *app, struct sk_buff *skb)
+1
drivers/net/ethernet/netronome/nfp/bpf/fw.h
··· 24 24 NFP_BPF_CAP_TYPE_QUEUE_SELECT = 5, 25 25 NFP_BPF_CAP_TYPE_ADJUST_TAIL = 6, 26 26 NFP_BPF_CAP_TYPE_ABI_VERSION = 7, 27 + NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT = 8, 27 28 }; 28 29 29 30 struct nfp_bpf_cap_tlv_func {
+33
drivers/net/ethernet/netronome/nfp/bpf/main.c
··· 300 300 } 301 301 302 302 static int 303 + nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value, 304 + u32 length) 305 + { 306 + bpf->cmsg_multi_ent = true; 307 + return 0; 308 + } 309 + 310 + static int 303 311 nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value, 304 312 u32 length) 305 313 { ··· 383 375 length)) 384 376 goto err_release_free; 385 377 break; 378 + case NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT: 379 + if (nfp_bpf_parse_cap_cmsg_multi_ent(app->priv, value, 380 + length)) 381 + goto err_release_free; 382 + break; 386 383 default: 387 384 nfp_dbg(cpp, "unknown BPF capability: %d\n", type); 388 385 break; ··· 426 413 struct nfp_app_bpf *bpf = app->priv; 427 414 428 415 bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev); 416 + } 417 + 418 + static int nfp_bpf_start(struct nfp_app *app) 419 + { 420 + struct nfp_app_bpf *bpf = app->priv; 421 + 422 + if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) { 423 + nfp_err(bpf->app->cpp, 424 + "ctrl channel MTU below min required %u < %u\n", 425 + app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf)); 426 + return -EINVAL; 427 + } 428 + 429 + if (bpf->cmsg_multi_ent) 430 + bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf); 431 + else 432 + bpf->cmsg_cache_cnt = 1; 433 + 434 + return 0; 429 435 } 430 436 431 437 static int nfp_bpf_init(struct nfp_app *app) ··· 520 488 521 489 .init = nfp_bpf_init, 522 490 .clean = nfp_bpf_clean, 491 + .start = nfp_bpf_start, 523 492 524 493 .check_mtu = nfp_bpf_check_mtu, 525 494
+24
drivers/net/ethernet/netronome/nfp/bpf/main.h
··· 99 99 * @maps_neutral: hash table of offload-neutral maps (on pointer) 100 100 * 101 101 * @abi_version: global BPF ABI version 102 + * @cmsg_cache_cnt: number of entries to read for caching 102 103 * 103 104 * @adjust_head: adjust head capability 104 105 * @adjust_head.flags: extra flags for adjust head ··· 125 124 * @pseudo_random: FW initialized the pseudo-random machinery (CSRs) 126 125 * @queue_select: BPF can set the RX queue ID in packet vector 127 126 * @adjust_tail: BPF can simply trunc packet size for adjust tail 127 + * @cmsg_multi_ent: FW can pack multiple map entries in a single cmsg 128 128 */ 129 129 struct nfp_app_bpf { 130 130 struct nfp_app *app; ··· 135 133 136 134 unsigned int cmsg_key_sz; 137 135 unsigned int cmsg_val_sz; 136 + 137 + unsigned int cmsg_cache_cnt; 138 138 139 139 struct list_head map_list; 140 140 unsigned int maps_in_use; ··· 173 169 bool pseudo_random; 174 170 bool queue_select; 175 171 bool adjust_tail; 172 + bool cmsg_multi_ent; 176 173 }; 177 174 178 175 enum nfp_bpf_map_use { ··· 188 183 unsigned char non_zero_update :1; 189 184 }; 190 185 186 + #define NFP_BPF_MAP_CACHE_CNT 4U 187 + #define NFP_BPF_MAP_CACHE_TIME_NS (250 * 1000) 188 + 191 189 /** 192 190 * struct nfp_bpf_map - private per-map data attached to BPF maps for offload 193 191 * @offmap: pointer to the offloaded BPF map 194 192 * @bpf: back pointer to bpf app private structure 195 193 * @tid: table id identifying map on datapath 194 + * 195 + * @cache_lock: protects @cache_blockers, @cache_to, @cache 196 + * @cache_blockers: number of ops in flight which block caching 197 + * @cache_gen: counter incremented by every blocker on exit 198 + * @cache_to: time when cache will no longer be valid (ns) 199 + * @cache: skb with cached response 200 + * 196 201 * @l: link on the nfp_app_bpf->map_list list 197 202 * @use_map: map of how the value is used (in 4B chunks) 198 203 */ ··· 210 195 struct bpf_offloaded_map *offmap; 211 196 struct nfp_app_bpf *bpf; 212 197 u32 tid; 198 + 199 + spinlock_t cache_lock; 200 + u32 cache_blockers; 201 + u32 cache_gen; 202 + u64 cache_to; 203 + struct sk_buff *cache; 204 + 213 205 struct list_head l; 214 206 struct nfp_bpf_map_word use_map[]; 215 207 }; ··· 586 564 587 565 void *nfp_bpf_relo_for_vnic(struct nfp_prog *nfp_prog, struct nfp_bpf_vnic *bv); 588 566 567 + unsigned int nfp_bpf_ctrl_cmsg_min_mtu(struct nfp_app_bpf *bpf); 589 568 unsigned int nfp_bpf_ctrl_cmsg_mtu(struct nfp_app_bpf *bpf); 569 + unsigned int nfp_bpf_ctrl_cmsg_cache_cnt(struct nfp_app_bpf *bpf); 590 570 long long int 591 571 nfp_bpf_ctrl_alloc_map(struct nfp_app_bpf *bpf, struct bpf_map *map); 592 572 void
+3
drivers/net/ethernet/netronome/nfp/bpf/offload.c
··· 385 385 offmap->dev_priv = nfp_map; 386 386 nfp_map->offmap = offmap; 387 387 nfp_map->bpf = bpf; 388 + spin_lock_init(&nfp_map->cache_lock); 388 389 389 390 res = nfp_bpf_ctrl_alloc_map(bpf, &offmap->map); 390 391 if (res < 0) { ··· 408 407 struct nfp_bpf_map *nfp_map = offmap->dev_priv; 409 408 410 409 nfp_bpf_ctrl_free_map(bpf, nfp_map); 410 + dev_consume_skb_any(nfp_map->cache); 411 + WARN_ON_ONCE(nfp_map->cache_blockers); 411 412 list_del_init(&nfp_map->l); 412 413 bpf->map_elems_in_use -= offmap->map.max_entries; 413 414 bpf->maps_in_use--;
+1 -1
drivers/net/ethernet/netronome/nfp/nfp_net.h
··· 66 66 #define NFP_NET_MAX_DMA_BITS 40 67 67 68 68 /* Default size for MTU and freelist buffer sizes */ 69 - #define NFP_NET_DEFAULT_MTU 1500 69 + #define NFP_NET_DEFAULT_MTU 1500U 70 70 71 71 /* Maximum number of bytes prepended to a packet */ 72 72 #define NFP_NET_MAX_PREPEND 64
+1 -8
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 4116 4116 4117 4117 /* Set default MTU and Freelist buffer size */ 4118 4118 if (!nfp_net_is_data_vnic(nn) && nn->app->ctrl_mtu) { 4119 - if (nn->app->ctrl_mtu <= nn->max_mtu) { 4120 - nn->dp.mtu = nn->app->ctrl_mtu; 4121 - } else { 4122 - if (nn->app->ctrl_mtu != NFP_APP_CTRL_MTU_MAX) 4123 - nn_warn(nn, "app requested MTU above max supported %u > %u\n", 4124 - nn->app->ctrl_mtu, nn->max_mtu); 4125 - nn->dp.mtu = nn->max_mtu; 4126 - } 4119 + nn->dp.mtu = min(nn->app->ctrl_mtu, nn->max_mtu); 4127 4120 } else if (nn->max_mtu < NFP_NET_DEFAULT_MTU) { 4128 4121 nn->dp.mtu = nn->max_mtu; 4129 4122 } else {
+5
include/linux/bpf.h
··· 24 24 struct btf; 25 25 struct btf_type; 26 26 27 + extern struct idr btf_idr; 28 + extern spinlock_t btf_idr_lock; 29 + 27 30 /* map is generic key/value storage optionally accesible by eBPF programs */ 28 31 struct bpf_map_ops { 29 32 /* funcs callable from userspace (via syscall) */ ··· 650 647 struct bpf_map *bpf_map_get_with_uref(u32 ufd); 651 648 struct bpf_map *__bpf_map_get(struct fd f); 652 649 struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); 650 + struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map, 651 + bool uref); 653 652 void bpf_map_put_with_uref(struct bpf_map *map); 654 653 void bpf_map_put(struct bpf_map *map); 655 654 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
+1
include/linux/bpf_verifier.h
··· 355 355 struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */ 356 356 int stack_size; /* number of states to be processed */ 357 357 bool strict_alignment; /* perform strict pointer alignment checks */ 358 + bool test_state_freq; /* test verifier with different pruning frequency */ 358 359 struct bpf_verifier_state *cur_state; /* current verifier state */ 359 360 struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ 360 361 struct bpf_verifier_state_list *free_list;
+12 -2
include/linux/netdevice.h
··· 901 901 }; 902 902 }; 903 903 904 + /* Flags for ndo_xsk_wakeup. */ 905 + #define XDP_WAKEUP_RX (1 << 0) 906 + #define XDP_WAKEUP_TX (1 << 1) 907 + 904 908 #ifdef CONFIG_XFRM_OFFLOAD 905 909 struct xfrmdev_ops { 906 910 int (*xdo_dev_state_add) (struct xfrm_state *x); ··· 1231 1227 * that got dropped are freed/returned via xdp_return_frame(). 1232 1228 * Returns negative number, means general error invoking ndo, meaning 1233 1229 * no frames were xmit'ed and core-caller will free all frames. 1230 + * int (*ndo_xsk_wakeup)(struct net_device *dev, u32 queue_id, u32 flags); 1231 + * This function is used to wake up the softirq, ksoftirqd or kthread 1232 + * responsible for sending and/or receiving packets on a specific 1233 + * queue id bound to an AF_XDP socket. The flags field specifies if 1234 + * only RX, only Tx, or both should be woken up using the flags 1235 + * XDP_WAKEUP_RX and XDP_WAKEUP_TX. 1234 1236 * struct devlink_port *(*ndo_get_devlink_port)(struct net_device *dev); 1235 1237 * Get devlink port instance associated with a given netdev. 1236 1238 * Called with a reference on the netdevice and devlink locks only, ··· 1436 1426 int (*ndo_xdp_xmit)(struct net_device *dev, int n, 1437 1427 struct xdp_frame **xdp, 1438 1428 u32 flags); 1439 - int (*ndo_xsk_async_xmit)(struct net_device *dev, 1440 - u32 queue_id); 1429 + int (*ndo_xsk_wakeup)(struct net_device *dev, 1430 + u32 queue_id, u32 flags); 1441 1431 struct devlink_port * (*ndo_get_devlink_port)(struct net_device *dev); 1442 1432 }; 1443 1433
+6
include/linux/tnum.h
··· 5 5 * propagate the unknown bits such that the tnum result represents all the 6 6 * possible results for possible values of the operands. 7 7 */ 8 + 9 + #ifndef _LINUX_TNUM_H 10 + #define _LINUX_TNUM_H 11 + 8 12 #include <linux/types.h> 9 13 10 14 struct tnum { ··· 85 81 int tnum_strn(char *str, size_t size, struct tnum a); 86 82 /* Format a tnum as tristate binary expansion */ 87 83 int tnum_sbin(char *str, size_t size, struct tnum a); 84 + 85 + #endif /* _LINUX_TNUM_H */
+10
include/net/bpf_sk_storage.h
··· 10 10 extern const struct bpf_func_proto bpf_sk_storage_get_proto; 11 11 extern const struct bpf_func_proto bpf_sk_storage_delete_proto; 12 12 13 + #ifdef CONFIG_BPF_SYSCALL 14 + int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk); 15 + #else 16 + static inline int bpf_sk_storage_clone(const struct sock *sk, 17 + struct sock *newsk) 18 + { 19 + return 0; 20 + } 21 + #endif 22 + 13 23 #endif /* _BPF_SK_STORAGE_H */
+119 -3
include/net/xdp_sock.h
··· 16 16 struct net_device; 17 17 struct xsk_queue; 18 18 19 + /* Masks for xdp_umem_page flags. 20 + * The low 12-bits of the addr will be 0 since this is the page address, so we 21 + * can use them for flags. 22 + */ 23 + #define XSK_NEXT_PG_CONTIG_SHIFT 0 24 + #define XSK_NEXT_PG_CONTIG_MASK (1ULL << XSK_NEXT_PG_CONTIG_SHIFT) 25 + 19 26 struct xdp_umem_page { 20 27 void *addr; 21 28 dma_addr_t dma; ··· 33 26 u32 length; 34 27 u64 handles[]; 35 28 }; 29 + 30 + /* Flags for the umem flags field. 31 + * 32 + * The NEED_WAKEUP flag is 1 due to the reuse of the flags field for public 33 + * flags. See inlude/uapi/include/linux/if_xdp.h. 34 + */ 35 + #define XDP_UMEM_USES_NEED_WAKEUP (1 << 1) 36 36 37 37 struct xdp_umem { 38 38 struct xsk_queue *fq; ··· 55 41 struct work_struct work; 56 42 struct page **pgs; 57 43 u32 npgs; 44 + u16 queue_id; 45 + u8 need_wakeup; 46 + u8 flags; 58 47 int id; 59 48 struct net_device *dev; 60 49 struct xdp_umem_fq_reuse *fq_reuse; 61 - u16 queue_id; 62 50 bool zc; 63 51 spinlock_t xsk_list_lock; 64 52 struct list_head xsk_list; 53 + }; 54 + 55 + /* Nodes are linked in the struct xdp_sock map_list field, and used to 56 + * track which maps a certain socket reside in. 57 + */ 58 + struct xsk_map; 59 + struct xsk_map_node { 60 + struct list_head node; 61 + struct xsk_map *map; 62 + struct xdp_sock **map_entry; 65 63 }; 66 64 67 65 struct xdp_sock { ··· 101 75 /* Protects generic receive. */ 102 76 spinlock_t rx_lock; 103 77 u64 rx_dropped; 78 + struct list_head map_list; 79 + /* Protects map_list */ 80 + spinlock_t map_list_lock; 104 81 }; 105 82 106 83 struct xdp_buff; ··· 124 95 struct xdp_umem_fq_reuse *newq); 125 96 void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq); 126 97 struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id); 98 + void xsk_set_rx_need_wakeup(struct xdp_umem *umem); 99 + void xsk_set_tx_need_wakeup(struct xdp_umem *umem); 100 + void xsk_clear_rx_need_wakeup(struct xdp_umem *umem); 101 + void xsk_clear_tx_need_wakeup(struct xdp_umem *umem); 102 + bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem); 103 + 104 + void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, 105 + struct xdp_sock **map_entry); 106 + int xsk_map_inc(struct xsk_map *map); 107 + void xsk_map_put(struct xsk_map *map); 108 + 109 + static inline u64 xsk_umem_extract_addr(u64 addr) 110 + { 111 + return addr & XSK_UNALIGNED_BUF_ADDR_MASK; 112 + } 113 + 114 + static inline u64 xsk_umem_extract_offset(u64 addr) 115 + { 116 + return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT; 117 + } 118 + 119 + static inline u64 xsk_umem_add_offset_to_addr(u64 addr) 120 + { 121 + return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr); 122 + } 127 123 128 124 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) 129 125 { 130 - return umem->pages[addr >> PAGE_SHIFT].addr + (addr & (PAGE_SIZE - 1)); 126 + unsigned long page_addr; 127 + 128 + addr = xsk_umem_add_offset_to_addr(addr); 129 + page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr; 130 + 131 + return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK); 131 132 } 132 133 133 134 static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr) 134 135 { 135 - return umem->pages[addr >> PAGE_SHIFT].dma + (addr & (PAGE_SIZE - 1)); 136 + addr = xsk_umem_add_offset_to_addr(addr); 137 + 138 + return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK); 136 139 } 137 140 138 141 /* Reuse-queue aware version of FILL queue helpers */ ··· 204 143 struct 
xdp_umem_fq_reuse *rq = umem->fq_reuse; 205 144 206 145 rq->handles[rq->length++] = addr; 146 + } 147 + 148 + /* Handle the offset appropriately depending on aligned or unaligned mode. 149 + * For unaligned mode, we store the offset in the upper 16-bits of the address. 150 + * For aligned mode, we simply add the offset to the address. 151 + */ 152 + static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address, 153 + u64 offset) 154 + { 155 + if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) 156 + return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT); 157 + else 158 + return address + offset; 207 159 } 208 160 #else 209 161 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) ··· 287 213 return NULL; 288 214 } 289 215 216 + static inline u64 xsk_umem_extract_addr(u64 addr) 217 + { 218 + return 0; 219 + } 220 + 221 + static inline u64 xsk_umem_extract_offset(u64 addr) 222 + { 223 + return 0; 224 + } 225 + 226 + static inline u64 xsk_umem_add_offset_to_addr(u64 addr) 227 + { 228 + return 0; 229 + } 230 + 290 231 static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr) 291 232 { 292 233 return NULL; ··· 328 239 329 240 static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr) 330 241 { 242 + } 243 + 244 + static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem) 245 + { 246 + } 247 + 248 + static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem) 249 + { 250 + } 251 + 252 + static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) 253 + { 254 + } 255 + 256 + static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) 257 + { 258 + } 259 + 260 + static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) 261 + { 262 + return false; 263 + } 264 + 265 + static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle, 266 + u64 offset) 267 + { 268 + return 0; 331 269 } 332 270 333 271 #endif /* CONFIG_XDP_SOCKETS */
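A short illustrative sketch of the unaligned-chunk address layout the new xsk_umem_adjust_offset() and xsk_umem_extract_*() helpers operate on: in unaligned mode the offset is stored in the upper 16 bits of the 64-bit addr and the buffer address in the lower 48 bits. The constants mirror the UAPI values added in this series; the sample base address and offset are made up:

#include <assert.h>
#include <stdint.h>

#define XSK_UNALIGNED_BUF_OFFSET_SHIFT	48
#define XSK_UNALIGNED_BUF_ADDR_MASK \
	((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

int main(void)
{
	uint64_t base = 0x12345;	/* hypothetical umem address */
	uint64_t offset = 256;		/* e.g. headroom plus data offset */

	/* what xsk_umem_adjust_offset() does in unaligned chunks mode */
	uint64_t handle = base + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);

	/* what xsk_umem_extract_addr(), _extract_offset() and
	 * _add_offset_to_addr() recover from that handle
	 */
	assert((handle & XSK_UNALIGNED_BUF_ADDR_MASK) == base);
	assert((handle >> XSK_UNALIGNED_BUF_OFFSET_SHIFT) == offset);
	assert((handle & XSK_UNALIGNED_BUF_ADDR_MASK) +
	       (handle >> XSK_UNALIGNED_BUF_OFFSET_SHIFT) == base + offset);
	return 0;
}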
+12 -3
include/uapi/linux/bpf.h
··· 106 106 BPF_TASK_FD_QUERY, 107 107 BPF_MAP_LOOKUP_AND_DELETE_ELEM, 108 108 BPF_MAP_FREEZE, 109 + BPF_BTF_GET_NEXT_ID, 109 110 }; 110 111 111 112 enum bpf_map_type { ··· 285 284 */ 286 285 #define BPF_F_TEST_RND_HI32 (1U << 2) 287 286 287 + /* The verifier internal test flag. Behavior is undefined */ 288 + #define BPF_F_TEST_STATE_FREQ (1U << 3) 289 + 288 290 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have 289 291 * two extensions: 290 292 * ··· 340 336 /* Flags for accessing BPF object from program side. */ 341 337 #define BPF_F_RDONLY_PROG (1U << 7) 342 338 #define BPF_F_WRONLY_PROG (1U << 8) 339 + 340 + /* Clone map from listener for newly accepted socket */ 341 + #define BPF_F_CLONE (1U << 9) 343 342 344 343 /* flags for BPF_PROG_QUERY */ 345 344 #define BPF_F_QUERY_EFFECTIVE (1U << 0) ··· 583 576 * limited to five). 584 577 * 585 578 * Each time the helper is called, it appends a line to the trace. 579 + * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is 580 + * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. 586 581 * The format of the trace is customizable, and the exact output 587 582 * one will get depends on the options set in 588 583 * *\/sys/kernel/debug/tracing/trace_options* (see also the ··· 1023 1014 * The realm of the route for the packet associated to *skb*, or 0 1024 1015 * if none was found. 1025 1016 * 1026 - * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 1017 + * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 1027 1018 * Description 1028 1019 * Write raw *data* blob into a special BPF perf event held by 1029 1020 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf ··· 1085 1076 * Return 1086 1077 * 0 on success, or a negative error in case of failure. 1087 1078 * 1088 - * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags) 1079 + * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags) 1089 1080 * Description 1090 1081 * Walk a user or a kernel stack and return its id. To achieve 1091 1082 * this, the helper needs *ctx*, which is a pointer to the context ··· 1734 1725 * Return 1735 1726 * 0 on success, or a negative error in case of failure. 1736 1727 * 1737 - * int bpf_override_return(struct pt_reg *regs, u64 rc) 1728 + * int bpf_override_return(struct pt_regs *regs, u64 rc) 1738 1729 * Description 1739 1730 * Used for error injection, this helper uses kprobes to override 1740 1731 * the return value of the probed function, and to set it to *rc*.
+22
include/uapi/linux/if_xdp.h
··· 16 16 #define XDP_SHARED_UMEM (1 << 0) 17 17 #define XDP_COPY (1 << 1) /* Force copy-mode */ 18 18 #define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */ 19 + /* If this option is set, the driver might go sleep and in that case 20 + * the XDP_RING_NEED_WAKEUP flag in the fill and/or Tx rings will be 21 + * set. If it is set, the application need to explicitly wake up the 22 + * driver with a poll() (Rx and Tx) or sendto() (Tx only). If you are 23 + * running the driver and the application on the same core, you should 24 + * use this option so that the kernel will yield to the user space 25 + * application. 26 + */ 27 + #define XDP_USE_NEED_WAKEUP (1 << 3) 28 + 29 + /* Flags for xsk_umem_config flags */ 30 + #define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) 19 31 20 32 struct sockaddr_xdp { 21 33 __u16 sxdp_family; ··· 37 25 __u32 sxdp_shared_umem_fd; 38 26 }; 39 27 28 + /* XDP_RING flags */ 29 + #define XDP_RING_NEED_WAKEUP (1 << 0) 30 + 40 31 struct xdp_ring_offset { 41 32 __u64 producer; 42 33 __u64 consumer; 43 34 __u64 desc; 35 + __u64 flags; 44 36 }; 45 37 46 38 struct xdp_mmap_offsets { ··· 69 53 __u64 len; /* Length of packet data area */ 70 54 __u32 chunk_size; 71 55 __u32 headroom; 56 + __u32 flags; 72 57 }; 73 58 74 59 struct xdp_statistics { ··· 90 73 #define XDP_PGOFF_TX_RING 0x80000000 91 74 #define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL 92 75 #define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL 76 + 77 + /* Masks for unaligned chunks mode */ 78 + #define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48 79 + #define XSK_UNALIGNED_BUF_ADDR_MASK \ 80 + ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1) 93 81 94 82 /* Rx/Tx descriptor */ 95 83 struct xdp_desc {
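As a rough user-space sketch (not part of this commit): an application that binds its AF_XDP socket with XDP_USE_NEED_WAKEUP only needs to issue the Tx kick when the kernel has set XDP_RING_NEED_WAKEUP in the ring's flags word, which it can locate through the new xdp_ring_offset.flags field. The plumbing around tx_flags below is assumed; only the flag check and the sendto() kick are the point. Fill/Rx-ring wakeups would use poll() instead, per the comment above:

#include <stdint.h>
#include <sys/socket.h>
#include <linux/if_xdp.h>	/* needs headers that contain this change */

/* tx_flags is assumed to point at the mmap'ed Tx ring's flags word,
 * i.e. ring base + offsets.tx.flags from the XDP_MMAP_OFFSETS sockopt.
 */
static void kick_tx_if_needed(int xsk_fd, const volatile uint32_t *tx_flags)
{
	if (*tx_flags & XDP_RING_NEED_WAKEUP)
		/* Driver went to sleep; wake it up for Tx processing. */
		sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
	/* Otherwise the driver is still running and will pick up the new
	 * descriptors without any syscall.
	 */
}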
+14 -2
kernel/bpf/btf.c
··· 195 195 i < btf_type_vlen(struct_type); \ 196 196 i++, member++) 197 197 198 - static DEFINE_IDR(btf_idr); 199 - static DEFINE_SPINLOCK(btf_idr_lock); 198 + DEFINE_IDR(btf_idr); 199 + DEFINE_SPINLOCK(btf_idr_lock); 200 200 201 201 struct btf { 202 202 void *data; ··· 3376 3376 btf_type_ops(t)->seq_show(btf, t, type_id, obj, 0, m); 3377 3377 } 3378 3378 3379 + #ifdef CONFIG_PROC_FS 3380 + static void bpf_btf_show_fdinfo(struct seq_file *m, struct file *filp) 3381 + { 3382 + const struct btf *btf = filp->private_data; 3383 + 3384 + seq_printf(m, "btf_id:\t%u\n", btf->id); 3385 + } 3386 + #endif 3387 + 3379 3388 static int btf_release(struct inode *inode, struct file *filp) 3380 3389 { 3381 3390 btf_put(filp->private_data); ··· 3392 3383 } 3393 3384 3394 3385 const struct file_operations btf_fops = { 3386 + #ifdef CONFIG_PROC_FS 3387 + .show_fdinfo = bpf_btf_show_fdinfo, 3388 + #endif 3395 3389 .release = btf_release, 3396 3390 }; 3397 3391
+18 -3
kernel/bpf/syscall.c
··· 683 683 } 684 684 685 685 /* map_idr_lock should have been held */ 686 - static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, 687 - bool uref) 686 + static struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, 687 + bool uref) 688 688 { 689 689 int refold; 690 690 ··· 703 703 704 704 return map; 705 705 } 706 + 707 + struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map, bool uref) 708 + { 709 + spin_lock_bh(&map_idr_lock); 710 + map = __bpf_map_inc_not_zero(map, uref); 711 + spin_unlock_bh(&map_idr_lock); 712 + 713 + return map; 714 + } 715 + EXPORT_SYMBOL_GPL(bpf_map_inc_not_zero); 706 716 707 717 int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value) 708 718 { ··· 1629 1619 1630 1620 if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | 1631 1621 BPF_F_ANY_ALIGNMENT | 1622 + BPF_F_TEST_STATE_FREQ | 1632 1623 BPF_F_TEST_RND_HI32)) 1633 1624 return -EINVAL; 1634 1625 ··· 2194 2183 spin_lock_bh(&map_idr_lock); 2195 2184 map = idr_find(&map_idr, id); 2196 2185 if (map) 2197 - map = bpf_map_inc_not_zero(map, true); 2186 + map = __bpf_map_inc_not_zero(map, true); 2198 2187 else 2199 2188 map = ERR_PTR(-ENOENT); 2200 2189 spin_unlock_bh(&map_idr_lock); ··· 2890 2879 case BPF_MAP_GET_NEXT_ID: 2891 2880 err = bpf_obj_get_next_id(&attr, uattr, 2892 2881 &map_idr, &map_idr_lock); 2882 + break; 2883 + case BPF_BTF_GET_NEXT_ID: 2884 + err = bpf_obj_get_next_id(&attr, uattr, 2885 + &btf_idr, &btf_idr_lock); 2893 2886 break; 2894 2887 case BPF_PROG_GET_FD_BY_ID: 2895 2888 err = bpf_prog_get_fd_by_id(&attr);
+2 -7
kernel/bpf/sysfs_btf.c
··· 30 30 31 31 static int __init btf_vmlinux_init(void) 32 32 { 33 - int err; 34 - 35 33 if (!_binary__btf_vmlinux_bin_start) 36 34 return 0; 37 35 38 36 btf_kobj = kobject_create_and_add("btf", kernel_kobj); 39 - if (IS_ERR(btf_kobj)) { 40 - err = PTR_ERR(btf_kobj); 41 - btf_kobj = NULL; 42 - return err; 43 - } 37 + if (!btf_kobj) 38 + return -ENOMEM; 44 39 45 40 bin_attr_btf_vmlinux.size = _binary__btf_vmlinux_bin_end - 46 41 _binary__btf_vmlinux_bin_start;
+4 -1
kernel/bpf/verifier.c
··· 7223 7223 struct bpf_verifier_state_list *sl, **pprev; 7224 7224 struct bpf_verifier_state *cur = env->cur_state, *new; 7225 7225 int i, j, err, states_cnt = 0; 7226 - bool add_new_state = false; 7226 + bool add_new_state = env->test_state_freq ? true : false; 7227 7227 7228 7228 cur->last_insn_idx = env->prev_insn_idx; 7229 7229 if (!env->insn_aux_data[insn_idx].prune_point) ··· 9262 9262 env->strict_alignment = false; 9263 9263 9264 9264 env->allow_ptr_leaks = is_priv; 9265 + 9266 + if (is_priv) 9267 + env->test_state_freq = attr->prog_flags & BPF_F_TEST_STATE_FREQ; 9265 9268 9266 9269 ret = replace_map_fd_with_map_ptr(env); 9267 9270 if (ret < 0)
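BPF_F_TEST_STATE_FREQ is only honoured for privileged loaders and exists so the new precision-tracking selftests can force the verifier to push a new checkpoint state at every pruning opportunity instead of relying on its usual heuristic. A hypothetical minimal loader passing the flag through prog_flags (raw syscall, trivial "r0 = 0; exit" program):

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    /* Hypothetical: load a trivial program with verifier checkpointing forced
     * at every pruning point. The flag is ignored for unprivileged callers.
     */
    static int load_with_state_freq(void)
    {
    	struct bpf_insn insns[] = {
    		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
    		{ .code = BPF_JMP | BPF_EXIT },
    	};
    	union bpf_attr attr;

    	memset(&attr, 0, sizeof(attr));
    	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
    	attr.insns = (__u64)(unsigned long)insns;
    	attr.insn_cnt = sizeof(insns) / sizeof(insns[0]);
    	attr.license = (__u64)(unsigned long)"GPL";
    	attr.prog_flags = BPF_F_TEST_STATE_FREQ;

    	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
    }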
+111 -22
kernel/bpf/xskmap.c
··· 13 13 struct bpf_map map; 14 14 struct xdp_sock **xsk_map; 15 15 struct list_head __percpu *flush_list; 16 + spinlock_t lock; /* Synchronize map updates */ 16 17 }; 18 + 19 + int xsk_map_inc(struct xsk_map *map) 20 + { 21 + struct bpf_map *m = &map->map; 22 + 23 + m = bpf_map_inc(m, false); 24 + return PTR_ERR_OR_ZERO(m); 25 + } 26 + 27 + void xsk_map_put(struct xsk_map *map) 28 + { 29 + bpf_map_put(&map->map); 30 + } 31 + 32 + static struct xsk_map_node *xsk_map_node_alloc(struct xsk_map *map, 33 + struct xdp_sock **map_entry) 34 + { 35 + struct xsk_map_node *node; 36 + int err; 37 + 38 + node = kzalloc(sizeof(*node), GFP_ATOMIC | __GFP_NOWARN); 39 + if (!node) 40 + return NULL; 41 + 42 + err = xsk_map_inc(map); 43 + if (err) { 44 + kfree(node); 45 + return ERR_PTR(err); 46 + } 47 + 48 + node->map = map; 49 + node->map_entry = map_entry; 50 + return node; 51 + } 52 + 53 + static void xsk_map_node_free(struct xsk_map_node *node) 54 + { 55 + xsk_map_put(node->map); 56 + kfree(node); 57 + } 58 + 59 + static void xsk_map_sock_add(struct xdp_sock *xs, struct xsk_map_node *node) 60 + { 61 + spin_lock_bh(&xs->map_list_lock); 62 + list_add_tail(&node->node, &xs->map_list); 63 + spin_unlock_bh(&xs->map_list_lock); 64 + } 65 + 66 + static void xsk_map_sock_delete(struct xdp_sock *xs, 67 + struct xdp_sock **map_entry) 68 + { 69 + struct xsk_map_node *n, *tmp; 70 + 71 + spin_lock_bh(&xs->map_list_lock); 72 + list_for_each_entry_safe(n, tmp, &xs->map_list, node) { 73 + if (map_entry == n->map_entry) { 74 + list_del(&n->node); 75 + xsk_map_node_free(n); 76 + } 77 + } 78 + spin_unlock_bh(&xs->map_list_lock); 79 + } 17 80 18 81 static struct bpf_map *xsk_map_alloc(union bpf_attr *attr) 19 82 { ··· 97 34 return ERR_PTR(-ENOMEM); 98 35 99 36 bpf_map_init_from_attr(&m->map, attr); 37 + spin_lock_init(&m->lock); 100 38 101 39 cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *); 102 40 cost += sizeof(struct list_head) * num_possible_cpus(); ··· 135 71 static void xsk_map_free(struct bpf_map *map) 136 72 { 137 73 struct xsk_map *m = container_of(map, struct xsk_map, map); 138 - int i; 139 74 140 75 bpf_clear_redirect_map(map); 141 76 synchronize_net(); 142 - 143 - for (i = 0; i < map->max_entries; i++) { 144 - struct xdp_sock *xs; 145 - 146 - xs = m->xsk_map[i]; 147 - if (!xs) 148 - continue; 149 - 150 - sock_put((struct sock *)xs); 151 - } 152 - 153 77 free_percpu(m->flush_list); 154 78 bpf_map_area_free(m->xsk_map); 155 79 kfree(m); ··· 216 164 u64 map_flags) 217 165 { 218 166 struct xsk_map *m = container_of(map, struct xsk_map, map); 167 + struct xdp_sock *xs, *old_xs, **map_entry; 219 168 u32 i = *(u32 *)key, fd = *(u32 *)value; 220 - struct xdp_sock *xs, *old_xs; 169 + struct xsk_map_node *node; 221 170 struct socket *sock; 222 171 int err; 223 172 ··· 226 173 return -EINVAL; 227 174 if (unlikely(i >= m->map.max_entries)) 228 175 return -E2BIG; 229 - if (unlikely(map_flags == BPF_NOEXIST)) 230 - return -EEXIST; 231 176 232 177 sock = sockfd_lookup(fd, &err); 233 178 if (!sock) ··· 243 192 return -EOPNOTSUPP; 244 193 } 245 194 246 - sock_hold(sock->sk); 195 + map_entry = &m->xsk_map[i]; 196 + node = xsk_map_node_alloc(m, map_entry); 197 + if (IS_ERR(node)) { 198 + sockfd_put(sock); 199 + return PTR_ERR(node); 200 + } 247 201 248 - old_xs = xchg(&m->xsk_map[i], xs); 202 + spin_lock_bh(&m->lock); 203 + old_xs = READ_ONCE(*map_entry); 204 + if (old_xs == xs) { 205 + err = 0; 206 + goto out; 207 + } else if (old_xs && map_flags == BPF_NOEXIST) { 208 + err = -EEXIST; 209 + goto out; 210 + } else if 
(!old_xs && map_flags == BPF_EXIST) { 211 + err = -ENOENT; 212 + goto out; 213 + } 214 + xsk_map_sock_add(xs, node); 215 + WRITE_ONCE(*map_entry, xs); 249 216 if (old_xs) 250 - sock_put((struct sock *)old_xs); 251 - 217 + xsk_map_sock_delete(old_xs, map_entry); 218 + spin_unlock_bh(&m->lock); 252 219 sockfd_put(sock); 253 220 return 0; 221 + 222 + out: 223 + spin_unlock_bh(&m->lock); 224 + sockfd_put(sock); 225 + xsk_map_node_free(node); 226 + return err; 254 227 } 255 228 256 229 static int xsk_map_delete_elem(struct bpf_map *map, void *key) 257 230 { 258 231 struct xsk_map *m = container_of(map, struct xsk_map, map); 259 - struct xdp_sock *old_xs; 232 + struct xdp_sock *old_xs, **map_entry; 260 233 int k = *(u32 *)key; 261 234 262 235 if (k >= map->max_entries) 263 236 return -EINVAL; 264 237 265 - old_xs = xchg(&m->xsk_map[k], NULL); 238 + spin_lock_bh(&m->lock); 239 + map_entry = &m->xsk_map[k]; 240 + old_xs = xchg(map_entry, NULL); 266 241 if (old_xs) 267 - sock_put((struct sock *)old_xs); 242 + xsk_map_sock_delete(old_xs, map_entry); 243 + spin_unlock_bh(&m->lock); 268 244 269 245 return 0; 246 + } 247 + 248 + void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs, 249 + struct xdp_sock **map_entry) 250 + { 251 + spin_lock_bh(&map->lock); 252 + if (READ_ONCE(*map_entry) == xs) { 253 + WRITE_ONCE(*map_entry, NULL); 254 + xsk_map_sock_delete(xs, map_entry); 255 + } 256 + spin_unlock_bh(&map->lock); 270 257 } 271 258 272 259 const struct bpf_map_ops xsk_map_ops = {
+2 -1
kernel/trace/Kconfig
··· 520 520 bool 521 521 default y 522 522 help 523 - This allows the user to attach BPF programs to kprobe events. 523 + This allows the user to attach BPF programs to kprobe, uprobe, and 524 + tracepoint events. 524 525 525 526 config DYNAMIC_EVENTS 526 527 def_bool n
+1 -1
lib/test_bpf.c
··· 867 867 }, 868 868 CLASSIC, 869 869 { }, 870 - { { 4, 10 ^ 300 }, { 20, 10 ^ 300 } }, 870 + { { 4, 0xA ^ 300 }, { 20, 0xA ^ 300 } }, 871 871 }, 872 872 { 873 873 "SPILL_FILL",
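The test_bpf.c tweak is purely cosmetic: the '^' operator is XOR in C, so "10 ^ 300" was never ten to the 300th power, and spelling the left operand as 0xA makes the bitwise intent explicit. Both forms evaluate to the same expected value, which a compile-time check can confirm:

    /* 0b1010 ^ 0b100101100 == 294; the rewrite does not change the test vector. */
    _Static_assert((10 ^ 300) == (0xA ^ 300) && (10 ^ 300) == 294,
    	       "same constant either way");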
+101 -3
net/core/bpf_sk_storage.c
··· 12 12 13 13 static atomic_t cache_idx; 14 14 15 + #define SK_STORAGE_CREATE_FLAG_MASK \ 16 + (BPF_F_NO_PREALLOC | BPF_F_CLONE) 17 + 15 18 struct bucket { 16 19 struct hlist_head list; 17 20 raw_spinlock_t lock; ··· 212 209 kfree_rcu(sk_storage, rcu); 213 210 } 214 211 215 - /* sk_storage->lock must be held and sk_storage->list cannot be empty */ 216 212 static void __selem_link_sk(struct bpf_sk_storage *sk_storage, 217 213 struct bpf_sk_storage_elem *selem) 218 214 { ··· 511 509 return 0; 512 510 } 513 511 514 - /* Called by __sk_destruct() */ 512 + /* Called by __sk_destruct() & bpf_sk_storage_clone() */ 515 513 void bpf_sk_storage_free(struct sock *sk) 516 514 { 517 515 struct bpf_sk_storage_elem *selem; ··· 559 557 560 558 smap = (struct bpf_sk_storage_map *)map; 561 559 560 + /* Note that this map might be concurrently cloned from 561 + * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone 562 + * RCU read section to finish before proceeding. New RCU 563 + * read sections should be prevented via bpf_map_inc_not_zero. 564 + */ 562 565 synchronize_rcu(); 563 566 564 567 /* bpf prog and the userspace can no longer access this map ··· 608 601 609 602 static int bpf_sk_storage_map_alloc_check(union bpf_attr *attr) 610 603 { 611 - if (attr->map_flags != BPF_F_NO_PREALLOC || attr->max_entries || 604 + if (attr->map_flags & ~SK_STORAGE_CREATE_FLAG_MASK || 605 + !(attr->map_flags & BPF_F_NO_PREALLOC) || 606 + attr->max_entries || 612 607 attr->key_size != sizeof(int) || !attr->value_size || 613 608 /* Enforce BTF for userspace sk dumping */ 614 609 !attr->btf_key_type_id || !attr->btf_value_type_id) ··· 746 737 } 747 738 748 739 return err; 740 + } 741 + 742 + static struct bpf_sk_storage_elem * 743 + bpf_sk_storage_clone_elem(struct sock *newsk, 744 + struct bpf_sk_storage_map *smap, 745 + struct bpf_sk_storage_elem *selem) 746 + { 747 + struct bpf_sk_storage_elem *copy_selem; 748 + 749 + copy_selem = selem_alloc(smap, newsk, NULL, true); 750 + if (!copy_selem) 751 + return NULL; 752 + 753 + if (map_value_has_spin_lock(&smap->map)) 754 + copy_map_value_locked(&smap->map, SDATA(copy_selem)->data, 755 + SDATA(selem)->data, true); 756 + else 757 + copy_map_value(&smap->map, SDATA(copy_selem)->data, 758 + SDATA(selem)->data); 759 + 760 + return copy_selem; 761 + } 762 + 763 + int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk) 764 + { 765 + struct bpf_sk_storage *new_sk_storage = NULL; 766 + struct bpf_sk_storage *sk_storage; 767 + struct bpf_sk_storage_elem *selem; 768 + int ret = 0; 769 + 770 + RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL); 771 + 772 + rcu_read_lock(); 773 + sk_storage = rcu_dereference(sk->sk_bpf_storage); 774 + 775 + if (!sk_storage || hlist_empty(&sk_storage->list)) 776 + goto out; 777 + 778 + hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) { 779 + struct bpf_sk_storage_elem *copy_selem; 780 + struct bpf_sk_storage_map *smap; 781 + struct bpf_map *map; 782 + 783 + smap = rcu_dereference(SDATA(selem)->smap); 784 + if (!(smap->map.map_flags & BPF_F_CLONE)) 785 + continue; 786 + 787 + /* Note that for lockless listeners adding new element 788 + * here can race with cleanup in bpf_sk_storage_map_free. 789 + * Try to grab map refcnt to make sure that it's still 790 + * alive and prevent concurrent removal. 
791 + */ 792 + map = bpf_map_inc_not_zero(&smap->map, false); 793 + if (IS_ERR(map)) 794 + continue; 795 + 796 + copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem); 797 + if (!copy_selem) { 798 + ret = -ENOMEM; 799 + bpf_map_put(map); 800 + goto out; 801 + } 802 + 803 + if (new_sk_storage) { 804 + selem_link_map(smap, copy_selem); 805 + __selem_link_sk(new_sk_storage, copy_selem); 806 + } else { 807 + ret = sk_storage_alloc(newsk, smap, copy_selem); 808 + if (ret) { 809 + kfree(copy_selem); 810 + atomic_sub(smap->elem_size, 811 + &newsk->sk_omem_alloc); 812 + bpf_map_put(map); 813 + goto out; 814 + } 815 + 816 + new_sk_storage = rcu_dereference(copy_selem->sk_storage); 817 + } 818 + bpf_map_put(map); 819 + } 820 + 821 + out: 822 + rcu_read_unlock(); 823 + 824 + /* In case of an error, don't free anything explicitly here, the 825 + * caller is responsible to call bpf_sk_storage_free. 826 + */ 827 + 828 + return ret; 749 829 } 750 830 751 831 BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
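To opt in, a socket-storage map has to be created with BPF_F_CLONE (on top of the mandatory BPF_F_NO_PREALLOC); storage the listener populated via bpf_sk_storage_get() with BPF_SK_STORAGE_GET_F_CREATE is then copied into sockets cloned from it (see the net/core/sock.c hunk below). A sketch of the BPF-side map definition, using the BTF-defined map syntax from the selftests' bpf_helpers.h; struct and map names are made up:

    #include <linux/bpf.h>
    #include "bpf_helpers.h"

    struct conn_stats {
    	__u64 rx_pkts;
    	__u64 tx_pkts;
    };

    /* Per-socket storage that is inherited by sockets cloned from the listener. */
    struct {
    	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
    	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
    	__type(key, int);
    	__type(value, struct conn_stats);
    } conn_stats_map SEC(".maps");

    char _license[] SEC("license") = "GPL";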
+13 -2
net/core/dev.c
··· 8126 8126 bpf_chk = generic_xdp_install; 8127 8127 8128 8128 if (fd >= 0) { 8129 + u32 prog_id; 8130 + 8129 8131 if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) { 8130 8132 NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); 8131 8133 return -EEXIST; 8132 8134 } 8133 - if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && 8134 - __dev_xdp_query(dev, bpf_op, query)) { 8135 + 8136 + prog_id = __dev_xdp_query(dev, bpf_op, query); 8137 + if ((flags & XDP_FLAGS_UPDATE_IF_NOEXIST) && prog_id) { 8135 8138 NL_SET_ERR_MSG(extack, "XDP program already attached"); 8136 8139 return -EBUSY; 8137 8140 } ··· 8149 8146 bpf_prog_put(prog); 8150 8147 return -EINVAL; 8151 8148 } 8149 + 8150 + if (prog->aux->id == prog_id) { 8151 + bpf_prog_put(prog); 8152 + return 0; 8153 + } 8154 + } else { 8155 + if (!__dev_xdp_query(dev, bpf_op, query)) 8156 + return 0; 8152 8157 } 8153 8158 8154 8159 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
+1 -1
net/core/filter.c
··· 5903 5903 default: 5904 5904 return -EPROTONOSUPPORT; 5905 5905 } 5906 - if (mss <= 0) 5906 + if (mss == 0) 5907 5907 return -ENOENT; 5908 5908 5909 5909 return cookie | ((u64)mss << 32);
+6 -3
net/core/sock.c
··· 1851 1851 goto out; 1852 1852 } 1853 1853 RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); 1854 - #ifdef CONFIG_BPF_SYSCALL 1855 - RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL); 1856 - #endif 1854 + 1855 + if (bpf_sk_storage_clone(sk, newsk)) { 1856 + sk_free_unlock_clone(newsk); 1857 + newsk = NULL; 1858 + goto out; 1859 + } 1857 1860 1858 1861 newsk->sk_err = 0; 1859 1862 newsk->sk_err_soft = 0;
+54 -13
net/xdp/xdp_umem.c
··· 14 14 #include <linux/netdevice.h> 15 15 #include <linux/rtnetlink.h> 16 16 #include <linux/idr.h> 17 - #include <linux/highmem.h> 17 + #include <linux/vmalloc.h> 18 18 19 19 #include "xdp_umem.h" 20 20 #include "xsk_queue.h" ··· 106 106 umem->dev = dev; 107 107 umem->queue_id = queue_id; 108 108 109 + if (flags & XDP_USE_NEED_WAKEUP) { 110 + umem->flags |= XDP_UMEM_USES_NEED_WAKEUP; 111 + /* Tx needs to be explicitly woken up the first time. 112 + * Also for supporting drivers that do not implement this 113 + * feature. They will always have to call sendto(). 114 + */ 115 + xsk_set_tx_need_wakeup(umem); 116 + } 117 + 109 118 dev_hold(dev); 110 119 111 120 if (force_copy) 112 121 /* For copy-mode, we are done. */ 113 122 return 0; 114 123 115 - if (!dev->netdev_ops->ndo_bpf || 116 - !dev->netdev_ops->ndo_xsk_async_xmit) { 124 + if (!dev->netdev_ops->ndo_bpf || !dev->netdev_ops->ndo_xsk_wakeup) { 117 125 err = -EOPNOTSUPP; 118 126 goto err_unreg_umem; 119 127 } ··· 178 170 unsigned int i; 179 171 180 172 for (i = 0; i < umem->npgs; i++) 181 - kunmap(umem->pgs[i]); 173 + if (PageHighMem(umem->pgs[i])) 174 + vunmap(umem->pages[i].addr); 175 + } 176 + 177 + static int xdp_umem_map_pages(struct xdp_umem *umem) 178 + { 179 + unsigned int i; 180 + void *addr; 181 + 182 + for (i = 0; i < umem->npgs; i++) { 183 + if (PageHighMem(umem->pgs[i])) 184 + addr = vmap(&umem->pgs[i], 1, VM_MAP, PAGE_KERNEL); 185 + else 186 + addr = page_address(umem->pgs[i]); 187 + 188 + if (!addr) { 189 + xdp_umem_unmap_pages(umem); 190 + return -ENOMEM; 191 + } 192 + 193 + umem->pages[i].addr = addr; 194 + } 195 + 196 + return 0; 182 197 } 183 198 184 199 static void xdp_umem_unpin_pages(struct xdp_umem *umem) ··· 340 309 341 310 static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) 342 311 { 312 + bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; 343 313 u32 chunk_size = mr->chunk_size, headroom = mr->headroom; 344 314 unsigned int chunks, chunks_per_page; 345 315 u64 addr = mr->addr, size = mr->len; 346 - int size_chk, err, i; 316 + int size_chk, err; 347 317 348 318 if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { 349 319 /* Strictly speaking we could support this, if: ··· 356 324 return -EINVAL; 357 325 } 358 326 359 - if (!is_power_of_2(chunk_size)) 327 + if (mr->flags & ~(XDP_UMEM_UNALIGNED_CHUNK_FLAG | 328 + XDP_UMEM_USES_NEED_WAKEUP)) 329 + return -EINVAL; 330 + 331 + if (!unaligned_chunks && !is_power_of_2(chunk_size)) 360 332 return -EINVAL; 361 333 362 334 if (!PAGE_ALIGNED(addr)) { ··· 377 341 if (chunks == 0) 378 342 return -EINVAL; 379 343 380 - chunks_per_page = PAGE_SIZE / chunk_size; 381 - if (chunks < chunks_per_page || chunks % chunks_per_page) 382 - return -EINVAL; 344 + if (!unaligned_chunks) { 345 + chunks_per_page = PAGE_SIZE / chunk_size; 346 + if (chunks < chunks_per_page || chunks % chunks_per_page) 347 + return -EINVAL; 348 + } 383 349 384 350 headroom = ALIGN(headroom, 64); 385 351 ··· 390 352 return -EINVAL; 391 353 392 354 umem->address = (unsigned long)addr; 393 - umem->chunk_mask = ~((u64)chunk_size - 1); 355 + umem->chunk_mask = unaligned_chunks ? 
XSK_UNALIGNED_BUF_ADDR_MASK 356 + : ~((u64)chunk_size - 1); 394 357 umem->size = size; 395 358 umem->headroom = headroom; 396 359 umem->chunk_size_nohr = chunk_size - headroom; 397 360 umem->npgs = size / PAGE_SIZE; 398 361 umem->pgs = NULL; 399 362 umem->user = NULL; 363 + umem->flags = mr->flags; 400 364 INIT_LIST_HEAD(&umem->xsk_list); 401 365 spin_lock_init(&umem->xsk_list_lock); 402 366 ··· 418 378 goto out_pin; 419 379 } 420 380 421 - for (i = 0; i < umem->npgs; i++) 422 - umem->pages[i].addr = kmap(umem->pgs[i]); 381 + err = xdp_umem_map_pages(umem); 382 + if (!err) 383 + return 0; 423 384 424 - return 0; 385 + kfree(umem->pages); 425 386 426 387 out_pin: 427 388 xdp_umem_unpin_pages(umem);
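User space has to opt in to the relaxed placement when it registers the area; libbpf's xsk_umem_config gains a matching flags field (the updated xdpsock sample below uses it), but the raw setsockopt path shows the uapi directly. A sketch with illustrative sizing and no error cleanup:

    #include <sys/mman.h>
    #include <sys/socket.h>
    #include <linux/if_xdp.h>

    /* Register a UMEM whose chunks may be placed at arbitrary (unaligned) offsets. */
    static void *register_unaligned_umem(int xsk_fd, __u64 size, __u32 chunk_size)
    {
    	struct xdp_umem_reg mr = {};
    	void *buf;

    	/* Unaligned mode targets large, hugepage-backed areas (cf. the sample). */
    	buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
    		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
    	if (buf == MAP_FAILED)
    		return NULL;

    	mr.addr = (__u64)(unsigned long)buf;
    	mr.len = size;
    	mr.chunk_size = chunk_size;	/* no longer has to be a power of two */
    	mr.headroom = 0;
    	mr.flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG;

    	if (setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)))
    		return NULL;
    	return buf;
    }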
+293 -52
net/xdp/xsk.c
··· 45 45 46 46 u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) 47 47 { 48 - return xskq_peek_addr(umem->fq, addr); 48 + return xskq_peek_addr(umem->fq, addr, umem); 49 49 } 50 50 EXPORT_SYMBOL(xsk_umem_peek_addr); 51 51 ··· 55 55 } 56 56 EXPORT_SYMBOL(xsk_umem_discard_addr); 57 57 58 + void xsk_set_rx_need_wakeup(struct xdp_umem *umem) 59 + { 60 + if (umem->need_wakeup & XDP_WAKEUP_RX) 61 + return; 62 + 63 + umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP; 64 + umem->need_wakeup |= XDP_WAKEUP_RX; 65 + } 66 + EXPORT_SYMBOL(xsk_set_rx_need_wakeup); 67 + 68 + void xsk_set_tx_need_wakeup(struct xdp_umem *umem) 69 + { 70 + struct xdp_sock *xs; 71 + 72 + if (umem->need_wakeup & XDP_WAKEUP_TX) 73 + return; 74 + 75 + rcu_read_lock(); 76 + list_for_each_entry_rcu(xs, &umem->xsk_list, list) { 77 + xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; 78 + } 79 + rcu_read_unlock(); 80 + 81 + umem->need_wakeup |= XDP_WAKEUP_TX; 82 + } 83 + EXPORT_SYMBOL(xsk_set_tx_need_wakeup); 84 + 85 + void xsk_clear_rx_need_wakeup(struct xdp_umem *umem) 86 + { 87 + if (!(umem->need_wakeup & XDP_WAKEUP_RX)) 88 + return; 89 + 90 + umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP; 91 + umem->need_wakeup &= ~XDP_WAKEUP_RX; 92 + } 93 + EXPORT_SYMBOL(xsk_clear_rx_need_wakeup); 94 + 95 + void xsk_clear_tx_need_wakeup(struct xdp_umem *umem) 96 + { 97 + struct xdp_sock *xs; 98 + 99 + if (!(umem->need_wakeup & XDP_WAKEUP_TX)) 100 + return; 101 + 102 + rcu_read_lock(); 103 + list_for_each_entry_rcu(xs, &umem->xsk_list, list) { 104 + xs->tx->ring->flags &= ~XDP_RING_NEED_WAKEUP; 105 + } 106 + rcu_read_unlock(); 107 + 108 + umem->need_wakeup &= ~XDP_WAKEUP_TX; 109 + } 110 + EXPORT_SYMBOL(xsk_clear_tx_need_wakeup); 111 + 112 + bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem) 113 + { 114 + return umem->flags & XDP_UMEM_USES_NEED_WAKEUP; 115 + } 116 + EXPORT_SYMBOL(xsk_umem_uses_need_wakeup); 117 + 118 + /* If a buffer crosses a page boundary, we need to do 2 memcpy's, one for 119 + * each page. This is only required in copy mode. 
120 + */ 121 + static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf, 122 + u32 len, u32 metalen) 123 + { 124 + void *to_buf = xdp_umem_get_data(umem, addr); 125 + 126 + addr = xsk_umem_add_offset_to_addr(addr); 127 + if (xskq_crosses_non_contig_pg(umem, addr, len + metalen)) { 128 + void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr; 129 + u64 page_start = addr & ~(PAGE_SIZE - 1); 130 + u64 first_len = PAGE_SIZE - (addr - page_start); 131 + 132 + memcpy(to_buf, from_buf, first_len + metalen); 133 + memcpy(next_pg_addr, from_buf + first_len, len - first_len); 134 + 135 + return; 136 + } 137 + 138 + memcpy(to_buf, from_buf, len + metalen); 139 + } 140 + 58 141 static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len) 59 142 { 60 - void *to_buf, *from_buf; 143 + u64 offset = xs->umem->headroom; 144 + u64 addr, memcpy_addr; 145 + void *from_buf; 61 146 u32 metalen; 62 - u64 addr; 63 147 int err; 64 148 65 - if (!xskq_peek_addr(xs->umem->fq, &addr) || 149 + if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) || 66 150 len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) { 67 151 xs->rx_dropped++; 68 152 return -ENOSPC; 69 153 } 70 - 71 - addr += xs->umem->headroom; 72 154 73 155 if (unlikely(xdp_data_meta_unsupported(xdp))) { 74 156 from_buf = xdp->data; ··· 160 78 metalen = xdp->data - xdp->data_meta; 161 79 } 162 80 163 - to_buf = xdp_umem_get_data(xs->umem, addr); 164 - memcpy(to_buf, from_buf, len + metalen); 165 - addr += metalen; 81 + memcpy_addr = xsk_umem_adjust_offset(xs->umem, addr, offset); 82 + __xsk_rcv_memcpy(xs->umem, memcpy_addr, from_buf, len, metalen); 83 + 84 + offset += metalen; 85 + addr = xsk_umem_adjust_offset(xs->umem, addr, offset); 166 86 err = xskq_produce_batch_desc(xs->rx, addr, len); 167 87 if (!err) { 168 88 xskq_discard_addr(xs->umem->fq); ··· 186 102 return err; 187 103 } 188 104 105 + static bool xsk_is_bound(struct xdp_sock *xs) 106 + { 107 + if (READ_ONCE(xs->state) == XSK_BOUND) { 108 + /* Matches smp_wmb() in bind(). 
*/ 109 + smp_rmb(); 110 + return true; 111 + } 112 + return false; 113 + } 114 + 189 115 int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) 190 116 { 191 117 u32 len; 118 + 119 + if (!xsk_is_bound(xs)) 120 + return -EINVAL; 192 121 193 122 if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index) 194 123 return -EINVAL; ··· 222 125 { 223 126 u32 metalen = xdp->data - xdp->data_meta; 224 127 u32 len = xdp->data_end - xdp->data; 128 + u64 offset = xs->umem->headroom; 225 129 void *buffer; 226 130 u64 addr; 227 131 int err; ··· 234 136 goto out_unlock; 235 137 } 236 138 237 - if (!xskq_peek_addr(xs->umem->fq, &addr) || 139 + if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) || 238 140 len > xs->umem->chunk_size_nohr - XDP_PACKET_HEADROOM) { 239 141 err = -ENOSPC; 240 142 goto out_drop; 241 143 } 242 144 243 - addr += xs->umem->headroom; 244 - 145 + addr = xsk_umem_adjust_offset(xs->umem, addr, offset); 245 146 buffer = xdp_umem_get_data(xs->umem, addr); 246 147 memcpy(buffer, xdp->data_meta, len + metalen); 247 - addr += metalen; 148 + 149 + addr = xsk_umem_adjust_offset(xs->umem, addr, metalen); 248 150 err = xskq_produce_batch_desc(xs->rx, addr, len); 249 151 if (err) 250 152 goto out_drop; ··· 288 190 289 191 rcu_read_lock(); 290 192 list_for_each_entry_rcu(xs, &umem->xsk_list, list) { 291 - if (!xskq_peek_desc(xs->tx, desc)) 193 + if (!xskq_peek_desc(xs->tx, desc, umem)) 292 194 continue; 293 195 294 196 if (xskq_produce_addr_lazy(umem->cq, desc->addr)) ··· 310 212 struct xdp_sock *xs = xdp_sk(sk); 311 213 struct net_device *dev = xs->dev; 312 214 313 - return dev->netdev_ops->ndo_xsk_async_xmit(dev, xs->queue_id); 215 + return dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, 216 + XDP_WAKEUP_TX); 314 217 } 315 218 316 219 static void xsk_destruct_skb(struct sk_buff *skb) ··· 342 243 if (xs->queue_id >= xs->dev->real_num_tx_queues) 343 244 goto out; 344 245 345 - while (xskq_peek_desc(xs->tx, &desc)) { 246 + while (xskq_peek_desc(xs->tx, &desc, xs->umem)) { 346 247 char *buffer; 347 248 u64 addr; 348 249 u32 len; ··· 371 272 skb->dev = xs->dev; 372 273 skb->priority = sk->sk_priority; 373 274 skb->mark = sk->sk_mark; 374 - skb_shinfo(skb)->destructor_arg = (void *)(long)addr; 275 + skb_shinfo(skb)->destructor_arg = (void *)(long)desc.addr; 375 276 skb->destructor = xsk_destruct_skb; 376 277 377 278 err = dev_direct_xmit(skb, xs->queue_id); ··· 400 301 struct sock *sk = sock->sk; 401 302 struct xdp_sock *xs = xdp_sk(sk); 402 303 403 - if (unlikely(!xs->dev)) 304 + if (unlikely(!xsk_is_bound(xs))) 404 305 return -ENXIO; 405 306 if (unlikely(!(xs->dev->flags & IFF_UP))) 406 307 return -ENETDOWN; ··· 416 317 struct poll_table_struct *wait) 417 318 { 418 319 unsigned int mask = datagram_poll(file, sock, wait); 419 - struct sock *sk = sock->sk; 420 - struct xdp_sock *xs = xdp_sk(sk); 320 + struct xdp_sock *xs = xdp_sk(sock->sk); 321 + struct net_device *dev; 322 + struct xdp_umem *umem; 323 + 324 + if (unlikely(!xsk_is_bound(xs))) 325 + return mask; 326 + 327 + dev = xs->dev; 328 + umem = xs->umem; 329 + 330 + if (umem->need_wakeup) 331 + dev->netdev_ops->ndo_xsk_wakeup(dev, xs->queue_id, 332 + umem->need_wakeup); 421 333 422 334 if (xs->rx && !xskq_empty_desc(xs->rx)) 423 335 mask |= POLLIN | POLLRDNORM; ··· 452 342 453 343 /* Make sure queue is ready before it can be seen by others */ 454 344 smp_wmb(); 455 - *queue = q; 345 + WRITE_ONCE(*queue, q); 456 346 return 0; 457 347 } 458 348 ··· 460 350 { 461 351 struct net_device *dev = xs->dev; 462 352 463 - if (!dev || 
xs->state != XSK_BOUND) 353 + if (xs->state != XSK_BOUND) 464 354 return; 465 - 466 - xs->state = XSK_UNBOUND; 355 + WRITE_ONCE(xs->state, XSK_UNBOUND); 467 356 468 357 /* Wait for driver to stop using the xdp socket. */ 469 358 xdp_del_sk_umem(xs->umem, xs); 470 359 xs->dev = NULL; 471 360 synchronize_net(); 472 361 dev_put(dev); 362 + } 363 + 364 + static struct xsk_map *xsk_get_map_list_entry(struct xdp_sock *xs, 365 + struct xdp_sock ***map_entry) 366 + { 367 + struct xsk_map *map = NULL; 368 + struct xsk_map_node *node; 369 + 370 + *map_entry = NULL; 371 + 372 + spin_lock_bh(&xs->map_list_lock); 373 + node = list_first_entry_or_null(&xs->map_list, struct xsk_map_node, 374 + node); 375 + if (node) { 376 + WARN_ON(xsk_map_inc(node->map)); 377 + map = node->map; 378 + *map_entry = node->map_entry; 379 + } 380 + spin_unlock_bh(&xs->map_list_lock); 381 + return map; 382 + } 383 + 384 + static void xsk_delete_from_maps(struct xdp_sock *xs) 385 + { 386 + /* This function removes the current XDP socket from all the 387 + * maps it resides in. We need to take extra care here, due to 388 + * the two locks involved. Each map has a lock synchronizing 389 + * updates to the entries, and each socket has a lock that 390 + * synchronizes access to the list of maps (map_list). For 391 + * deadlock avoidance the locks need to be taken in the order 392 + * "map lock"->"socket map list lock". We start off by 393 + * accessing the socket map list, and take a reference to the 394 + * map to guarantee existence between the 395 + * xsk_get_map_list_entry() and xsk_map_try_sock_delete() 396 + * calls. Then we ask the map to remove the socket, which 397 + * tries to remove the socket from the map. Note that there 398 + * might be updates to the map between 399 + * xsk_get_map_list_entry() and xsk_map_try_sock_delete(). 400 + */ 401 + struct xdp_sock **map_entry = NULL; 402 + struct xsk_map *map; 403 + 404 + while ((map = xsk_get_map_list_entry(xs, &map_entry))) { 405 + xsk_map_try_sock_delete(map, xs, map_entry); 406 + xsk_map_put(map); 407 + } 473 408 } 474 409 475 410 static int xsk_release(struct socket *sock) ··· 536 381 sock_prot_inuse_add(net, sk->sk_prot, -1); 537 382 local_bh_enable(); 538 383 384 + xsk_delete_from_maps(xs); 385 + mutex_lock(&xs->mutex); 539 386 xsk_unbind_dev(xs); 387 + mutex_unlock(&xs->mutex); 540 388 541 389 xskq_destroy(xs->rx); 542 390 xskq_destroy(xs->tx); ··· 570 412 return sock; 571 413 } 572 414 415 + /* Check if umem pages are contiguous. 416 + * If zero-copy mode, use the DMA address to do the page contiguity check 417 + * For all other modes we use addr (kernel virtual address) 418 + * Store the result in the low bits of addr. 419 + */ 420 + static void xsk_check_page_contiguity(struct xdp_umem *umem, u32 flags) 421 + { 422 + struct xdp_umem_page *pgs = umem->pages; 423 + int i, is_contig; 424 + 425 + for (i = 0; i < umem->npgs - 1; i++) { 426 + is_contig = (flags & XDP_ZEROCOPY) ? 
427 + (pgs[i].dma + PAGE_SIZE == pgs[i + 1].dma) : 428 + (pgs[i].addr + PAGE_SIZE == pgs[i + 1].addr); 429 + pgs[i].addr += is_contig << XSK_NEXT_PG_CONTIG_SHIFT; 430 + } 431 + } 432 + 573 433 static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) 574 434 { 575 435 struct sockaddr_xdp *sxdp = (struct sockaddr_xdp *)addr; ··· 603 427 return -EINVAL; 604 428 605 429 flags = sxdp->sxdp_flags; 606 - if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY)) 430 + if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY | 431 + XDP_USE_NEED_WAKEUP)) 607 432 return -EINVAL; 608 433 609 434 rtnl_lock(); ··· 631 454 struct xdp_sock *umem_xs; 632 455 struct socket *sock; 633 456 634 - if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY)) { 457 + if ((flags & XDP_COPY) || (flags & XDP_ZEROCOPY) || 458 + (flags & XDP_USE_NEED_WAKEUP)) { 635 459 /* Cannot specify flags for shared sockets. */ 636 460 err = -EINVAL; 637 461 goto out_unlock; ··· 651 473 } 652 474 653 475 umem_xs = xdp_sk(sock->sk); 654 - if (!umem_xs->umem) { 655 - /* No umem to inherit. */ 476 + if (!xsk_is_bound(umem_xs)) { 656 477 err = -EBADF; 657 478 sockfd_put(sock); 658 479 goto out_unlock; 659 - } else if (umem_xs->dev != dev || umem_xs->queue_id != qid) { 480 + } 481 + if (umem_xs->dev != dev || umem_xs->queue_id != qid) { 660 482 err = -EINVAL; 661 483 sockfd_put(sock); 662 484 goto out_unlock; 663 485 } 664 486 665 487 xdp_get_umem(umem_xs->umem); 666 - xs->umem = umem_xs->umem; 488 + WRITE_ONCE(xs->umem, umem_xs->umem); 667 489 sockfd_put(sock); 668 490 } else if (!xs->umem || !xdp_umem_validate_queues(xs->umem)) { 669 491 err = -EINVAL; ··· 678 500 err = xdp_umem_assign_dev(xs->umem, dev, qid, flags); 679 501 if (err) 680 502 goto out_unlock; 503 + 504 + xsk_check_page_contiguity(xs->umem, flags); 681 505 } 682 506 683 507 xs->dev = dev; ··· 690 510 xdp_add_sk_umem(xs->umem, xs); 691 511 692 512 out_unlock: 693 - if (err) 513 + if (err) { 694 514 dev_put(dev); 695 - else 696 - xs->state = XSK_BOUND; 515 + } else { 516 + /* Matches smp_rmb() in bind() for shared umem 517 + * sockets, and xsk_is_bound(). 518 + */ 519 + smp_wmb(); 520 + WRITE_ONCE(xs->state, XSK_BOUND); 521 + } 697 522 out_release: 698 523 mutex_unlock(&xs->mutex); 699 524 rtnl_unlock(); 700 525 return err; 701 526 } 527 + 528 + struct xdp_umem_reg_v1 { 529 + __u64 addr; /* Start of packet data area */ 530 + __u64 len; /* Length of packet data area */ 531 + __u32 chunk_size; 532 + __u32 headroom; 533 + }; 702 534 703 535 static int xsk_setsockopt(struct socket *sock, int level, int optname, 704 536 char __user *optval, unsigned int optlen) ··· 741 549 } 742 550 q = (optname == XDP_TX_RING) ? 
&xs->tx : &xs->rx; 743 551 err = xsk_init_queue(entries, q, false); 552 + if (!err && optname == XDP_TX_RING) 553 + /* Tx needs to be explicitly woken up the first time */ 554 + xs->tx->ring->flags |= XDP_RING_NEED_WAKEUP; 744 555 mutex_unlock(&xs->mutex); 745 556 return err; 746 557 } 747 558 case XDP_UMEM_REG: 748 559 { 749 - struct xdp_umem_reg mr; 560 + size_t mr_size = sizeof(struct xdp_umem_reg); 561 + struct xdp_umem_reg mr = {}; 750 562 struct xdp_umem *umem; 751 563 752 - if (copy_from_user(&mr, optval, sizeof(mr))) 564 + if (optlen < sizeof(struct xdp_umem_reg_v1)) 565 + return -EINVAL; 566 + else if (optlen < sizeof(mr)) 567 + mr_size = sizeof(struct xdp_umem_reg_v1); 568 + 569 + if (copy_from_user(&mr, optval, mr_size)) 753 570 return -EFAULT; 754 571 755 572 mutex_lock(&xs->mutex); ··· 775 574 776 575 /* Make sure umem is ready before it can be seen by others */ 777 576 smp_wmb(); 778 - xs->umem = umem; 577 + WRITE_ONCE(xs->umem, umem); 779 578 mutex_unlock(&xs->mutex); 780 579 return 0; 781 580 } ··· 809 608 } 810 609 811 610 return -ENOPROTOOPT; 611 + } 612 + 613 + static void xsk_enter_rxtx_offsets(struct xdp_ring_offset_v1 *ring) 614 + { 615 + ring->producer = offsetof(struct xdp_rxtx_ring, ptrs.producer); 616 + ring->consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer); 617 + ring->desc = offsetof(struct xdp_rxtx_ring, desc); 618 + } 619 + 620 + static void xsk_enter_umem_offsets(struct xdp_ring_offset_v1 *ring) 621 + { 622 + ring->producer = offsetof(struct xdp_umem_ring, ptrs.producer); 623 + ring->consumer = offsetof(struct xdp_umem_ring, ptrs.consumer); 624 + ring->desc = offsetof(struct xdp_umem_ring, desc); 812 625 } 813 626 814 627 static int xsk_getsockopt(struct socket *sock, int level, int optname, ··· 864 649 case XDP_MMAP_OFFSETS: 865 650 { 866 651 struct xdp_mmap_offsets off; 652 + struct xdp_mmap_offsets_v1 off_v1; 653 + bool flags_supported = true; 654 + void *to_copy; 867 655 868 - if (len < sizeof(off)) 656 + if (len < sizeof(off_v1)) 869 657 return -EINVAL; 658 + else if (len < sizeof(off)) 659 + flags_supported = false; 870 660 871 - off.rx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer); 872 - off.rx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer); 873 - off.rx.desc = offsetof(struct xdp_rxtx_ring, desc); 874 - off.tx.producer = offsetof(struct xdp_rxtx_ring, ptrs.producer); 875 - off.tx.consumer = offsetof(struct xdp_rxtx_ring, ptrs.consumer); 876 - off.tx.desc = offsetof(struct xdp_rxtx_ring, desc); 661 + if (flags_supported) { 662 + /* xdp_ring_offset is identical to xdp_ring_offset_v1 663 + * except for the flags field added to the end. 
664 + */ 665 + xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *) 666 + &off.rx); 667 + xsk_enter_rxtx_offsets((struct xdp_ring_offset_v1 *) 668 + &off.tx); 669 + xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *) 670 + &off.fr); 671 + xsk_enter_umem_offsets((struct xdp_ring_offset_v1 *) 672 + &off.cr); 673 + off.rx.flags = offsetof(struct xdp_rxtx_ring, 674 + ptrs.flags); 675 + off.tx.flags = offsetof(struct xdp_rxtx_ring, 676 + ptrs.flags); 677 + off.fr.flags = offsetof(struct xdp_umem_ring, 678 + ptrs.flags); 679 + off.cr.flags = offsetof(struct xdp_umem_ring, 680 + ptrs.flags); 877 681 878 - off.fr.producer = offsetof(struct xdp_umem_ring, ptrs.producer); 879 - off.fr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer); 880 - off.fr.desc = offsetof(struct xdp_umem_ring, desc); 881 - off.cr.producer = offsetof(struct xdp_umem_ring, ptrs.producer); 882 - off.cr.consumer = offsetof(struct xdp_umem_ring, ptrs.consumer); 883 - off.cr.desc = offsetof(struct xdp_umem_ring, desc); 682 + len = sizeof(off); 683 + to_copy = &off; 684 + } else { 685 + xsk_enter_rxtx_offsets(&off_v1.rx); 686 + xsk_enter_rxtx_offsets(&off_v1.tx); 687 + xsk_enter_umem_offsets(&off_v1.fr); 688 + xsk_enter_umem_offsets(&off_v1.cr); 884 689 885 - len = sizeof(off); 886 - if (copy_to_user(optval, &off, len)) 690 + len = sizeof(off_v1); 691 + to_copy = &off_v1; 692 + } 693 + 694 + if (copy_to_user(optval, to_copy, len)) 887 695 return -EFAULT; 888 696 if (put_user(len, optlen)) 889 697 return -EFAULT; ··· 951 713 unsigned long pfn; 952 714 struct page *qpg; 953 715 954 - if (xs->state != XSK_READY) 716 + if (READ_ONCE(xs->state) != XSK_READY) 955 717 return -EBUSY; 956 718 957 719 if (offset == XDP_PGOFF_RX_RING) { ··· 1092 854 mutex_init(&xs->mutex); 1093 855 spin_lock_init(&xs->rx_lock); 1094 856 spin_lock_init(&xs->tx_completion_lock); 857 + 858 + INIT_LIST_HEAD(&xs->map_list); 859 + spin_lock_init(&xs->map_list_lock); 1095 860 1096 861 mutex_lock(&net->xdp.lock); 1097 862 sk_add_node_rcu(sk, &net->xdp.list);
+13
net/xdp/xsk.h
··· 4 4 #ifndef XSK_H_ 5 5 #define XSK_H_ 6 6 7 + struct xdp_ring_offset_v1 { 8 + __u64 producer; 9 + __u64 consumer; 10 + __u64 desc; 11 + }; 12 + 13 + struct xdp_mmap_offsets_v1 { 14 + struct xdp_ring_offset_v1 rx; 15 + struct xdp_ring_offset_v1 tx; 16 + struct xdp_ring_offset_v1 fr; 17 + struct xdp_ring_offset_v1 cr; 18 + }; 19 + 7 20 static inline struct xdp_sock *xdp_sk(struct sock *sk) 8 21 { 9 22 return (struct xdp_sock *)sk;
+4 -1
net/xdp/xsk_diag.c
··· 56 56 du.id = umem->id; 57 57 du.size = umem->size; 58 58 du.num_pages = umem->npgs; 59 - du.chunk_size = (__u32)(~umem->chunk_mask + 1); 59 + du.chunk_size = umem->chunk_size_nohr + umem->headroom; 60 60 du.headroom = umem->headroom; 61 61 du.ifindex = umem->dev ? umem->dev->ifindex : 0; 62 62 du.queue_id = umem->queue_id; ··· 97 97 msg->xdiag_ino = sk_ino; 98 98 sock_diag_save_cookie(sk, msg->xdiag_cookie); 99 99 100 + mutex_lock(&xs->mutex); 100 101 if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb)) 101 102 goto out_nlmsg_trim; 102 103 ··· 118 117 sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO)) 119 118 goto out_nlmsg_trim; 120 119 120 + mutex_unlock(&xs->mutex); 121 121 nlmsg_end(nlskb, nlh); 122 122 return 0; 123 123 124 124 out_nlmsg_trim: 125 + mutex_unlock(&xs->mutex); 125 126 nlmsg_cancel(nlskb, nlh); 126 127 return -EMSGSIZE; 127 128 }
+63 -8
net/xdp/xsk_queue.h
··· 16 16 struct xdp_ring { 17 17 u32 producer ____cacheline_aligned_in_smp; 18 18 u32 consumer ____cacheline_aligned_in_smp; 19 + u32 flags; 19 20 }; 20 21 21 22 /* Used for the RX and TX queues for packets */ ··· 134 133 135 134 /* UMEM queue */ 136 135 136 + static inline bool xskq_crosses_non_contig_pg(struct xdp_umem *umem, u64 addr, 137 + u64 length) 138 + { 139 + bool cross_pg = (addr & (PAGE_SIZE - 1)) + length > PAGE_SIZE; 140 + bool next_pg_contig = 141 + (unsigned long)umem->pages[(addr >> PAGE_SHIFT)].addr & 142 + XSK_NEXT_PG_CONTIG_MASK; 143 + 144 + return cross_pg && !next_pg_contig; 145 + } 146 + 137 147 static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr) 138 148 { 139 149 if (addr >= q->size) { ··· 155 143 return true; 156 144 } 157 145 158 - static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr) 146 + static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr, 147 + u64 length, 148 + struct xdp_umem *umem) 149 + { 150 + u64 base_addr = xsk_umem_extract_addr(addr); 151 + 152 + addr = xsk_umem_add_offset_to_addr(addr); 153 + if (base_addr >= q->size || addr >= q->size || 154 + xskq_crosses_non_contig_pg(umem, addr, length)) { 155 + q->invalid_descs++; 156 + return false; 157 + } 158 + 159 + return true; 160 + } 161 + 162 + static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr, 163 + struct xdp_umem *umem) 159 164 { 160 165 while (q->cons_tail != q->cons_head) { 161 166 struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; 162 167 unsigned int idx = q->cons_tail & q->ring_mask; 163 168 164 169 *addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask; 170 + 171 + if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) { 172 + if (xskq_is_valid_addr_unaligned(q, *addr, 173 + umem->chunk_size_nohr, 174 + umem)) 175 + return addr; 176 + goto out; 177 + } 178 + 165 179 if (xskq_is_valid_addr(q, *addr)) 166 180 return addr; 167 181 182 + out: 168 183 q->cons_tail++; 169 184 } 170 185 171 186 return NULL; 172 187 } 173 188 174 - static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr) 189 + static inline u64 *xskq_peek_addr(struct xsk_queue *q, u64 *addr, 190 + struct xdp_umem *umem) 175 191 { 176 192 if (q->cons_tail == q->cons_head) { 177 193 smp_mb(); /* D, matches A */ ··· 210 170 smp_rmb(); 211 171 } 212 172 213 - return xskq_validate_addr(q, addr); 173 + return xskq_validate_addr(q, addr, umem); 214 174 } 215 175 216 176 static inline void xskq_discard_addr(struct xsk_queue *q) ··· 269 229 270 230 /* Rx/Tx queue */ 271 231 272 - static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d) 232 + static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d, 233 + struct xdp_umem *umem) 273 234 { 235 + if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) { 236 + if (!xskq_is_valid_addr_unaligned(q, d->addr, d->len, umem)) 237 + return false; 238 + 239 + if (d->len > umem->chunk_size_nohr || d->options) { 240 + q->invalid_descs++; 241 + return false; 242 + } 243 + 244 + return true; 245 + } 246 + 274 247 if (!xskq_is_valid_addr(q, d->addr)) 275 248 return false; 276 249 ··· 297 244 } 298 245 299 246 static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q, 300 - struct xdp_desc *desc) 247 + struct xdp_desc *desc, 248 + struct xdp_umem *umem) 301 249 { 302 250 while (q->cons_tail != q->cons_head) { 303 251 struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; 304 252 unsigned int idx = q->cons_tail & q->ring_mask; 305 253 306 254 *desc = 
READ_ONCE(ring->desc[idx]); 307 - if (xskq_is_valid_desc(q, desc)) 255 + if (xskq_is_valid_desc(q, desc, umem)) 308 256 return desc; 309 257 310 258 q->cons_tail++; ··· 315 261 } 316 262 317 263 static inline struct xdp_desc *xskq_peek_desc(struct xsk_queue *q, 318 - struct xdp_desc *desc) 264 + struct xdp_desc *desc, 265 + struct xdp_umem *umem) 319 266 { 320 267 if (q->cons_tail == q->cons_head) { 321 268 smp_mb(); /* D, matches A */ ··· 327 272 smp_rmb(); /* C, matches B */ 328 273 } 329 274 330 - return xskq_validate_desc(q, desc); 275 + return xskq_validate_desc(q, desc, umem); 331 276 } 332 277 333 278 static inline void xskq_discard_desc(struct xsk_queue *q)
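The layout being validated here is the one fixed by the new uapi masks: in unaligned mode the lower 48 bits of a descriptor address carry the base chunk address and the upper 16 bits carry an offset into that chunk. A small sketch of the decoding an application would do by hand (libbpf grows xsk_umem__extract_addr()/xsk_umem__add_offset_to_addr() helpers for the same thing, as the xdpsock sample below shows):

    #include <linux/if_xdp.h>

    /* Base chunk address: low 48 bits of the descriptor address. */
    static inline __u64 umem_base_addr(__u64 addr)
    {
    	return addr & XSK_UNALIGNED_BUF_ADDR_MASK;
    }

    /* Offset within the chunk: high 16 bits. */
    static inline __u64 umem_offset(__u64 addr)
    {
    	return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT;
    }

    /* Effective position of the payload inside the umem. */
    static inline __u64 umem_effective_addr(__u64 addr)
    {
    	return umem_base_addr(addr) + umem_offset(addr);
    }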
+6
samples/bpf/syscall_nrs.c
··· 9 9 COMMENT("Linux system call numbers."); 10 10 SYSNR(__NR_write); 11 11 SYSNR(__NR_read); 12 + #ifdef __NR_mmap2 13 + SYSNR(__NR_mmap2); 14 + #endif 15 + #ifdef __NR_mmap 12 16 SYSNR(__NR_mmap); 17 + #endif 18 + 13 19 }
+15 -2
samples/bpf/tracex5_kern.c
··· 68 68 return 0; 69 69 } 70 70 71 - PROG(SYS__NR_mmap)(struct pt_regs *ctx) 71 + #ifdef __NR_mmap2 72 + PROG(SYS__NR_mmap2)(struct pt_regs *ctx) 72 73 { 73 - char fmt[] = "mmap\n"; 74 + char fmt[] = "mmap2\n"; 75 + 74 76 bpf_trace_printk(fmt, sizeof(fmt)); 75 77 return 0; 76 78 } 79 + #endif 80 + 81 + #ifdef __NR_mmap 82 + PROG(SYS__NR_mmap)(struct pt_regs *ctx) 83 + { 84 + char fmt[] = "mmap\n"; 85 + 86 + bpf_trace_printk(fmt, sizeof(fmt)); 87 + return 0; 88 + } 89 + #endif 77 90 78 91 char _license[] SEC("license") = "GPL"; 79 92 u32 _version SEC("version") = LINUX_VERSION_CODE;
+161 -88
samples/bpf/xdpsock_user.c
··· 67 67 static int opt_queue; 68 68 static int opt_poll; 69 69 static int opt_interval = 1; 70 + static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP; 71 + static u32 opt_umem_flags; 72 + static int opt_unaligned_chunks; 73 + static int opt_mmap_flags; 70 74 static u32 opt_xdp_bind_flags; 71 75 static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 76 + static int opt_timeout = 1000; 77 + static bool opt_need_wakeup = true; 72 78 static __u32 prog_id; 73 79 74 80 struct xsk_umem_info { ··· 288 282 .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS, 289 283 .frame_size = opt_xsk_frame_size, 290 284 .frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM, 285 + .flags = opt_umem_flags 291 286 }; 287 + 292 288 int ret; 293 289 294 290 umem = calloc(1, sizeof(*umem)); ··· 299 291 300 292 ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq, 301 293 &cfg); 294 + 302 295 if (ret) 303 296 exit_with_error(-ret); 304 297 ··· 361 352 {"zero-copy", no_argument, 0, 'z'}, 362 353 {"copy", no_argument, 0, 'c'}, 363 354 {"frame-size", required_argument, 0, 'f'}, 355 + {"no-need-wakeup", no_argument, 0, 'm'}, 356 + {"unaligned", no_argument, 0, 'u'}, 364 357 {0, 0, 0, 0} 365 358 }; 366 359 ··· 383 372 " -z, --zero-copy Force zero-copy mode.\n" 384 373 " -c, --copy Force copy mode.\n" 385 374 " -f, --frame-size=n Set the frame size (must be a power of two, default is %d).\n" 375 + " -m, --no-need-wakeup Turn off use of driver need wakeup flag.\n" 376 + " -f, --frame-size=n Set the frame size (must be a power of two in aligned mode, default is %d).\n" 377 + " -u, --unaligned Enable unaligned chunk placement\n" 386 378 "\n"; 387 379 fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE); 388 380 exit(EXIT_FAILURE); ··· 398 384 opterr = 0; 399 385 400 386 for (;;) { 401 - c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:", long_options, 402 - &option_index); 387 + c = getopt_long(argc, argv, "Frtli:q:psSNn:czf:mu", 388 + long_options, &option_index); 403 389 if (c == -1) 404 390 break; 405 391 ··· 438 424 case 'c': 439 425 opt_xdp_bind_flags |= XDP_COPY; 440 426 break; 427 + case 'u': 428 + opt_umem_flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; 429 + opt_unaligned_chunks = 1; 430 + opt_mmap_flags = MAP_HUGETLB; 431 + break; 441 432 case 'F': 442 433 opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST; 443 434 break; 444 435 case 'f': 445 436 opt_xsk_frame_size = atoi(optarg); 437 + case 'm': 438 + opt_need_wakeup = false; 439 + opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP; 446 440 break; 441 + 447 442 default: 448 443 usage(basename(argv[0])); 449 444 } ··· 465 442 usage(basename(argv[0])); 466 443 } 467 444 468 - if (opt_xsk_frame_size & (opt_xsk_frame_size - 1)) { 445 + if ((opt_xsk_frame_size & (opt_xsk_frame_size - 1)) && 446 + !opt_unaligned_chunks) { 469 447 fprintf(stderr, "--frame-size=%d is not a power of two\n", 470 448 opt_xsk_frame_size); 471 449 usage(basename(argv[0])); ··· 483 459 exit_with_error(errno); 484 460 } 485 461 486 - static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk) 462 + static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk, 463 + struct pollfd *fds) 487 464 { 465 + struct xsk_umem_info *umem = xsk->umem; 488 466 u32 idx_cq = 0, idx_fq = 0; 489 467 unsigned int rcvd; 490 468 size_t ndescs; ··· 494 468 if (!xsk->outstanding_tx) 495 469 return; 496 470 497 - kick_tx(xsk); 471 + if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx)) 472 + kick_tx(xsk); 473 + 498 474 ndescs = (xsk->outstanding_tx > BATCH_SIZE) ? 
BATCH_SIZE : 499 475 xsk->outstanding_tx; 500 476 501 477 /* re-add completed Tx buffers */ 502 - rcvd = xsk_ring_cons__peek(&xsk->umem->cq, ndescs, &idx_cq); 478 + rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq); 503 479 if (rcvd > 0) { 504 480 unsigned int i; 505 481 int ret; 506 482 507 - ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); 483 + ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 508 484 while (ret != rcvd) { 509 485 if (ret < 0) 510 486 exit_with_error(-ret); 511 - ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, 512 - &idx_fq); 487 + if (xsk_ring_prod__needs_wakeup(&umem->fq)) 488 + ret = poll(fds, num_socks, opt_timeout); 489 + ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq); 513 490 } 491 + 514 492 for (i = 0; i < rcvd; i++) 515 - *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = 516 - *xsk_ring_cons__comp_addr(&xsk->umem->cq, 517 - idx_cq++); 493 + *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) = 494 + *xsk_ring_cons__comp_addr(&umem->cq, idx_cq++); 518 495 519 496 xsk_ring_prod__submit(&xsk->umem->fq, rcvd); 520 497 xsk_ring_cons__release(&xsk->umem->cq, rcvd); ··· 534 505 if (!xsk->outstanding_tx) 535 506 return; 536 507 537 - kick_tx(xsk); 508 + if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx)) 509 + kick_tx(xsk); 538 510 539 511 rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx); 540 512 if (rcvd > 0) { ··· 545 515 } 546 516 } 547 517 548 - static void rx_drop(struct xsk_socket_info *xsk) 518 + static void rx_drop(struct xsk_socket_info *xsk, struct pollfd *fds) 549 519 { 550 520 unsigned int rcvd, i; 551 521 u32 idx_rx = 0, idx_fq = 0; 552 522 int ret; 553 523 554 524 rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); 555 - if (!rcvd) 525 + if (!rcvd) { 526 + if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) 527 + ret = poll(fds, num_socks, opt_timeout); 556 528 return; 529 + } 557 530 558 531 ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); 559 532 while (ret != rcvd) { 560 533 if (ret < 0) 561 534 exit_with_error(-ret); 535 + if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) 536 + ret = poll(fds, num_socks, opt_timeout); 562 537 ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq); 563 538 } 564 539 565 540 for (i = 0; i < rcvd; i++) { 566 541 u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr; 567 542 u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len; 543 + u64 orig = xsk_umem__extract_addr(addr); 544 + 545 + addr = xsk_umem__add_offset_to_addr(addr); 568 546 char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr); 569 547 570 548 hex_dump(pkt, len, addr); 571 - *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = addr; 549 + *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig; 572 550 } 573 551 574 552 xsk_ring_prod__submit(&xsk->umem->fq, rcvd); ··· 587 549 static void rx_drop_all(void) 588 550 { 589 551 struct pollfd fds[MAX_SOCKS + 1]; 590 - int i, ret, timeout, nfds = 1; 552 + int i, ret; 591 553 592 554 memset(fds, 0, sizeof(fds)); 593 555 594 556 for (i = 0; i < num_socks; i++) { 595 557 fds[i].fd = xsk_socket__fd(xsks[i]->xsk); 596 558 fds[i].events = POLLIN; 597 - timeout = 1000; /* 1sn */ 598 559 } 599 560 600 561 for (;;) { 601 562 if (opt_poll) { 602 - ret = poll(fds, nfds, timeout); 563 + ret = poll(fds, num_socks, opt_timeout); 603 564 if (ret <= 0) 604 565 continue; 605 566 } 606 567 607 568 for (i = 0; i < num_socks; i++) 608 - rx_drop(xsks[i]); 569 + rx_drop(xsks[i], fds); 609 570 } 610 571 } 611 572 612 - static void tx_only(struct 
xsk_socket_info *xsk) 573 + static void tx_only(struct xsk_socket_info *xsk, u32 frame_nb) 613 574 { 614 - int timeout, ret, nfds = 1; 615 - struct pollfd fds[nfds + 1]; 616 - u32 idx, frame_nb = 0; 575 + u32 idx; 576 + 577 + if (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) == BATCH_SIZE) { 578 + unsigned int i; 579 + 580 + for (i = 0; i < BATCH_SIZE; i++) { 581 + xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->addr = 582 + (frame_nb + i) << XSK_UMEM__DEFAULT_FRAME_SHIFT; 583 + xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->len = 584 + sizeof(pkt_data) - 1; 585 + } 586 + 587 + xsk_ring_prod__submit(&xsk->tx, BATCH_SIZE); 588 + xsk->outstanding_tx += BATCH_SIZE; 589 + frame_nb += BATCH_SIZE; 590 + frame_nb %= NUM_FRAMES; 591 + } 592 + 593 + complete_tx_only(xsk); 594 + } 595 + 596 + static void tx_only_all(void) 597 + { 598 + struct pollfd fds[MAX_SOCKS]; 599 + u32 frame_nb[MAX_SOCKS] = {}; 600 + int i, ret; 617 601 618 602 memset(fds, 0, sizeof(fds)); 619 - fds[0].fd = xsk_socket__fd(xsk->xsk); 620 - fds[0].events = POLLOUT; 621 - timeout = 1000; /* 1sn */ 603 + for (i = 0; i < num_socks; i++) { 604 + fds[0].fd = xsk_socket__fd(xsks[i]->xsk); 605 + fds[0].events = POLLOUT; 606 + } 622 607 623 608 for (;;) { 624 609 if (opt_poll) { 625 - ret = poll(fds, nfds, timeout); 610 + ret = poll(fds, num_socks, opt_timeout); 626 611 if (ret <= 0) 627 612 continue; 628 613 ··· 653 592 continue; 654 593 } 655 594 656 - if (xsk_ring_prod__reserve(&xsk->tx, BATCH_SIZE, &idx) == 657 - BATCH_SIZE) { 658 - unsigned int i; 659 - 660 - for (i = 0; i < BATCH_SIZE; i++) { 661 - xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->addr 662 - = (frame_nb + i) * opt_xsk_frame_size; 663 - xsk_ring_prod__tx_desc(&xsk->tx, idx + i)->len = 664 - sizeof(pkt_data) - 1; 665 - } 666 - 667 - xsk_ring_prod__submit(&xsk->tx, BATCH_SIZE); 668 - xsk->outstanding_tx += BATCH_SIZE; 669 - frame_nb += BATCH_SIZE; 670 - frame_nb %= NUM_FRAMES; 671 - } 672 - 673 - complete_tx_only(xsk); 595 + for (i = 0; i < num_socks; i++) 596 + tx_only(xsks[i], frame_nb[i]); 674 597 } 675 598 } 676 599 677 - static void l2fwd(struct xsk_socket_info *xsk) 600 + static void l2fwd(struct xsk_socket_info *xsk, struct pollfd *fds) 678 601 { 679 - for (;;) { 680 - unsigned int rcvd, i; 681 - u32 idx_rx = 0, idx_tx = 0; 682 - int ret; 602 + unsigned int rcvd, i; 603 + u32 idx_rx = 0, idx_tx = 0; 604 + int ret; 683 605 684 - for (;;) { 685 - complete_tx_l2fwd(xsk); 606 + complete_tx_l2fwd(xsk, fds); 686 607 687 - rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, 688 - &idx_rx); 689 - if (rcvd > 0) 690 - break; 691 - } 608 + rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx); 609 + if (!rcvd) { 610 + if (xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) 611 + ret = poll(fds, num_socks, opt_timeout); 612 + return; 613 + } 692 614 615 + ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx); 616 + while (ret != rcvd) { 617 + if (ret < 0) 618 + exit_with_error(-ret); 619 + if (xsk_ring_prod__needs_wakeup(&xsk->tx)) 620 + kick_tx(xsk); 693 621 ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx); 694 - while (ret != rcvd) { 695 - if (ret < 0) 696 - exit_with_error(-ret); 697 - ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx); 622 + } 623 + 624 + for (i = 0; i < rcvd; i++) { 625 + u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr; 626 + u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len; 627 + u64 orig = xsk_umem__extract_addr(addr); 628 + 629 + addr = xsk_umem__add_offset_to_addr(addr); 630 + char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr); 631 
+ 632 + swap_mac_addresses(pkt); 633 + 634 + hex_dump(pkt, len, addr); 635 + xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = orig; 636 + xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len; 637 + } 638 + 639 + xsk_ring_prod__submit(&xsk->tx, rcvd); 640 + xsk_ring_cons__release(&xsk->rx, rcvd); 641 + 642 + xsk->rx_npkts += rcvd; 643 + xsk->outstanding_tx += rcvd; 644 + } 645 + 646 + static void l2fwd_all(void) 647 + { 648 + struct pollfd fds[MAX_SOCKS]; 649 + int i, ret; 650 + 651 + memset(fds, 0, sizeof(fds)); 652 + 653 + for (i = 0; i < num_socks; i++) { 654 + fds[i].fd = xsk_socket__fd(xsks[i]->xsk); 655 + fds[i].events = POLLOUT | POLLIN; 656 + } 657 + 658 + for (;;) { 659 + if (opt_poll) { 660 + ret = poll(fds, num_socks, opt_timeout); 661 + if (ret <= 0) 662 + continue; 698 663 } 699 664 700 - for (i = 0; i < rcvd; i++) { 701 - u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, 702 - idx_rx)->addr; 703 - u32 len = xsk_ring_cons__rx_desc(&xsk->rx, 704 - idx_rx++)->len; 705 - char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr); 706 - 707 - swap_mac_addresses(pkt); 708 - 709 - hex_dump(pkt, len, addr); 710 - xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = addr; 711 - xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len; 712 - } 713 - 714 - xsk_ring_prod__submit(&xsk->tx, rcvd); 715 - xsk_ring_cons__release(&xsk->rx, rcvd); 716 - 717 - xsk->rx_npkts += rcvd; 718 - xsk->outstanding_tx += rcvd; 665 + for (i = 0; i < num_socks; i++) 666 + l2fwd(xsks[i], fds); 719 667 } 720 668 } 721 669 ··· 744 674 exit(EXIT_FAILURE); 745 675 } 746 676 747 - ret = posix_memalign(&bufs, getpagesize(), /* PAGE_SIZE aligned */ 748 - NUM_FRAMES * opt_xsk_frame_size); 749 - if (ret) 750 - exit_with_error(ret); 751 - 677 + /* Reserve memory for the umem. Use hugepages if unaligned chunk mode */ 678 + bufs = mmap(NULL, NUM_FRAMES * opt_xsk_frame_size, 679 + PROT_READ | PROT_WRITE, 680 + MAP_PRIVATE | MAP_ANONYMOUS | opt_mmap_flags, -1, 0); 681 + if (bufs == MAP_FAILED) { 682 + printf("ERROR: mmap failed\n"); 683 + exit(EXIT_FAILURE); 684 + } 752 685 /* Create sockets... */ 753 686 umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size); 754 687 xsks[num_socks++] = xsk_configure_socket(umem); ··· 778 705 if (opt_bench == BENCH_RXDROP) 779 706 rx_drop_all(); 780 707 else if (opt_bench == BENCH_TXONLY) 781 - tx_only(xsks[0]); 708 + tx_only_all(); 782 709 else 783 - l2fwd(xsks[0]); 710 + l2fwd_all(); 784 711 785 712 return 0; 786 713 }
+1
tools/bpf/.gitignore
··· 1 1 FEATURE-DUMP.bpf 2 + feature 2 3 bpf_asm 3 4 bpf_dbg 4 5 bpf_exp.yacc.*
+3 -2
tools/bpf/Makefile
··· 81 81 82 82 clean: bpftool_clean 83 83 $(call QUIET_CLEAN, bpf-progs) 84 - $(Q)rm -rf $(OUTPUT)*.o $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg \ 84 + $(Q)$(RM) -r -- $(OUTPUT)*.o $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg \ 85 85 $(OUTPUT)bpf_asm $(OUTPUT)bpf_exp.yacc.* $(OUTPUT)bpf_exp.lex.* 86 86 $(call QUIET_CLEAN, core-gen) 87 - $(Q)rm -f $(OUTPUT)FEATURE-DUMP.bpf 87 + $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpf 88 + $(Q)$(RM) -r -- $(OUTPUT)feature 88 89 89 90 install: $(PROGS) bpftool_install 90 91 $(call QUIET_INSTALL, bpf_jit_disasm)
+2
tools/bpf/bpftool/.gitignore
··· 3 3 bpftool*.8 4 4 bpf-helpers.* 5 5 FEATURE-DUMP.bpftool 6 + feature 7 + libbpf
+7
tools/bpf/bpftool/Documentation/bpftool-btf.rst
··· 19 19 BTF COMMANDS 20 20 ============= 21 21 22 + | **bpftool** **btf** { **show** | **list** } [**id** *BTF_ID*] 22 23 | **bpftool** **btf dump** *BTF_SRC* [**format** *FORMAT*] 23 24 | **bpftool** **btf help** 24 25 | ··· 30 29 31 30 DESCRIPTION 32 31 =========== 32 + **bpftool btf { show | list }** [**id** *BTF_ID*] 33 + Show information about loaded BTF objects. If a BTF ID is 34 + specified, show information only about given BTF object, 35 + otherwise list all BTF objects currently loaded on the 36 + system. 37 + 33 38 **bpftool btf dump** *BTF_SRC* 34 39 Dump BTF entries from a given *BTF_SRC*. 35 40
+9
tools/bpf/bpftool/Documentation/bpftool-map.rst
··· 36 36 | **bpftool** **map pop** *MAP* 37 37 | **bpftool** **map enqueue** *MAP* **value** *VALUE* 38 38 | **bpftool** **map dequeue** *MAP* 39 + | **bpftool** **map freeze** *MAP* 39 40 | **bpftool** **map help** 40 41 | 41 42 | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } ··· 127 126 128 127 **bpftool map dequeue** *MAP* 129 128 Dequeue and print **value** from the queue. 129 + 130 + **bpftool map freeze** *MAP* 131 + Freeze the map as read-only from user space. Entries from a 132 + frozen map can no longer be updated or deleted with the 133 + **bpf\ ()** system call. This operation is not reversible, 134 + and the map remains immutable from user space until its 135 + destruction. However, read and write permissions for BPF 136 + programs to the map remain unchanged. 130 137 131 138 **bpftool map help** 132 139 Print short help message.
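The new subcommand is a thin wrapper around the BPF_MAP_FREEZE command. A rough libbpf-based sketch of the same operation, assuming a system-installed libbpf recent enough to provide bpf_map_freeze():

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <bpf/bpf.h>

    int main(void)
    {
        __u32 key = 0, val = 42;
        int fd, err;

        fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(key), sizeof(val), 1, 0);
        if (fd < 0)
            return 1;

        bpf_map_update_elem(fd, &key, &val, BPF_ANY);   /* still allowed */

        err = bpf_map_freeze(fd);                       /* issues BPF_MAP_FREEZE */
        if (err)
            printf("freeze failed: %s\n", strerror(errno));

        /* once frozen, writes from user space are rejected */
        err = bpf_map_update_elem(fd, &key, &val, BPF_ANY);
        printf("update after freeze: %s\n",
               err ? strerror(errno) : "unexpectedly succeeded");
        return 0;
    }

BPF programs can still read and write the map as before, which is what makes freezing useful for protecting configuration data from user space.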
+54 -3
tools/bpf/bpftool/Documentation/bpftool-net.rst
··· 15 15 *OPTIONS* := { [{ **-j** | **--json** }] [{ **-p** | **--pretty** }] } 16 16 17 17 *COMMANDS* := 18 - { **show** | **list** } [ **dev** name ] | **help** 18 + { **show** | **list** | **attach** | **detach** | **help** } 19 19 20 20 NET COMMANDS 21 21 ============ 22 22 23 - | **bpftool** **net { show | list } [ dev name ]** 23 + | **bpftool** **net { show | list }** [ **dev** *NAME* ] 24 + | **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ] 25 + | **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME* 24 26 | **bpftool** **net help** 27 + | 28 + | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* } 29 + | *ATTACH_TYPE* := { **xdp** | **xdpgeneric** | **xdpdrv** | **xdpoffload** } 25 30 26 31 DESCRIPTION 27 32 =========== 28 - **bpftool net { show | list } [ dev name ]** 33 + **bpftool net { show | list }** [ **dev** *NAME* ] 29 34 List bpf program attachments in the kernel networking subsystem. 30 35 31 36 Currently, only device driver xdp attachments and tc filter ··· 51 46 the order will be first all bpf programs attached to tc classes, then 52 47 all bpf programs attached to non clsact qdiscs, and finally all 53 48 bpf programs attached to root and clsact qdisc. 49 + 50 + **bpftool** **net attach** *ATTACH_TYPE* *PROG* **dev** *NAME* [ **overwrite** ] 51 + Attach bpf program *PROG* to network interface *NAME* with 52 + type specified by *ATTACH_TYPE*. Previously attached bpf program 53 + can be replaced by the command used with **overwrite** option. 54 + Currently, only XDP-related modes are supported for *ATTACH_TYPE*. 55 + 56 + *ATTACH_TYPE* can be of: 57 + **xdp** - try native XDP and fallback to generic XDP if NIC driver does not support it; 58 + **xdpgeneric** - Generic XDP. runs at generic XDP hook when packet already enters receive path as skb; 59 + **xdpdrv** - Native XDP. runs earliest point in driver's receive path; 60 + **xdpoffload** - Offload XDP. runs directly on NIC on each packet reception; 61 + 62 + **bpftool** **net detach** *ATTACH_TYPE* **dev** *NAME* 63 + Detach bpf program attached to network interface *NAME* with 64 + type specified by *ATTACH_TYPE*. To detach bpf program, same 65 + *ATTACH_TYPE* previously used for attach must be specified. 66 + Currently, only XDP-related modes are supported for *ATTACH_TYPE*. 54 67 55 68 **bpftool net help** 56 69 Print short help message. ··· 159 136 ] 160 137 } 161 138 ] 139 + 140 + | 141 + | **# bpftool net attach xdpdrv id 16 dev enp6s0np0** 142 + | **# bpftool net** 143 + 144 + :: 145 + 146 + xdp: 147 + enp6s0np0(4) driver id 16 148 + 149 + | 150 + | **# bpftool net attach xdpdrv id 16 dev enp6s0np0** 151 + | **# bpftool net attach xdpdrv id 20 dev enp6s0np0 overwrite** 152 + | **# bpftool net** 153 + 154 + :: 155 + 156 + xdp: 157 + enp6s0np0(4) driver id 20 158 + 159 + | 160 + | **# bpftool net attach xdpdrv id 16 dev enp6s0np0** 161 + | **# bpftool net detach xdpdrv dev enp6s0np0** 162 + | **# bpftool net** 163 + 164 + :: 165 + 166 + xdp: 162 167 163 168 164 169 SEE ALSO
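The attach and detach commands described above boil down to the libbpf netlink helper bpf_set_link_xdp_fd(); a rough sketch, with the interface name, program id, and function names purely illustrative:

    #include <net/if.h>
    #include <linux/if_link.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    /* xdpdrv: native driver mode; fail instead of replacing an existing program */
    static int attach_xdpdrv(const char *ifname, __u32 prog_id)
    {
        int ifindex = if_nametoindex(ifname);
        int prog_fd = bpf_prog_get_fd_by_id(prog_id);

        if (!ifindex || prog_fd < 0)
            return -1;
        return bpf_set_link_xdp_fd(ifindex, prog_fd,
                                   XDP_FLAGS_DRV_MODE | XDP_FLAGS_UPDATE_IF_NOEXIST);
    }

    /* detach is simply "attach fd -1" with the same mode flag */
    static int detach_xdpdrv(const char *ifname)
    {
        return bpf_set_link_xdp_fd(if_nametoindex(ifname), -1, XDP_FLAGS_DRV_MODE);
    }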
+18 -13
tools/bpf/bpftool/Makefile
··· 17 17 BPF_DIR = $(srctree)/tools/lib/bpf/ 18 18 19 19 ifneq ($(OUTPUT),) 20 - BPF_PATH = $(OUTPUT) 20 + LIBBPF_OUTPUT = $(OUTPUT)/libbpf/ 21 + LIBBPF_PATH = $(LIBBPF_OUTPUT) 21 22 else 22 - BPF_PATH = $(BPF_DIR) 23 + LIBBPF_PATH = $(BPF_DIR) 23 24 endif 24 25 25 - LIBBPF = $(BPF_PATH)libbpf.a 26 + LIBBPF = $(LIBBPF_PATH)libbpf.a 26 27 27 - BPFTOOL_VERSION := $(shell make --no-print-directory -sC ../../.. kernelversion) 28 + BPFTOOL_VERSION := $(shell make -rR --no-print-directory -sC ../../.. kernelversion) 28 29 29 30 $(LIBBPF): FORCE 30 - $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) $(OUTPUT)libbpf.a 31 + $(if $(LIBBPF_OUTPUT),@mkdir -p $(LIBBPF_OUTPUT)) 32 + $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) $(LIBBPF_OUTPUT)libbpf.a 31 33 32 34 $(LIBBPF)-clean: 33 35 $(call QUIET_CLEAN, libbpf) 34 - $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(OUTPUT) clean >/dev/null 36 + $(Q)$(MAKE) -C $(BPF_DIR) OUTPUT=$(LIBBPF_OUTPUT) clean >/dev/null 35 37 36 38 prefix ?= /usr/local 37 39 bash_compdir ?= /usr/share/bash-completion/completions 38 40 39 41 CFLAGS += -O2 40 - CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wshadow -Wno-missing-field-initializers 42 + CFLAGS += -W -Wall -Wextra -Wno-unused-parameter -Wno-missing-field-initializers 43 + CFLAGS += $(filter-out -Wswitch-enum,$(EXTRA_WARNINGS)) 41 44 CFLAGS += -DPACKAGE='"bpftool"' -D__EXPORTED_HEADERS__ \ 42 45 -I$(srctree)/kernel/bpf/ \ 43 46 -I$(srctree)/tools/include \ ··· 55 52 LDFLAGS += $(EXTRA_LDFLAGS) 56 53 endif 57 54 58 - LIBS = -lelf -lz $(LIBBPF) 55 + LIBS = $(LIBBPF) -lelf -lz 59 56 60 57 INSTALL ?= install 61 58 RM ?= rm -f ··· 117 114 $(OUTPUT)feature.o: | zdep 118 115 119 116 $(OUTPUT)bpftool: $(OBJS) $(LIBBPF) 120 - $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $^ $(LIBS) 117 + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) -o $@ $(OBJS) $(LIBS) 121 118 122 119 $(OUTPUT)%.o: %.c 123 120 $(QUIET_CC)$(COMPILE.c) -MMD -o $@ $< 124 121 125 122 clean: $(LIBBPF)-clean 126 123 $(call QUIET_CLEAN, bpftool) 127 - $(Q)$(RM) $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d 124 + $(Q)$(RM) -- $(OUTPUT)bpftool $(OUTPUT)*.o $(OUTPUT)*.d 125 + $(Q)$(RM) -r -- $(OUTPUT)libbpf/ 128 126 $(call QUIET_CLEAN, core-gen) 129 - $(Q)$(RM) $(OUTPUT)FEATURE-DUMP.bpftool 127 + $(Q)$(RM) -- $(OUTPUT)FEATURE-DUMP.bpftool 128 + $(Q)$(RM) -r -- $(OUTPUT)feature/ 130 129 131 130 install: $(OUTPUT)bpftool 132 131 $(call QUIET_INSTALL, bpftool) ··· 139 134 140 135 uninstall: 141 136 $(call QUIET_UNINST, bpftool) 142 - $(Q)$(RM) $(DESTDIR)$(prefix)/sbin/bpftool 143 - $(Q)$(RM) $(DESTDIR)$(bash_compdir)/bpftool 137 + $(Q)$(RM) -- $(DESTDIR)$(prefix)/sbin/bpftool 138 + $(Q)$(RM) -- $(DESTDIR)$(bash_compdir)/bpftool 144 139 145 140 doc: 146 141 $(call descend,Documentation)
+73 -16
tools/bpf/bpftool/bash-completion/bpftool
··· 73 73 74 74 _bpftool_get_btf_ids() 75 75 { 76 - COMPREPLY+=( $( compgen -W "$( bpftool -jp prog 2>&1 | \ 77 - command sed -n 's/.*"btf_id": \(.*\),\?$/\1/p' )" -- "$cur" ) ) 76 + COMPREPLY+=( $( compgen -W "$( bpftool -jp btf 2>&1 | \ 77 + command sed -n 's/.*"id": \(.*\),$/\1/p' )" -- "$cur" ) ) 78 78 } 79 79 80 80 _bpftool_get_obj_map_names() ··· 199 199 ;; 200 200 tag) 201 201 _bpftool_get_prog_tags 202 + return 0 203 + ;; 204 + dev) 205 + _sysfs_get_netdevs 202 206 return 0 203 207 ;; 204 208 file|pinned) ··· 403 399 _filedir 404 400 return 0 405 401 ;; 406 - dev) 407 - _sysfs_get_netdevs 408 - return 0 409 - ;; 410 402 *) 411 403 COMPREPLY=( $( compgen -W "map" -- "$cur" ) ) 412 404 _bpftool_once_attr 'type' ··· 449 449 map) 450 450 local MAP_TYPE='id pinned' 451 451 case $command in 452 - show|list|dump|peek|pop|dequeue) 452 + show|list|dump|peek|pop|dequeue|freeze) 453 453 case $prev in 454 454 $command) 455 455 COMPREPLY=( $( compgen -W "$MAP_TYPE" -- "$cur" ) ) ··· 496 496 return 0 497 497 ;; 498 498 key|value|flags|name|entries) 499 - return 0 500 - ;; 501 - dev) 502 - _sysfs_get_netdevs 503 499 return 0 504 500 ;; 505 501 *) ··· 638 642 [[ $prev == $object ]] && \ 639 643 COMPREPLY=( $( compgen -W 'delete dump getnext help \ 640 644 lookup pin event_pipe show list update create \ 641 - peek push enqueue pop dequeue' -- \ 645 + peek push enqueue pop dequeue freeze' -- \ 642 646 "$cur" ) ) 643 647 ;; 644 648 esac ··· 670 674 map) 671 675 _bpftool_get_map_ids 672 676 ;; 673 - dump) 677 + $command) 674 678 _bpftool_get_btf_ids 675 679 ;; 676 680 esac ··· 698 702 ;; 699 703 esac 700 704 ;; 705 + show|list) 706 + case $prev in 707 + $command) 708 + COMPREPLY+=( $( compgen -W "id" -- "$cur" ) ) 709 + ;; 710 + id) 711 + _bpftool_get_btf_ids 712 + ;; 713 + esac 714 + return 0 715 + ;; 701 716 *) 702 717 [[ $prev == $object ]] && \ 703 - COMPREPLY=( $( compgen -W 'dump help' -- "$cur" ) ) 718 + COMPREPLY=( $( compgen -W 'dump help show list' \ 719 + -- "$cur" ) ) 704 720 ;; 705 721 esac 706 722 ;; ··· 786 778 esac 787 779 ;; 788 780 net) 781 + local PROG_TYPE='id pinned tag' 782 + local ATTACH_TYPES='xdp xdpgeneric xdpdrv xdpoffload' 789 783 case $command in 784 + show|list) 785 + [[ $prev != "$command" ]] && return 0 786 + COMPREPLY=( $( compgen -W 'dev' -- "$cur" ) ) 787 + return 0 788 + ;; 789 + attach) 790 + case $cword in 791 + 3) 792 + COMPREPLY=( $( compgen -W "$ATTACH_TYPES" -- "$cur" ) ) 793 + return 0 794 + ;; 795 + 4) 796 + COMPREPLY=( $( compgen -W "$PROG_TYPE" -- "$cur" ) ) 797 + return 0 798 + ;; 799 + 5) 800 + case $prev in 801 + id) 802 + _bpftool_get_prog_ids 803 + ;; 804 + pinned) 805 + _filedir 806 + ;; 807 + esac 808 + return 0 809 + ;; 810 + 6) 811 + COMPREPLY=( $( compgen -W 'dev' -- "$cur" ) ) 812 + return 0 813 + ;; 814 + 8) 815 + _bpftool_once_attr 'overwrite' 816 + return 0 817 + ;; 818 + esac 819 + ;; 820 + detach) 821 + case $cword in 822 + 3) 823 + COMPREPLY=( $( compgen -W "$ATTACH_TYPES" -- "$cur" ) ) 824 + return 0 825 + ;; 826 + 4) 827 + COMPREPLY=( $( compgen -W 'dev' -- "$cur" ) ) 828 + return 0 829 + ;; 830 + esac 831 + ;; 790 832 *) 791 833 [[ $prev == $object ]] && \ 792 834 COMPREPLY=( $( compgen -W 'help \ 793 - show list' -- "$cur" ) ) 835 + show list attach detach' -- "$cur" ) ) 794 836 ;; 795 837 esac 796 838 ;; 797 839 feature) 798 840 case $command in 799 841 probe) 800 - [[ $prev == "dev" ]] && _sysfs_get_netdevs && return 0 801 842 [[ $prev == "prefix" ]] && return 0 802 843 if _bpftool_search_list 'macros'; then 803 844 COMPREPLY+=( $( 
compgen -W 'prefix' -- "$cur" ) )
+341 -3
tools/bpf/bpftool/btf.c
··· 11 11 #include <bpf.h> 12 12 #include <libbpf.h> 13 13 #include <linux/btf.h> 14 + #include <linux/hashtable.h> 14 15 15 16 #include "btf.h" 16 17 #include "json_writer.h" ··· 34 33 [BTF_KIND_FUNC_PROTO] = "FUNC_PROTO", 35 34 [BTF_KIND_VAR] = "VAR", 36 35 [BTF_KIND_DATASEC] = "DATASEC", 36 + }; 37 + 38 + struct btf_attach_table { 39 + DECLARE_HASHTABLE(table, 16); 40 + }; 41 + 42 + struct btf_attach_point { 43 + __u32 obj_id; 44 + __u32 btf_id; 45 + struct hlist_node hash; 37 46 }; 38 47 39 48 static const char *btf_int_enc_str(__u8 encoding) ··· 460 449 461 450 btf_id = strtoul(*argv, &endptr, 0); 462 451 if (*endptr) { 463 - p_err("can't parse %s as ID", **argv); 452 + p_err("can't parse %s as ID", *argv); 464 453 return -1; 465 454 } 466 455 NEXT_ARG(); ··· 533 522 return err; 534 523 } 535 524 525 + static int btf_parse_fd(int *argc, char ***argv) 526 + { 527 + unsigned int id; 528 + char *endptr; 529 + int fd; 530 + 531 + if (!is_prefix(*argv[0], "id")) { 532 + p_err("expected 'id', got: '%s'?", **argv); 533 + return -1; 534 + } 535 + NEXT_ARGP(); 536 + 537 + id = strtoul(**argv, &endptr, 0); 538 + if (*endptr) { 539 + p_err("can't parse %s as ID", **argv); 540 + return -1; 541 + } 542 + NEXT_ARGP(); 543 + 544 + fd = bpf_btf_get_fd_by_id(id); 545 + if (fd < 0) 546 + p_err("can't get BTF object by id (%u): %s", 547 + id, strerror(errno)); 548 + 549 + return fd; 550 + } 551 + 552 + static void delete_btf_table(struct btf_attach_table *tab) 553 + { 554 + struct btf_attach_point *obj; 555 + struct hlist_node *tmp; 556 + 557 + unsigned int bkt; 558 + 559 + hash_for_each_safe(tab->table, bkt, tmp, obj, hash) { 560 + hash_del(&obj->hash); 561 + free(obj); 562 + } 563 + } 564 + 565 + static int 566 + build_btf_type_table(struct btf_attach_table *tab, enum bpf_obj_type type, 567 + void *info, __u32 *len) 568 + { 569 + static const char * const names[] = { 570 + [BPF_OBJ_UNKNOWN] = "unknown", 571 + [BPF_OBJ_PROG] = "prog", 572 + [BPF_OBJ_MAP] = "map", 573 + }; 574 + struct btf_attach_point *obj_node; 575 + __u32 btf_id, id = 0; 576 + int err; 577 + int fd; 578 + 579 + while (true) { 580 + switch (type) { 581 + case BPF_OBJ_PROG: 582 + err = bpf_prog_get_next_id(id, &id); 583 + break; 584 + case BPF_OBJ_MAP: 585 + err = bpf_map_get_next_id(id, &id); 586 + break; 587 + default: 588 + err = -1; 589 + p_err("unexpected object type: %d", type); 590 + goto err_free; 591 + } 592 + if (err) { 593 + if (errno == ENOENT) { 594 + err = 0; 595 + break; 596 + } 597 + p_err("can't get next %s: %s%s", names[type], 598 + strerror(errno), 599 + errno == EINVAL ? " -- kernel too old?" 
: ""); 600 + goto err_free; 601 + } 602 + 603 + switch (type) { 604 + case BPF_OBJ_PROG: 605 + fd = bpf_prog_get_fd_by_id(id); 606 + break; 607 + case BPF_OBJ_MAP: 608 + fd = bpf_map_get_fd_by_id(id); 609 + break; 610 + default: 611 + err = -1; 612 + p_err("unexpected object type: %d", type); 613 + goto err_free; 614 + } 615 + if (fd < 0) { 616 + if (errno == ENOENT) 617 + continue; 618 + p_err("can't get %s by id (%u): %s", names[type], id, 619 + strerror(errno)); 620 + err = -1; 621 + goto err_free; 622 + } 623 + 624 + memset(info, 0, *len); 625 + err = bpf_obj_get_info_by_fd(fd, info, len); 626 + close(fd); 627 + if (err) { 628 + p_err("can't get %s info: %s", names[type], 629 + strerror(errno)); 630 + goto err_free; 631 + } 632 + 633 + switch (type) { 634 + case BPF_OBJ_PROG: 635 + btf_id = ((struct bpf_prog_info *)info)->btf_id; 636 + break; 637 + case BPF_OBJ_MAP: 638 + btf_id = ((struct bpf_map_info *)info)->btf_id; 639 + break; 640 + default: 641 + err = -1; 642 + p_err("unexpected object type: %d", type); 643 + goto err_free; 644 + } 645 + if (!btf_id) 646 + continue; 647 + 648 + obj_node = calloc(1, sizeof(*obj_node)); 649 + if (!obj_node) { 650 + p_err("failed to allocate memory: %s", strerror(errno)); 651 + goto err_free; 652 + } 653 + 654 + obj_node->obj_id = id; 655 + obj_node->btf_id = btf_id; 656 + hash_add(tab->table, &obj_node->hash, obj_node->btf_id); 657 + } 658 + 659 + return 0; 660 + 661 + err_free: 662 + delete_btf_table(tab); 663 + return err; 664 + } 665 + 666 + static int 667 + build_btf_tables(struct btf_attach_table *btf_prog_table, 668 + struct btf_attach_table *btf_map_table) 669 + { 670 + struct bpf_prog_info prog_info; 671 + __u32 prog_len = sizeof(prog_info); 672 + struct bpf_map_info map_info; 673 + __u32 map_len = sizeof(map_info); 674 + int err = 0; 675 + 676 + err = build_btf_type_table(btf_prog_table, BPF_OBJ_PROG, &prog_info, 677 + &prog_len); 678 + if (err) 679 + return err; 680 + 681 + err = build_btf_type_table(btf_map_table, BPF_OBJ_MAP, &map_info, 682 + &map_len); 683 + if (err) { 684 + delete_btf_table(btf_prog_table); 685 + return err; 686 + } 687 + 688 + return 0; 689 + } 690 + 691 + static void 692 + show_btf_plain(struct bpf_btf_info *info, int fd, 693 + struct btf_attach_table *btf_prog_table, 694 + struct btf_attach_table *btf_map_table) 695 + { 696 + struct btf_attach_point *obj; 697 + int n; 698 + 699 + printf("%u: ", info->id); 700 + printf("size %uB", info->btf_size); 701 + 702 + n = 0; 703 + hash_for_each_possible(btf_prog_table->table, obj, hash, info->id) { 704 + if (obj->btf_id == info->id) 705 + printf("%s%u", n++ == 0 ? " prog_ids " : ",", 706 + obj->obj_id); 707 + } 708 + 709 + n = 0; 710 + hash_for_each_possible(btf_map_table->table, obj, hash, info->id) { 711 + if (obj->btf_id == info->id) 712 + printf("%s%u", n++ == 0 ? 
" map_ids " : ",", 713 + obj->obj_id); 714 + } 715 + 716 + printf("\n"); 717 + } 718 + 719 + static void 720 + show_btf_json(struct bpf_btf_info *info, int fd, 721 + struct btf_attach_table *btf_prog_table, 722 + struct btf_attach_table *btf_map_table) 723 + { 724 + struct btf_attach_point *obj; 725 + 726 + jsonw_start_object(json_wtr); /* btf object */ 727 + jsonw_uint_field(json_wtr, "id", info->id); 728 + jsonw_uint_field(json_wtr, "size", info->btf_size); 729 + 730 + jsonw_name(json_wtr, "prog_ids"); 731 + jsonw_start_array(json_wtr); /* prog_ids */ 732 + hash_for_each_possible(btf_prog_table->table, obj, hash, 733 + info->id) { 734 + if (obj->btf_id == info->id) 735 + jsonw_uint(json_wtr, obj->obj_id); 736 + } 737 + jsonw_end_array(json_wtr); /* prog_ids */ 738 + 739 + jsonw_name(json_wtr, "map_ids"); 740 + jsonw_start_array(json_wtr); /* map_ids */ 741 + hash_for_each_possible(btf_map_table->table, obj, hash, 742 + info->id) { 743 + if (obj->btf_id == info->id) 744 + jsonw_uint(json_wtr, obj->obj_id); 745 + } 746 + jsonw_end_array(json_wtr); /* map_ids */ 747 + jsonw_end_object(json_wtr); /* btf object */ 748 + } 749 + 750 + static int 751 + show_btf(int fd, struct btf_attach_table *btf_prog_table, 752 + struct btf_attach_table *btf_map_table) 753 + { 754 + struct bpf_btf_info info = {}; 755 + __u32 len = sizeof(info); 756 + int err; 757 + 758 + err = bpf_obj_get_info_by_fd(fd, &info, &len); 759 + if (err) { 760 + p_err("can't get BTF object info: %s", strerror(errno)); 761 + return -1; 762 + } 763 + 764 + if (json_output) 765 + show_btf_json(&info, fd, btf_prog_table, btf_map_table); 766 + else 767 + show_btf_plain(&info, fd, btf_prog_table, btf_map_table); 768 + 769 + return 0; 770 + } 771 + 772 + static int do_show(int argc, char **argv) 773 + { 774 + struct btf_attach_table btf_prog_table; 775 + struct btf_attach_table btf_map_table; 776 + int err, fd = -1; 777 + __u32 id = 0; 778 + 779 + if (argc == 2) { 780 + fd = btf_parse_fd(&argc, &argv); 781 + if (fd < 0) 782 + return -1; 783 + } 784 + 785 + if (argc) { 786 + if (fd >= 0) 787 + close(fd); 788 + return BAD_ARG(); 789 + } 790 + 791 + hash_init(btf_prog_table.table); 792 + hash_init(btf_map_table.table); 793 + err = build_btf_tables(&btf_prog_table, &btf_map_table); 794 + if (err) { 795 + if (fd >= 0) 796 + close(fd); 797 + return err; 798 + } 799 + 800 + if (fd >= 0) { 801 + err = show_btf(fd, &btf_prog_table, &btf_map_table); 802 + close(fd); 803 + goto exit_free; 804 + } 805 + 806 + if (json_output) 807 + jsonw_start_array(json_wtr); /* root array */ 808 + 809 + while (true) { 810 + err = bpf_btf_get_next_id(id, &id); 811 + if (err) { 812 + if (errno == ENOENT) { 813 + err = 0; 814 + break; 815 + } 816 + p_err("can't get next BTF object: %s%s", 817 + strerror(errno), 818 + errno == EINVAL ? " -- kernel too old?" 
: ""); 819 + err = -1; 820 + break; 821 + } 822 + 823 + fd = bpf_btf_get_fd_by_id(id); 824 + if (fd < 0) { 825 + if (errno == ENOENT) 826 + continue; 827 + p_err("can't get BTF object by id (%u): %s", 828 + id, strerror(errno)); 829 + err = -1; 830 + break; 831 + } 832 + 833 + err = show_btf(fd, &btf_prog_table, &btf_map_table); 834 + close(fd); 835 + if (err) 836 + break; 837 + } 838 + 839 + if (json_output) 840 + jsonw_end_array(json_wtr); /* root array */ 841 + 842 + exit_free: 843 + delete_btf_table(&btf_prog_table); 844 + delete_btf_table(&btf_map_table); 845 + 846 + return err; 847 + } 848 + 536 849 static int do_help(int argc, char **argv) 537 850 { 538 851 if (json_output) { ··· 865 530 } 866 531 867 532 fprintf(stderr, 868 - "Usage: %s btf dump BTF_SRC [format FORMAT]\n" 533 + "Usage: %s btf { show | list } [id BTF_ID]\n" 534 + " %s btf dump BTF_SRC [format FORMAT]\n" 869 535 " %s btf help\n" 870 536 "\n" 871 537 " BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n" ··· 875 539 " " HELP_SPEC_PROGRAM "\n" 876 540 " " HELP_SPEC_OPTIONS "\n" 877 541 "", 878 - bin_name, bin_name); 542 + bin_name, bin_name, bin_name); 879 543 880 544 return 0; 881 545 } 882 546 883 547 static const struct cmd cmds[] = { 548 + { "show", do_show }, 549 + { "list", do_show }, 884 550 { "help", do_help }, 885 551 { "dump", do_dump }, 886 552 { 0 }
+4 -4
tools/bpf/bpftool/btf_dumper.c
··· 26 26 bool is_plain_text) 27 27 { 28 28 if (is_plain_text) 29 - jsonw_printf(jw, "%p", *(unsigned long *)data); 29 + jsonw_printf(jw, "%p", data); 30 30 else 31 - jsonw_printf(jw, "%u", *(unsigned long *)data); 31 + jsonw_printf(jw, "%lu", *(unsigned long *)data); 32 32 } 33 33 34 34 static int btf_dumper_modifier(const struct btf_dumper *d, __u32 type_id, ··· 216 216 switch (BTF_INT_ENCODING(*int_type)) { 217 217 case 0: 218 218 if (BTF_INT_BITS(*int_type) == 64) 219 - jsonw_printf(jw, "%lu", *(__u64 *)data); 219 + jsonw_printf(jw, "%llu", *(__u64 *)data); 220 220 else if (BTF_INT_BITS(*int_type) == 32) 221 221 jsonw_printf(jw, "%u", *(__u32 *)data); 222 222 else if (BTF_INT_BITS(*int_type) == 16) ··· 229 229 break; 230 230 case BTF_INT_SIGNED: 231 231 if (BTF_INT_BITS(*int_type) == 64) 232 - jsonw_printf(jw, "%ld", *(long long *)data); 232 + jsonw_printf(jw, "%lld", *(long long *)data); 233 233 else if (BTF_INT_BITS(*int_type) == 32) 234 234 jsonw_printf(jw, "%d", *(int *)data); 235 235 else if (BTF_INT_BITS(*int_type) == 16)
+1 -1
tools/bpf/bpftool/cgroup.c
··· 120 120 static int show_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type, 121 121 int level) 122 122 { 123 + const char *attach_flags_str; 123 124 __u32 prog_ids[1024] = {0}; 124 - char *attach_flags_str; 125 125 __u32 prog_cnt, iter; 126 126 __u32 attach_flags; 127 127 char buf[32];
+2 -2
tools/bpf/bpftool/common.c
··· 29 29 #define BPF_FS_MAGIC 0xcafe4a11 30 30 #endif 31 31 32 - void __printf(1, 2) p_err(const char *fmt, ...) 32 + void p_err(const char *fmt, ...) 33 33 { 34 34 va_list ap; 35 35 ··· 47 47 va_end(ap); 48 48 } 49 49 50 - void __printf(1, 2) p_info(const char *fmt, ...) 50 + void p_info(const char *fmt, ...) 51 51 { 52 52 va_list ap; 53 53
+2 -4
tools/bpf/bpftool/json_writer.c
··· 15 15 #include <malloc.h> 16 16 #include <inttypes.h> 17 17 #include <stdint.h> 18 - #include <linux/compiler.h> 19 18 20 19 #include "json_writer.h" 21 20 ··· 152 153 putc(' ', self->out); 153 154 } 154 155 155 - void __printf(2, 0) 156 - jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap) 156 + void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap) 157 157 { 158 158 jsonw_eor(self); 159 159 putc('"', self->out); ··· 160 162 putc('"', self->out); 161 163 } 162 164 163 - void __printf(2, 3) jsonw_printf(json_writer_t *self, const char *fmt, ...) 165 + void jsonw_printf(json_writer_t *self, const char *fmt, ...) 164 166 { 165 167 va_list ap; 166 168
+4 -2
tools/bpf/bpftool/json_writer.h
··· 14 14 #include <stdbool.h> 15 15 #include <stdint.h> 16 16 #include <stdarg.h> 17 + #include <linux/compiler.h> 17 18 18 19 /* Opaque class structure */ 19 20 typedef struct json_writer json_writer_t; ··· 31 30 void jsonw_name(json_writer_t *self, const char *name); 32 31 33 32 /* Add value */ 34 - void jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, va_list ap); 35 - void jsonw_printf(json_writer_t *self, const char *fmt, ...); 33 + void __printf(2, 0) jsonw_vprintf_enquote(json_writer_t *self, const char *fmt, 34 + va_list ap); 35 + void __printf(2, 3) jsonw_printf(json_writer_t *self, const char *fmt, ...); 36 36 void jsonw_string(json_writer_t *self, const char *value); 37 37 void jsonw_bool(json_writer_t *self, bool value); 38 38 void jsonw_float(json_writer_t *self, double number);
+1 -1
tools/bpf/bpftool/main.c
··· 139 139 strncat(msg, "'", sizeof(msg) - strlen(msg) - 1); 140 140 141 141 if (count >= 2) { 142 - p_err(msg); 142 + p_err("%s", msg); 143 143 return -1; 144 144 } 145 145
+2 -2
tools/bpf/bpftool/main.h
··· 98 98 extern struct pinned_obj_table prog_table; 99 99 extern struct pinned_obj_table map_table; 100 100 101 - void p_err(const char *fmt, ...); 102 - void p_info(const char *fmt, ...); 101 + void __printf(1, 2) p_err(const char *fmt, ...); 102 + void __printf(1, 2) p_info(const char *fmt, ...); 103 103 104 104 bool is_prefix(const char *pfx, const char *str); 105 105 int detect_common_prefix(const char *arg, ...);
+60 -4
tools/bpf/bpftool/map.c
··· 481 481 482 482 static int show_map_close_json(int fd, struct bpf_map_info *info) 483 483 { 484 - char *memlock; 484 + char *memlock, *frozen_str; 485 + int frozen = 0; 485 486 486 487 memlock = get_fdinfo(fd, "memlock"); 488 + frozen_str = get_fdinfo(fd, "frozen"); 487 489 488 490 jsonw_start_object(json_wtr); 489 491 ··· 535 533 } 536 534 close(fd); 537 535 536 + if (frozen_str) { 537 + frozen = atoi(frozen_str); 538 + free(frozen_str); 539 + } 540 + jsonw_int_field(json_wtr, "frozen", frozen); 541 + 538 542 if (info->btf_id) 539 543 jsonw_int_field(json_wtr, "btf_id", info->btf_id); 540 544 ··· 563 555 564 556 static int show_map_close_plain(int fd, struct bpf_map_info *info) 565 557 { 566 - char *memlock; 558 + char *memlock, *frozen_str; 559 + int frozen = 0; 567 560 568 561 memlock = get_fdinfo(fd, "memlock"); 562 + frozen_str = get_fdinfo(fd, "frozen"); 569 563 570 564 printf("%u: ", info->id); 571 565 if (info->type < ARRAY_SIZE(map_type_name)) ··· 620 610 printf("\n\tpinned %s", obj->path); 621 611 } 622 612 } 613 + printf("\n"); 614 + 615 + if (frozen_str) { 616 + frozen = atoi(frozen_str); 617 + free(frozen_str); 618 + } 619 + 620 + if (!info->btf_id && !frozen) 621 + return 0; 622 + 623 + printf("\t"); 623 624 624 625 if (info->btf_id) 625 - printf("\n\tbtf_id %d", info->btf_id); 626 + printf("btf_id %d", info->btf_id); 627 + 628 + if (frozen) 629 + printf("%sfrozen", info->btf_id ? " " : ""); 626 630 627 631 printf("\n"); 628 632 return 0; ··· 1262 1238 return err; 1263 1239 } 1264 1240 1241 + static int do_freeze(int argc, char **argv) 1242 + { 1243 + int err, fd; 1244 + 1245 + if (!REQ_ARGS(2)) 1246 + return -1; 1247 + 1248 + fd = map_parse_fd(&argc, &argv); 1249 + if (fd < 0) 1250 + return -1; 1251 + 1252 + if (argc) { 1253 + close(fd); 1254 + return BAD_ARG(); 1255 + } 1256 + 1257 + err = bpf_map_freeze(fd); 1258 + close(fd); 1259 + if (err) { 1260 + p_err("failed to freeze map: %s", strerror(errno)); 1261 + return err; 1262 + } 1263 + 1264 + if (json_output) 1265 + jsonw_null(json_wtr); 1266 + 1267 + return 0; 1268 + } 1269 + 1265 1270 static int do_help(int argc, char **argv) 1266 1271 { 1267 1272 if (json_output) { ··· 1315 1262 " %s %s pop MAP\n" 1316 1263 " %s %s enqueue MAP value VALUE\n" 1317 1264 " %s %s dequeue MAP\n" 1265 + " %s %s freeze MAP\n" 1318 1266 " %s %s help\n" 1319 1267 "\n" 1320 1268 " " HELP_SPEC_MAP "\n" ··· 1334 1280 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], 1335 1281 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], 1336 1282 bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], 1337 - bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2]); 1283 + bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], 1284 + bin_name, argv[-2]); 1338 1285 1339 1286 return 0; 1340 1287 } ··· 1357 1302 { "enqueue", do_update }, 1358 1303 { "pop", do_pop_dequeue }, 1359 1304 { "dequeue", do_pop_dequeue }, 1305 + { "freeze", do_freeze }, 1360 1306 { 0 } 1361 1307 }; 1362 1308
+2 -2
tools/bpf/bpftool/map_perf_ring.c
··· 157 157 NEXT_ARG(); 158 158 ctx.cpu = strtoul(*argv, &endptr, 0); 159 159 if (*endptr) { 160 - p_err("can't parse %s as CPU ID", **argv); 160 + p_err("can't parse %s as CPU ID", *argv); 161 161 goto err_close_map; 162 162 } 163 163 ··· 168 168 NEXT_ARG(); 169 169 ctx.idx = strtoul(*argv, &endptr, 0); 170 170 if (*endptr) { 171 - p_err("can't parse %s as index", **argv); 171 + p_err("can't parse %s as index", *argv); 172 172 goto err_close_map; 173 173 } 174 174
+170 -8
tools/bpf/bpftool/net.c
··· 55 55 __u32 flow_dissector_id; 56 56 }; 57 57 58 + enum net_attach_type { 59 + NET_ATTACH_TYPE_XDP, 60 + NET_ATTACH_TYPE_XDP_GENERIC, 61 + NET_ATTACH_TYPE_XDP_DRIVER, 62 + NET_ATTACH_TYPE_XDP_OFFLOAD, 63 + }; 64 + 65 + static const char * const attach_type_strings[] = { 66 + [NET_ATTACH_TYPE_XDP] = "xdp", 67 + [NET_ATTACH_TYPE_XDP_GENERIC] = "xdpgeneric", 68 + [NET_ATTACH_TYPE_XDP_DRIVER] = "xdpdrv", 69 + [NET_ATTACH_TYPE_XDP_OFFLOAD] = "xdpoffload", 70 + }; 71 + 72 + const size_t net_attach_type_size = ARRAY_SIZE(attach_type_strings); 73 + 74 + static enum net_attach_type parse_attach_type(const char *str) 75 + { 76 + enum net_attach_type type; 77 + 78 + for (type = 0; type < net_attach_type_size; type++) { 79 + if (attach_type_strings[type] && 80 + is_prefix(str, attach_type_strings[type])) 81 + return type; 82 + } 83 + 84 + return net_attach_type_size; 85 + } 86 + 58 87 static int dump_link_nlmsg(void *cookie, void *msg, struct nlattr **tb) 59 88 { 60 89 struct bpf_netdev_t *netinfo = cookie; ··· 226 197 227 198 fd = open("/proc/self/ns/net", O_RDONLY); 228 199 if (fd < 0) { 229 - p_err("can't open /proc/self/ns/net: %d", 200 + p_err("can't open /proc/self/ns/net: %s", 230 201 strerror(errno)); 231 202 return -1; 232 203 } ··· 252 223 return 0; 253 224 } 254 225 226 + static int net_parse_dev(int *argc, char ***argv) 227 + { 228 + int ifindex; 229 + 230 + if (is_prefix(**argv, "dev")) { 231 + NEXT_ARGP(); 232 + 233 + ifindex = if_nametoindex(**argv); 234 + if (!ifindex) 235 + p_err("invalid devname %s", **argv); 236 + 237 + NEXT_ARGP(); 238 + } else { 239 + p_err("expected 'dev', got: '%s'?", **argv); 240 + return -1; 241 + } 242 + 243 + return ifindex; 244 + } 245 + 246 + static int do_attach_detach_xdp(int progfd, enum net_attach_type attach_type, 247 + int ifindex, bool overwrite) 248 + { 249 + __u32 flags = 0; 250 + 251 + if (!overwrite) 252 + flags = XDP_FLAGS_UPDATE_IF_NOEXIST; 253 + if (attach_type == NET_ATTACH_TYPE_XDP_GENERIC) 254 + flags |= XDP_FLAGS_SKB_MODE; 255 + if (attach_type == NET_ATTACH_TYPE_XDP_DRIVER) 256 + flags |= XDP_FLAGS_DRV_MODE; 257 + if (attach_type == NET_ATTACH_TYPE_XDP_OFFLOAD) 258 + flags |= XDP_FLAGS_HW_MODE; 259 + 260 + return bpf_set_link_xdp_fd(ifindex, progfd, flags); 261 + } 262 + 263 + static int do_attach(int argc, char **argv) 264 + { 265 + enum net_attach_type attach_type; 266 + int progfd, ifindex, err = 0; 267 + bool overwrite = false; 268 + 269 + /* parse attach args */ 270 + if (!REQ_ARGS(5)) 271 + return -EINVAL; 272 + 273 + attach_type = parse_attach_type(*argv); 274 + if (attach_type == net_attach_type_size) { 275 + p_err("invalid net attach/detach type: %s", *argv); 276 + return -EINVAL; 277 + } 278 + NEXT_ARG(); 279 + 280 + progfd = prog_parse_fd(&argc, &argv); 281 + if (progfd < 0) 282 + return -EINVAL; 283 + 284 + ifindex = net_parse_dev(&argc, &argv); 285 + if (ifindex < 1) { 286 + close(progfd); 287 + return -EINVAL; 288 + } 289 + 290 + if (argc) { 291 + if (is_prefix(*argv, "overwrite")) { 292 + overwrite = true; 293 + } else { 294 + p_err("expected 'overwrite', got: '%s'?", *argv); 295 + close(progfd); 296 + return -EINVAL; 297 + } 298 + } 299 + 300 + /* attach xdp prog */ 301 + if (is_prefix("xdp", attach_type_strings[attach_type])) 302 + err = do_attach_detach_xdp(progfd, attach_type, ifindex, 303 + overwrite); 304 + 305 + if (err < 0) { 306 + p_err("interface %s attach failed: %s", 307 + attach_type_strings[attach_type], strerror(-err)); 308 + return err; 309 + } 310 + 311 + if (json_output) 312 + jsonw_null(json_wtr); 
313 + 314 + return 0; 315 + } 316 + 317 + static int do_detach(int argc, char **argv) 318 + { 319 + enum net_attach_type attach_type; 320 + int progfd, ifindex, err = 0; 321 + 322 + /* parse detach args */ 323 + if (!REQ_ARGS(3)) 324 + return -EINVAL; 325 + 326 + attach_type = parse_attach_type(*argv); 327 + if (attach_type == net_attach_type_size) { 328 + p_err("invalid net attach/detach type: %s", *argv); 329 + return -EINVAL; 330 + } 331 + NEXT_ARG(); 332 + 333 + ifindex = net_parse_dev(&argc, &argv); 334 + if (ifindex < 1) 335 + return -EINVAL; 336 + 337 + /* detach xdp prog */ 338 + progfd = -1; 339 + if (is_prefix("xdp", attach_type_strings[attach_type])) 340 + err = do_attach_detach_xdp(progfd, attach_type, ifindex, NULL); 341 + 342 + if (err < 0) { 343 + p_err("interface %s detach failed: %s", 344 + attach_type_strings[attach_type], strerror(-err)); 345 + return err; 346 + } 347 + 348 + if (json_output) 349 + jsonw_null(json_wtr); 350 + 351 + return 0; 352 + } 353 + 255 354 static int do_show(int argc, char **argv) 256 355 { 257 356 struct bpf_attach_info attach_info = {}; ··· 389 232 char err_buf[256]; 390 233 391 234 if (argc == 2) { 392 - if (strcmp(argv[0], "dev") != 0) 393 - usage(); 394 - filter_idx = if_nametoindex(argv[1]); 395 - if (filter_idx == 0) { 396 - fprintf(stderr, "invalid dev name %s\n", argv[1]); 235 + filter_idx = net_parse_dev(&argc, &argv); 236 + if (filter_idx < 1) 397 237 return -1; 398 - } 399 238 } else if (argc != 0) { 400 239 usage(); 401 240 } ··· 458 305 459 306 fprintf(stderr, 460 307 "Usage: %s %s { show | list } [dev <devname>]\n" 308 + " %s %s attach ATTACH_TYPE PROG dev <devname> [ overwrite ]\n" 309 + " %s %s detach ATTACH_TYPE dev <devname>\n" 461 310 " %s %s help\n" 311 + "\n" 312 + " " HELP_SPEC_PROGRAM "\n" 313 + " ATTACH_TYPE := { xdp | xdpgeneric | xdpdrv | xdpoffload }\n" 314 + "\n" 462 315 "Note: Only xdp and tc attachments are supported now.\n" 463 316 " For progs attached to cgroups, use \"bpftool cgroup\"\n" 464 317 " to dump program attachments. For program types\n" 465 318 " sk_{filter,skb,msg,reuseport} and lwt/seg6, please\n" 466 319 " consult iproute2.\n", 467 - bin_name, argv[-2], bin_name, argv[-2]); 320 + bin_name, argv[-2], bin_name, argv[-2], bin_name, argv[-2], 321 + bin_name, argv[-2]); 468 322 469 323 return 0; 470 324 } ··· 479 319 static const struct cmd cmds[] = { 480 320 { "show", do_show }, 481 321 { "list", do_show }, 322 + { "attach", do_attach }, 323 + { "detach", do_detach }, 482 324 { "help", do_help }, 483 325 { 0 } 484 326 };
+4
tools/bpf/bpftool/perf.c
··· 104 104 jsonw_string_field(json_wtr, "filename", buf); 105 105 jsonw_lluint_field(json_wtr, "offset", probe_offset); 106 106 break; 107 + default: 108 + break; 107 109 } 108 110 jsonw_end_object(json_wtr); 109 111 } ··· 141 139 case BPF_FD_TYPE_URETPROBE: 142 140 printf("uretprobe filename %s offset %llu\n", buf, 143 141 probe_offset); 142 + break; 143 + default: 144 144 break; 145 145 } 146 146 }
+2
tools/include/linux/compiler-gcc.h
··· 6 6 /* 7 7 * Common definitions for all gcc versions go here. 8 8 */ 9 + #ifndef GCC_VERSION 9 10 #define GCC_VERSION (__GNUC__ * 10000 \ 10 11 + __GNUC_MINOR__ * 100 \ 11 12 + __GNUC_PATCHLEVEL__) 13 + #endif 12 14 13 15 #if GCC_VERSION >= 70000 && !defined(__CHECKER__) 14 16 # define __fallthrough __attribute__ ((fallthrough))
+12 -3
tools/include/uapi/linux/bpf.h
··· 106 106 BPF_TASK_FD_QUERY, 107 107 BPF_MAP_LOOKUP_AND_DELETE_ELEM, 108 108 BPF_MAP_FREEZE, 109 + BPF_BTF_GET_NEXT_ID, 109 110 }; 110 111 111 112 enum bpf_map_type { ··· 285 284 */ 286 285 #define BPF_F_TEST_RND_HI32 (1U << 2) 287 286 287 + /* The verifier internal test flag. Behavior is undefined */ 288 + #define BPF_F_TEST_STATE_FREQ (1U << 3) 289 + 288 290 /* When BPF ldimm64's insn[0].src_reg != 0 then this can have 289 291 * two extensions: 290 292 * ··· 340 336 /* Flags for accessing BPF object from program side. */ 341 337 #define BPF_F_RDONLY_PROG (1U << 7) 342 338 #define BPF_F_WRONLY_PROG (1U << 8) 339 + 340 + /* Clone map from listener for newly accepted socket */ 341 + #define BPF_F_CLONE (1U << 9) 343 342 344 343 /* flags for BPF_PROG_QUERY */ 345 344 #define BPF_F_QUERY_EFFECTIVE (1U << 0) ··· 583 576 * limited to five). 584 577 * 585 578 * Each time the helper is called, it appends a line to the trace. 579 + * Lines are discarded while *\/sys/kernel/debug/tracing/trace* is 580 + * open, use *\/sys/kernel/debug/tracing/trace_pipe* to avoid this. 586 581 * The format of the trace is customizable, and the exact output 587 582 * one will get depends on the options set in 588 583 * *\/sys/kernel/debug/tracing/trace_options* (see also the ··· 1023 1014 * The realm of the route for the packet associated to *skb*, or 0 1024 1015 * if none was found. 1025 1016 * 1026 - * int bpf_perf_event_output(struct pt_reg *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 1017 + * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size) 1027 1018 * Description 1028 1019 * Write raw *data* blob into a special BPF perf event held by 1029 1020 * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf ··· 1085 1076 * Return 1086 1077 * 0 on success, or a negative error in case of failure. 1087 1078 * 1088 - * int bpf_get_stackid(struct pt_reg *ctx, struct bpf_map *map, u64 flags) 1079 + * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags) 1089 1080 * Description 1090 1081 * Walk a user or a kernel stack and return its id. To achieve 1091 1082 * this, the helper needs *ctx*, which is a pointer to the context ··· 1734 1725 * Return 1735 1726 * 0 on success, or a negative error in case of failure. 1736 1727 * 1737 - * int bpf_override_return(struct pt_reg *regs, u64 rc) 1728 + * int bpf_override_return(struct pt_regs *regs, u64 rc) 1738 1729 * Description 1739 1730 * Used for error injection, this helper uses kprobes to override 1740 1731 * the return value of the probed function, and to set it to *rc*.
+22
tools/include/uapi/linux/if_xdp.h
··· 16 16 #define XDP_SHARED_UMEM (1 << 0) 17 17 #define XDP_COPY (1 << 1) /* Force copy-mode */ 18 18 #define XDP_ZEROCOPY (1 << 2) /* Force zero-copy mode */ 19 + /* If this option is set, the driver might go to sleep and in that case 20 + * the XDP_RING_NEED_WAKEUP flag in the fill and/or Tx rings will be 21 + * set. If it is set, the application needs to explicitly wake up the 22 + * driver with a poll() (Rx and Tx) or sendto() (Tx only). If you are 23 + * running the driver and the application on the same core, you should 24 + * use this option so that the kernel will yield to the user space 25 + * application. 26 + */ 27 + #define XDP_USE_NEED_WAKEUP (1 << 3) 28 + 29 + /* Flags for xsk_umem_config flags */ 30 + #define XDP_UMEM_UNALIGNED_CHUNK_FLAG (1 << 0) 19 31 20 32 struct sockaddr_xdp { 21 33 __u16 sxdp_family; ··· 37 25 __u32 sxdp_shared_umem_fd; 38 26 }; 39 27 28 + /* XDP_RING flags */ 29 + #define XDP_RING_NEED_WAKEUP (1 << 0) 30 + 40 31 struct xdp_ring_offset { 41 32 __u64 producer; 42 33 __u64 consumer; 43 34 __u64 desc; 35 + __u64 flags; 44 36 }; 45 37 46 38 struct xdp_mmap_offsets { ··· 69 53 __u64 len; /* Length of packet data area */ 70 54 __u32 chunk_size; 71 55 __u32 headroom; 56 + __u32 flags; 72 57 }; 73 58 74 59 struct xdp_statistics { ··· 90 73 #define XDP_PGOFF_TX_RING 0x80000000 91 74 #define XDP_UMEM_PGOFF_FILL_RING 0x100000000ULL 92 75 #define XDP_UMEM_PGOFF_COMPLETION_RING 0x180000000ULL 93 76 + 77 + /* Masks for unaligned chunks mode */ 78 + #define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48 79 + #define XSK_UNALIGNED_BUF_ADDR_MASK \ 80 + ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1) 93 81 94 82 /* Rx/Tx descriptor */ 95 83 struct xdp_desc {
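In unaligned chunks mode a descriptor address now carries a base address in the low 48 bits and an offset in the upper 16. A small, purely illustrative sketch of how the new shift and mask combine (the sample values are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
    #define XSK_UNALIGNED_BUF_ADDR_MASK \
        ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

    int main(void)
    {
        uint64_t base = 0x12340, offset = 0x100;   /* illustrative values */
        uint64_t addr = base | (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);

        /* the consumer splits the address back into its two halves */
        printf("base   0x%llx\n", (unsigned long long)(addr & XSK_UNALIGNED_BUF_ADDR_MASK));
        printf("offset 0x%llx\n", (unsigned long long)(addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT));
        return 0;
    }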
+13 -13
tools/lib/bpf/Makefile
··· 1 1 # SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) 2 2 # Most of this file is copied from tools/lib/traceevent/Makefile 3 3 4 - BPF_VERSION = 0 5 - BPF_PATCHLEVEL = 0 6 - BPF_EXTRAVERSION = 4 4 + LIBBPF_VERSION := $(shell \ 5 + grep -oE '^LIBBPF_([0-9.]+)' libbpf.map | \ 6 + sort -rV | head -n1 | cut -d'_' -f2) 7 + LIBBPF_MAJOR_VERSION := $(firstword $(subst ., ,$(LIBBPF_VERSION))) 7 8 8 9 MAKEFLAGS += --no-print-directory 9 10 ··· 80 79 libdir_SQ = $(subst ','\'',$(libdir)) 81 80 libdir_relative_SQ = $(subst ','\'',$(libdir_relative)) 82 81 83 - VERSION = $(BPF_VERSION) 84 - PATCHLEVEL = $(BPF_PATCHLEVEL) 85 - EXTRAVERSION = $(BPF_EXTRAVERSION) 86 - 87 82 OBJ = $@ 88 83 N = 89 - 90 - LIBBPF_VERSION = $(BPF_VERSION).$(BPF_PATCHLEVEL).$(BPF_EXTRAVERSION) 91 84 92 85 LIB_TARGET = libbpf.a libbpf.so.$(LIBBPF_VERSION) 93 86 LIB_FILE = libbpf.a libbpf.so* ··· 108 113 override CFLAGS += -fPIC 109 114 override CFLAGS += $(INCLUDES) 110 115 override CFLAGS += -fvisibility=hidden 116 + override CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 111 117 112 118 ifeq ($(VERBOSE),1) 113 119 Q = ··· 134 138 PC_FILE := $(addprefix $(OUTPUT),$(PC_FILE)) 135 139 136 140 GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN) | \ 137 - awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {s++} END{print s}') 141 + cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ 142 + awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}' | \ 143 + sort -u | wc -l) 138 144 VERSIONED_SYM_COUNT = $(shell readelf -s --wide $(OUTPUT)libbpf.so | \ 139 145 grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l) 140 146 ··· 176 178 $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) 177 179 178 180 $(OUTPUT)libbpf.so.$(LIBBPF_VERSION): $(BPF_IN) 179 - $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(VERSION) \ 181 + $(QUIET_LINK)$(CC) --shared -Wl,-soname,libbpf.so.$(LIBBPF_MAJOR_VERSION) \ 180 182 -Wl,--version-script=$(VERSION_SCRIPT) $^ -lelf -o $@ 181 183 @ln -sf $(@F) $(OUTPUT)libbpf.so 182 - @ln -sf $(@F) $(OUTPUT)libbpf.so.$(VERSION) 184 + @ln -sf $(@F) $(OUTPUT)libbpf.so.$(LIBBPF_MAJOR_VERSION) 183 185 184 186 $(OUTPUT)libbpf.a: $(BPF_IN) 185 187 $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^ ··· 203 205 "Please make sure all LIBBPF_API symbols are" \ 204 206 "versioned in $(VERSION_SCRIPT)." >&2; \ 205 207 readelf -s --wide $(OUTPUT)libbpf-in.o | \ 208 + cut -d "@" -f1 | sed 's/_v[0-9]_[0-9]_[0-9].*//' | \ 206 209 awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \ 207 210 sort -u > $(OUTPUT)libbpf_global_syms.tmp; \ 208 211 readelf -s --wide $(OUTPUT)libbpf.so | \ ··· 256 257 257 258 clean: 258 259 $(call QUIET_CLEAN, libbpf) $(RM) $(TARGETS) $(CXX_TEST_TARGET) \ 259 - *.o *~ *.a *.so *.so.$(VERSION) .*.d .*.cmd *.pc LIBBPF-CFLAGS 260 + *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \ 261 + *.pc LIBBPF-CFLAGS 260 262 $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf 261 263 262 264
+12 -12
tools/lib/bpf/bpf.c
··· 568 568 return ret; 569 569 } 570 570 571 - int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id) 571 + static int bpf_obj_get_next_id(__u32 start_id, __u32 *next_id, int cmd) 572 572 { 573 573 union bpf_attr attr; 574 574 int err; ··· 576 576 memset(&attr, 0, sizeof(attr)); 577 577 attr.start_id = start_id; 578 578 579 - err = sys_bpf(BPF_PROG_GET_NEXT_ID, &attr, sizeof(attr)); 579 + err = sys_bpf(cmd, &attr, sizeof(attr)); 580 580 if (!err) 581 581 *next_id = attr.next_id; 582 582 583 583 return err; 584 584 } 585 585 586 + int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id) 587 + { 588 + return bpf_obj_get_next_id(start_id, next_id, BPF_PROG_GET_NEXT_ID); 589 + } 590 + 586 591 int bpf_map_get_next_id(__u32 start_id, __u32 *next_id) 587 592 { 588 - union bpf_attr attr; 589 - int err; 593 + return bpf_obj_get_next_id(start_id, next_id, BPF_MAP_GET_NEXT_ID); 594 + } 590 595 591 - memset(&attr, 0, sizeof(attr)); 592 - attr.start_id = start_id; 593 - 594 - err = sys_bpf(BPF_MAP_GET_NEXT_ID, &attr, sizeof(attr)); 595 - if (!err) 596 - *next_id = attr.next_id; 597 - 598 - return err; 596 + int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id) 597 + { 598 + return bpf_obj_get_next_id(start_id, next_id, BPF_BTF_GET_NEXT_ID); 599 599 } 600 600 601 601 int bpf_prog_get_fd_by_id(__u32 id)
+1
tools/lib/bpf/bpf.h
··· 156 156 __u32 *retval, __u32 *duration); 157 157 LIBBPF_API int bpf_prog_get_next_id(__u32 start_id, __u32 *next_id); 158 158 LIBBPF_API int bpf_map_get_next_id(__u32 start_id, __u32 *next_id); 159 + LIBBPF_API int bpf_btf_get_next_id(__u32 start_id, __u32 *next_id); 159 160 LIBBPF_API int bpf_prog_get_fd_by_id(__u32 id); 160 161 LIBBPF_API int bpf_map_get_fd_by_id(__u32 id); 161 162 LIBBPF_API int bpf_btf_get_fd_by_id(__u32 id);
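bpf_btf_get_next_id() mirrors the existing prog and map iterators. A minimal sketch that walks all loaded BTF objects and prints their size, assuming a kernel with BPF_BTF_GET_NEXT_ID and a libbpf that exports the new symbol:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <bpf/bpf.h>

    int main(void)
    {
        __u32 id = 0;

        while (!bpf_btf_get_next_id(id, &id)) {
            struct bpf_btf_info info = {};
            __u32 len = sizeof(info);
            int fd = bpf_btf_get_fd_by_id(id);

            if (fd < 0)
                continue;
            if (!bpf_obj_get_info_by_fd(fd, &info, &len))
                printf("btf id %u: %u bytes\n", info.id, info.btf_size);
            close(fd);
        }
        if (errno != ENOENT)   /* ENOENT just means the id space is exhausted */
            fprintf(stderr, "iteration stopped: %s\n", strerror(errno));
        return 0;
    }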
+6
tools/lib/bpf/libbpf.map
··· 183 183 perf_buffer__new; 184 184 perf_buffer__new_raw; 185 185 perf_buffer__poll; 186 + xsk_umem__create; 186 187 } LIBBPF_0.0.3; 188 + 189 + LIBBPF_0.0.5 { 190 + global: 191 + bpf_btf_get_next_id; 192 + } LIBBPF_0.0.4;
+48 -38
tools/lib/bpf/xsk.c
··· 74 74 int fd; 75 75 }; 76 76 77 - /* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit. 78 - * Unfortunately, it is not part of glibc. 79 - */ 80 - static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags, 81 - int fd, __u64 offset) 82 - { 83 - #ifdef __NR_mmap2 84 - unsigned int page_shift = __builtin_ffs(getpagesize()) - 1; 85 - long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd, 86 - (off_t)(offset >> page_shift)); 87 - 88 - return (void *)ret; 89 - #else 90 - return mmap(addr, length, prot, flags, fd, offset); 91 - #endif 92 - } 93 - 94 77 int xsk_umem__fd(const struct xsk_umem *umem) 95 78 { 96 79 return umem ? umem->fd : -EINVAL; ··· 99 116 cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS; 100 117 cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; 101 118 cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM; 119 + cfg->flags = XSK_UMEM__DEFAULT_FLAGS; 102 120 return; 103 121 } 104 122 ··· 107 123 cfg->comp_size = usr_cfg->comp_size; 108 124 cfg->frame_size = usr_cfg->frame_size; 109 125 cfg->frame_headroom = usr_cfg->frame_headroom; 126 + cfg->flags = usr_cfg->flags; 110 127 } 111 128 112 129 static int xsk_set_xdp_socket_config(struct xsk_socket_config *cfg, ··· 134 149 return 0; 135 150 } 136 151 137 - int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size, 138 - struct xsk_ring_prod *fill, struct xsk_ring_cons *comp, 139 - const struct xsk_umem_config *usr_config) 152 + int xsk_umem__create_v0_0_4(struct xsk_umem **umem_ptr, void *umem_area, 153 + __u64 size, struct xsk_ring_prod *fill, 154 + struct xsk_ring_cons *comp, 155 + const struct xsk_umem_config *usr_config) 140 156 { 141 157 struct xdp_mmap_offsets off; 142 158 struct xdp_umem_reg mr; ··· 168 182 mr.len = size; 169 183 mr.chunk_size = umem->config.frame_size; 170 184 mr.headroom = umem->config.frame_headroom; 185 + mr.flags = umem->config.flags; 171 186 172 187 err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr)); 173 188 if (err) { ··· 197 210 goto out_socket; 198 211 } 199 212 200 - map = xsk_mmap(NULL, off.fr.desc + 201 - umem->config.fill_size * sizeof(__u64), 202 - PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, 203 - umem->fd, XDP_UMEM_PGOFF_FILL_RING); 213 + map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), 214 + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd, 215 + XDP_UMEM_PGOFF_FILL_RING); 204 216 if (map == MAP_FAILED) { 205 217 err = -errno; 206 218 goto out_socket; ··· 210 224 fill->size = umem->config.fill_size; 211 225 fill->producer = map + off.fr.producer; 212 226 fill->consumer = map + off.fr.consumer; 227 + fill->flags = map + off.fr.flags; 213 228 fill->ring = map + off.fr.desc; 214 229 fill->cached_cons = umem->config.fill_size; 215 230 216 - map = xsk_mmap(NULL, 217 - off.cr.desc + umem->config.comp_size * sizeof(__u64), 218 - PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, 219 - umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING); 231 + map = mmap(NULL, off.cr.desc + umem->config.comp_size * sizeof(__u64), 232 + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, umem->fd, 233 + XDP_UMEM_PGOFF_COMPLETION_RING); 220 234 if (map == MAP_FAILED) { 221 235 err = -errno; 222 236 goto out_mmap; ··· 227 241 comp->size = umem->config.comp_size; 228 242 comp->producer = map + off.cr.producer; 229 243 comp->consumer = map + off.cr.consumer; 244 + comp->flags = map + off.cr.flags; 230 245 comp->ring = map + off.cr.desc; 231 246 232 247 *umem_ptr = umem; ··· 241 254 free(umem); 242 255 return err; 
243 256 } 257 + 258 + struct xsk_umem_config_v1 { 259 + __u32 fill_size; 260 + __u32 comp_size; 261 + __u32 frame_size; 262 + __u32 frame_headroom; 263 + }; 264 + 265 + int xsk_umem__create_v0_0_2(struct xsk_umem **umem_ptr, void *umem_area, 266 + __u64 size, struct xsk_ring_prod *fill, 267 + struct xsk_ring_cons *comp, 268 + const struct xsk_umem_config *usr_config) 269 + { 270 + struct xsk_umem_config config; 271 + 272 + memcpy(&config, usr_config, sizeof(struct xsk_umem_config_v1)); 273 + config.flags = 0; 274 + 275 + return xsk_umem__create_v0_0_4(umem_ptr, umem_area, size, fill, comp, 276 + &config); 277 + } 278 + asm(".symver xsk_umem__create_v0_0_2, xsk_umem__create@LIBBPF_0.0.2"); 279 + asm(".symver xsk_umem__create_v0_0_4, xsk_umem__create@@LIBBPF_0.0.4"); 244 280 245 281 static int xsk_load_xdp_prog(struct xsk_socket *xsk) 246 282 { ··· 560 550 } 561 551 562 552 if (rx) { 563 - rx_map = xsk_mmap(NULL, off.rx.desc + 564 - xsk->config.rx_size * sizeof(struct xdp_desc), 565 - PROT_READ | PROT_WRITE, 566 - MAP_SHARED | MAP_POPULATE, 567 - xsk->fd, XDP_PGOFF_RX_RING); 553 + rx_map = mmap(NULL, off.rx.desc + 554 + xsk->config.rx_size * sizeof(struct xdp_desc), 555 + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, 556 + xsk->fd, XDP_PGOFF_RX_RING); 568 557 if (rx_map == MAP_FAILED) { 569 558 err = -errno; 570 559 goto out_socket; ··· 573 564 rx->size = xsk->config.rx_size; 574 565 rx->producer = rx_map + off.rx.producer; 575 566 rx->consumer = rx_map + off.rx.consumer; 567 + rx->flags = rx_map + off.rx.flags; 576 568 rx->ring = rx_map + off.rx.desc; 577 569 } 578 570 xsk->rx = rx; 579 571 580 572 if (tx) { 581 - tx_map = xsk_mmap(NULL, off.tx.desc + 582 - xsk->config.tx_size * sizeof(struct xdp_desc), 583 - PROT_READ | PROT_WRITE, 584 - MAP_SHARED | MAP_POPULATE, 585 - xsk->fd, XDP_PGOFF_TX_RING); 573 + tx_map = mmap(NULL, off.tx.desc + 574 + xsk->config.tx_size * sizeof(struct xdp_desc), 575 + PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, 576 + xsk->fd, XDP_PGOFF_TX_RING); 586 577 if (tx_map == MAP_FAILED) { 587 578 err = -errno; 588 579 goto out_mmap_rx; ··· 592 583 tx->size = xsk->config.tx_size; 593 584 tx->producer = tx_map + off.tx.producer; 594 585 tx->consumer = tx_map + off.tx.consumer; 586 + tx->flags = tx_map + off.tx.flags; 595 587 tx->ring = tx_map + off.tx.desc; 596 588 tx->cached_cons = xsk->config.tx_size; 597 589 }
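With the symbol versioning above, existing binaries keep the old four-field xsk_umem_config layout while new code can pass the flags field. A rough sketch of creating a umem in unaligned-chunks mode, assuming libbpf's xsk.h is installed as <bpf/xsk.h>; the buffer reservation is elided and the function name is illustrative:

    #include <bpf/xsk.h>

    /* "bufs" and "size" come from an mmap() like the one in the xdpsock sample */
    static struct xsk_umem *create_unaligned_umem(void *bufs, __u64 size,
                                                  struct xsk_ring_prod *fill,
                                                  struct xsk_ring_cons *comp)
    {
        struct xsk_umem_config cfg = {
            .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
            .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
            .frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE,
            .frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
            .flags = XDP_UMEM_UNALIGNED_CHUNK_FLAG,  /* new flags field */
        };
        struct xsk_umem *umem = NULL;

        if (xsk_umem__create(&umem, bufs, size, fill, comp, &cfg))
            return NULL;
        return umem;
    }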
+33
tools/lib/bpf/xsk.h
··· 32 32 __u32 *producer; \ 33 33 __u32 *consumer; \ 34 34 void *ring; \ 35 + __u32 *flags; \ 35 36 } 36 37 37 38 DEFINE_XSK_RING(xsk_ring_prod); ··· 75 74 const struct xdp_desc *descs = (const struct xdp_desc *)rx->ring; 76 75 77 76 return &descs[idx & rx->mask]; 77 + } 78 + 79 + static inline int xsk_ring_prod__needs_wakeup(const struct xsk_ring_prod *r) 80 + { 81 + return *r->flags & XDP_RING_NEED_WAKEUP; 78 82 } 79 83 80 84 static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb) ··· 168 162 return &((char *)umem_area)[addr]; 169 163 } 170 164 165 + static inline __u64 xsk_umem__extract_addr(__u64 addr) 166 + { 167 + return addr & XSK_UNALIGNED_BUF_ADDR_MASK; 168 + } 169 + 170 + static inline __u64 xsk_umem__extract_offset(__u64 addr) 171 + { 172 + return addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT; 173 + } 174 + 175 + static inline __u64 xsk_umem__add_offset_to_addr(__u64 addr) 176 + { 177 + return xsk_umem__extract_addr(addr) + xsk_umem__extract_offset(addr); 178 + } 179 + 171 180 LIBBPF_API int xsk_umem__fd(const struct xsk_umem *umem); 172 181 LIBBPF_API int xsk_socket__fd(const struct xsk_socket *xsk); 173 182 ··· 191 170 #define XSK_UMEM__DEFAULT_FRAME_SHIFT 12 /* 4096 bytes */ 192 171 #define XSK_UMEM__DEFAULT_FRAME_SIZE (1 << XSK_UMEM__DEFAULT_FRAME_SHIFT) 193 172 #define XSK_UMEM__DEFAULT_FRAME_HEADROOM 0 173 + #define XSK_UMEM__DEFAULT_FLAGS 0 194 174 195 175 struct xsk_umem_config { 196 176 __u32 fill_size; 197 177 __u32 comp_size; 198 178 __u32 frame_size; 199 179 __u32 frame_headroom; 180 + __u32 flags; 200 181 }; 201 182 202 183 /* Flags for the libbpf_flags field. */ ··· 218 195 struct xsk_ring_prod *fill, 219 196 struct xsk_ring_cons *comp, 220 197 const struct xsk_umem_config *config); 198 + LIBBPF_API int xsk_umem__create_v0_0_2(struct xsk_umem **umem, 199 + void *umem_area, __u64 size, 200 + struct xsk_ring_prod *fill, 201 + struct xsk_ring_cons *comp, 202 + const struct xsk_umem_config *config); 203 + LIBBPF_API int xsk_umem__create_v0_0_4(struct xsk_umem **umem, 204 + void *umem_area, __u64 size, 205 + struct xsk_ring_prod *fill, 206 + struct xsk_ring_cons *comp, 207 + const struct xsk_umem_config *config); 221 208 LIBBPF_API int xsk_socket__create(struct xsk_socket **xsk, 222 209 const char *ifname, __u32 queue_id, 223 210 struct xsk_umem *umem,
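The per-ring flags pointer added above is what backs the new need_wakeup helper. A short sketch of the Tx kick and of the unaligned-address translation an application would do, with the surrounding socket setup elided and function names illustrative:

    #include <sys/socket.h>
    #include <bpf/xsk.h>

    /* After producing Tx descriptors on a socket bound with XDP_USE_NEED_WAKEUP,
     * only kick the kernel when the driver actually asked for it.
     */
    static void kick_tx_if_needed(struct xsk_ring_prod *tx, int xsk_fd)
    {
        if (xsk_ring_prod__needs_wakeup(tx))
            sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
    }

    /* Translate an Rx descriptor address into a pointer in unaligned-chunks mode. */
    static void *rx_pkt_data(void *umem_area, __u64 desc_addr)
    {
        return xsk_umem__get_data(umem_area, xsk_umem__add_offset_to_addr(desc_addr));
    }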
+1
tools/testing/selftests/bpf/.gitignore
··· 42 42 test_sockopt 43 43 test_sockopt_sk 44 44 test_sockopt_multi 45 + test_sockopt_inherit 45 46 test_tcp_rtt
+4 -2
tools/testing/selftests/bpf/Makefile
··· 29 29 test_cgroup_storage test_select_reuseport test_section_names \ 30 30 test_netcnt test_tcpnotify_user test_sock_fields test_sysctl test_hashmap \ 31 31 test_btf_dump test_cgroup_attach xdping test_sockopt test_sockopt_sk \ 32 - test_sockopt_multi test_tcp_rtt 32 + test_sockopt_multi test_sockopt_inherit test_tcp_rtt 33 33 34 34 BPF_OBJ_FILES = $(patsubst %.c,%.o, $(notdir $(wildcard progs/*.c))) 35 35 TEST_GEN_FILES = $(BPF_OBJ_FILES) ··· 66 66 test_tcp_check_syncookie.sh \ 67 67 test_tc_tunnel.sh \ 68 68 test_tc_edt.sh \ 69 - test_xdping.sh 69 + test_xdping.sh \ 70 + test_bpftool_build.sh 70 71 71 72 TEST_PROGS_EXTENDED := with_addr.sh \ 72 73 with_tunnels.sh \ ··· 116 115 $(OUTPUT)/test_sockopt: cgroup_helpers.c 117 116 $(OUTPUT)/test_sockopt_sk: cgroup_helpers.c 118 117 $(OUTPUT)/test_sockopt_multi: cgroup_helpers.c 118 + $(OUTPUT)/test_sockopt_inherit: cgroup_helpers.c 119 119 $(OUTPUT)/test_tcp_rtt: cgroup_helpers.c 120 120 121 121 .PHONY: force
+15 -1
tools/testing/selftests/bpf/bpf_endian.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 2 #ifndef __BPF_ENDIAN__ 3 3 #define __BPF_ENDIAN__ 4 4 ··· 29 29 # define __bpf_htonl(x) __builtin_bswap32(x) 30 30 # define __bpf_constant_ntohl(x) ___constant_swab32(x) 31 31 # define __bpf_constant_htonl(x) ___constant_swab32(x) 32 + # define __bpf_be64_to_cpu(x) __builtin_bswap64(x) 33 + # define __bpf_cpu_to_be64(x) __builtin_bswap64(x) 34 + # define __bpf_constant_be64_to_cpu(x) ___constant_swab64(x) 35 + # define __bpf_constant_cpu_to_be64(x) ___constant_swab64(x) 32 36 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 33 37 # define __bpf_ntohs(x) (x) 34 38 # define __bpf_htons(x) (x) ··· 42 38 # define __bpf_htonl(x) (x) 43 39 # define __bpf_constant_ntohl(x) (x) 44 40 # define __bpf_constant_htonl(x) (x) 41 + # define __bpf_be64_to_cpu(x) (x) 42 + # define __bpf_cpu_to_be64(x) (x) 43 + # define __bpf_constant_be64_to_cpu(x) (x) 44 + # define __bpf_constant_cpu_to_be64(x) (x) 45 45 #else 46 46 # error "Fix your compiler's __BYTE_ORDER__?!" 47 47 #endif ··· 62 54 #define bpf_ntohl(x) \ 63 55 (__builtin_constant_p(x) ? \ 64 56 __bpf_constant_ntohl(x) : __bpf_ntohl(x)) 57 + #define bpf_cpu_to_be64(x) \ 58 + (__builtin_constant_p(x) ? \ 59 + __bpf_constant_cpu_to_be64(x) : __bpf_cpu_to_be64(x)) 60 + #define bpf_be64_to_cpu(x) \ 61 + (__builtin_constant_p(x) ? \ 62 + __bpf_constant_be64_to_cpu(x) : __bpf_be64_to_cpu(x)) 65 63 66 64 #endif /* __BPF_ENDIAN__ */
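The new 64-bit conversions follow the same constant-folding pattern as bpf_htons()/bpf_htonl(). A tiny, purely illustrative use in a BPF program comparing a big-endian 64-bit tunnel ID against a constant (function name and value are made up):

    #include <linux/types.h>
    #include "bpf_endian.h"

    /* With a compile-time constant the byte swap is folded away entirely. */
    static inline int match_tunnel_id(__u64 tunnel_id_be)
    {
        return tunnel_id_be == bpf_cpu_to_be64(0xdeadbeefULL);
    }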
+1 -1
tools/testing/selftests/bpf/bpf_helpers.h
··· 1 - /* SPDX-License-Identifier: GPL-2.0 */ 1 + /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ 2 2 #ifndef __BPF_HELPERS_H 3 3 #define __BPF_HELPERS_H 4 4
+12 -8
tools/testing/selftests/bpf/prog_tests/bpf_obj_id.c
··· 48 48 /* test_obj_id.o is a dumb prog. It should never fail 49 49 * to load. 50 50 */ 51 - if (err) 52 - error_cnt++; 53 - assert(!err); 51 + if (CHECK_FAIL(err)) 52 + continue; 54 53 55 54 /* Insert a magic value to the map */ 56 55 map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); 57 - assert(map_fds[i] >= 0); 56 + if (CHECK_FAIL(map_fds[i] < 0)) 57 + goto done; 58 58 err = bpf_map_update_elem(map_fds[i], &array_key, 59 59 &array_magic_value, 0); 60 - assert(!err); 60 + if (CHECK_FAIL(err)) 61 + goto done; 61 62 62 63 /* Check getting map info */ 63 64 info_len = sizeof(struct bpf_map_info) * 2; ··· 97 96 prog_infos[i].map_ids = ptr_to_u64(map_ids + i); 98 97 prog_infos[i].nr_map_ids = 2; 99 98 err = clock_gettime(CLOCK_REALTIME, &real_time_ts); 100 - assert(!err); 99 + if (CHECK_FAIL(err)) 100 + goto done; 101 101 err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); 102 - assert(!err); 102 + if (CHECK_FAIL(err)) 103 + goto done; 103 104 err = bpf_obj_get_info_by_fd(prog_fds[i], &prog_infos[i], 104 105 &info_len); 105 106 load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) ··· 227 224 nr_id_found++; 228 225 229 226 err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); 230 - assert(!err); 227 + if (CHECK_FAIL(err)) 228 + goto done; 231 229 232 230 err = bpf_obj_get_info_by_fd(map_fd, &map_info, &info_len); 233 231 CHECK(err || info_len != sizeof(struct bpf_map_info) ||
+1 -8
tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c
··· 28 28 attr.prog_flags = BPF_F_TEST_RND_HI32; 29 29 err = bpf_prog_load_xattr(&attr, &obj, &prog_fd); 30 30 bpf_object__close(obj); 31 - if (err) 32 - error_cnt++; 33 31 return err; 34 32 } 35 33 ··· 103 105 continue; 104 106 105 107 err = check_load(test->file, test->attach_type); 106 - if (test->fails) { /* expected to fail */ 107 - if (err) 108 - error_cnt--; 109 - else 110 - error_cnt++; 111 - } 108 + CHECK_FAIL(err && !test->fails); 112 109 } 113 110 114 111 if (env.verifier_stats)
+1 -4
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
··· 344 344 .tcp.dest = 8080, 345 345 }, 346 346 .keys = { 347 - .nhoff = 0, 348 347 .nhoff = ETH_HLEN, 349 348 .thoff = ETH_HLEN + sizeof(struct iphdr) + 350 349 sizeof(struct iphdr), ··· 451 452 452 453 err = bpf_flow_load(&obj, "./bpf_flow.o", "flow_dissector", 453 454 "jmp_table", "last_dissection", &prog_fd, &keys_fd); 454 - if (err) { 455 - error_cnt++; 455 + if (CHECK_FAIL(err)) 456 456 return; 457 - } 458 457 459 458 for (i = 0; i < ARRAY_SIZE(tests); i++) { 460 459 struct bpf_flow_keys flow_keys;
-3
tools/testing/selftests/bpf/prog_tests/get_stack_raw_tp.c
··· 135 135 exp_cnt -= err; 136 136 } 137 137 138 - goto close_prog_noerr; 139 138 close_prog: 140 - error_cnt++; 141 - close_prog_noerr: 142 139 if (!IS_ERR_OR_NULL(link)) 143 140 bpf_link__destroy(link); 144 141 if (!IS_ERR_OR_NULL(pb))
+5 -15
tools/testing/selftests/bpf/prog_tests/global_data.c
··· 7 7 uint64_t num; 8 8 9 9 map_fd = bpf_find_map(__func__, obj, "result_number"); 10 - if (map_fd < 0) { 11 - error_cnt++; 10 + if (CHECK_FAIL(map_fd < 0)) 12 11 return; 13 - } 14 12 15 13 struct { 16 14 char *name; ··· 42 44 char str[32]; 43 45 44 46 map_fd = bpf_find_map(__func__, obj, "result_string"); 45 - if (map_fd < 0) { 46 - error_cnt++; 47 + if (CHECK_FAIL(map_fd < 0)) 47 48 return; 48 - } 49 49 50 50 struct { 51 51 char *name; ··· 77 81 struct foo val; 78 82 79 83 map_fd = bpf_find_map(__func__, obj, "result_struct"); 80 - if (map_fd < 0) { 81 - error_cnt++; 84 + if (CHECK_FAIL(map_fd < 0)) 82 85 return; 83 - } 84 86 85 87 struct { 86 88 char *name; ··· 106 112 __u8 *buff; 107 113 108 114 map = bpf_object__find_map_by_name(obj, "test_glo.rodata"); 109 - if (!map || !bpf_map__is_internal(map)) { 110 - error_cnt++; 115 + if (CHECK_FAIL(!map || !bpf_map__is_internal(map))) 111 116 return; 112 - } 113 117 114 118 map_fd = bpf_map__fd(map); 115 - if (map_fd < 0) { 116 - error_cnt++; 119 + if (CHECK_FAIL(map_fd < 0)) 117 120 return; 118 - } 119 121 120 122 buff = malloc(bpf_map__def(map)->value_size); 121 123 if (buff)
+3 -6
tools/testing/selftests/bpf/prog_tests/l4lb_all.c
··· 30 30 u32 *magic = (u32 *)buf; 31 31 32 32 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 33 - if (err) { 34 - error_cnt++; 33 + if (CHECK_FAIL(err)) 35 34 return; 36 - } 37 35 38 36 map_fd = bpf_find_map(__func__, obj, "vip_map"); 39 37 if (map_fd < 0) ··· 70 72 bytes += stats[i].bytes; 71 73 pkts += stats[i].pkts; 72 74 } 73 - if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { 74 - error_cnt++; 75 + if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 || 76 + pkts != NUM_ITER * 2)) 75 77 printf("test_l4lb:FAIL:stats %lld %lld\n", bytes, pkts); 76 - } 77 78 out: 78 79 bpf_object__close(obj); 79 80 }
+19 -19
tools/testing/selftests/bpf/prog_tests/map_lock.c
··· 8 8 9 9 for (i = 0; i < 10000; i++) { 10 10 err = bpf_map_lookup_elem_flags(map_fd, &key, vars, BPF_F_LOCK); 11 - if (err) { 11 + if (CHECK_FAIL(err)) { 12 12 printf("lookup failed\n"); 13 - error_cnt++; 14 13 goto out; 15 14 } 16 - if (vars[0] != 0) { 15 + if (CHECK_FAIL(vars[0] != 0)) { 17 16 printf("lookup #%d var[0]=%d\n", i, vars[0]); 18 - error_cnt++; 19 17 goto out; 20 18 } 21 19 rnd = vars[1]; ··· 22 24 continue; 23 25 printf("lookup #%d var[1]=%d var[%d]=%d\n", 24 26 i, rnd, j, vars[j]); 25 - error_cnt++; 27 + CHECK_FAIL(vars[j] != rnd); 26 28 goto out; 27 29 } 28 30 } ··· 40 42 void *ret; 41 43 42 44 err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); 43 - if (err) { 45 + if (CHECK_FAIL(err)) { 44 46 printf("test_map_lock:bpf_prog_load errno %d\n", errno); 45 47 goto close_prog; 46 48 } 47 49 map_fd[0] = bpf_find_map(__func__, obj, "hash_map"); 48 - if (map_fd[0] < 0) 50 + if (CHECK_FAIL(map_fd[0] < 0)) 49 51 goto close_prog; 50 52 map_fd[1] = bpf_find_map(__func__, obj, "array_map"); 51 - if (map_fd[1] < 0) 53 + if (CHECK_FAIL(map_fd[1] < 0)) 52 54 goto close_prog; 53 55 54 56 bpf_map_update_elem(map_fd[0], &key, vars, BPF_F_LOCK); 55 57 56 58 for (i = 0; i < 4; i++) 57 - assert(pthread_create(&thread_id[i], NULL, 58 - &spin_lock_thread, &prog_fd) == 0); 59 + if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, 60 + &spin_lock_thread, &prog_fd))) 61 + goto close_prog; 59 62 for (i = 4; i < 6; i++) 60 - assert(pthread_create(&thread_id[i], NULL, 61 - &parallel_map_access, &map_fd[i - 4]) == 0); 63 + if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, 64 + &parallel_map_access, 65 + &map_fd[i - 4]))) 66 + goto close_prog; 62 67 for (i = 0; i < 4; i++) 63 - assert(pthread_join(thread_id[i], &ret) == 0 && 64 - ret == (void *)&prog_fd); 68 + if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || 69 + ret != (void *)&prog_fd)) 70 + goto close_prog; 65 71 for (i = 4; i < 6; i++) 66 - assert(pthread_join(thread_id[i], &ret) == 0 && 67 - ret == (void *)&map_fd[i - 4]); 68 - goto close_prog_noerr; 72 + if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || 73 + ret != (void *)&map_fd[i - 4])) 74 + goto close_prog; 69 75 close_prog: 70 - error_cnt++; 71 - close_prog_noerr: 72 76 bpf_object__close(obj); 73 77 }
+1 -3
tools/testing/selftests/bpf/prog_tests/pkt_access.c
··· 9 9 int err, prog_fd; 10 10 11 11 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 12 - if (err) { 13 - error_cnt++; 12 + if (CHECK_FAIL(err)) 14 13 return; 15 - } 16 14 17 15 err = bpf_prog_test_run(prog_fd, 100000, &pkt_v4, sizeof(pkt_v4), 18 16 NULL, NULL, &retval, &duration);
+1 -3
tools/testing/selftests/bpf/prog_tests/pkt_md_access.c
··· 9 9 int err, prog_fd; 10 10 11 11 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 12 - if (err) { 13 - error_cnt++; 12 + if (CHECK_FAIL(err)) 14 13 return; 15 - } 16 14 17 15 err = bpf_prog_test_run(prog_fd, 10, &pkt_v4, sizeof(pkt_v4), 18 16 NULL, NULL, &retval, &duration);
+2 -6
tools/testing/selftests/bpf/prog_tests/queue_stack_map.c
··· 27 27 return; 28 28 29 29 err = bpf_prog_load(file, BPF_PROG_TYPE_SCHED_CLS, &obj, &prog_fd); 30 - if (err) { 31 - error_cnt++; 30 + if (CHECK_FAIL(err)) 32 31 return; 33 - } 34 32 35 33 map_in_fd = bpf_find_map(__func__, obj, "map_in"); 36 34 if (map_in_fd < 0) ··· 41 43 /* Push 32 elements to the input map */ 42 44 for (i = 0; i < MAP_SIZE; i++) { 43 45 err = bpf_map_update_elem(map_in_fd, NULL, &vals[i], 0); 44 - if (err) { 45 - error_cnt++; 46 + if (CHECK_FAIL(err)) 46 47 goto out; 47 - } 48 48 } 49 49 50 50 /* The eBPF program pushes iph.saddr in the output map,
+1 -3
tools/testing/selftests/bpf/prog_tests/reference_tracking.c
··· 10 10 int err = 0; 11 11 12 12 obj = bpf_object__open(file); 13 - if (IS_ERR(obj)) { 14 - error_cnt++; 13 + if (CHECK_FAIL(IS_ERR(obj))) 15 14 return; 16 - } 17 15 18 16 bpf_object__for_each_program(prog, obj) { 19 17 const char *title;
+20 -23
tools/testing/selftests/bpf/prog_tests/send_signal.c
··· 8 8 sigusr1_received++; 9 9 } 10 10 11 - static int test_send_signal_common(struct perf_event_attr *attr, 11 + static void test_send_signal_common(struct perf_event_attr *attr, 12 12 int prog_type, 13 13 const char *test_name) 14 14 { ··· 23 23 24 24 if (CHECK(pipe(pipe_c2p), test_name, 25 25 "pipe pipe_c2p error: %s\n", strerror(errno))) 26 - goto no_fork_done; 26 + return; 27 27 28 28 if (CHECK(pipe(pipe_p2c), test_name, 29 29 "pipe pipe_p2c error: %s\n", strerror(errno))) { 30 30 close(pipe_c2p[0]); 31 31 close(pipe_c2p[1]); 32 - goto no_fork_done; 32 + return; 33 33 } 34 34 35 35 pid = fork(); ··· 38 38 close(pipe_c2p[1]); 39 39 close(pipe_p2c[0]); 40 40 close(pipe_p2c[1]); 41 - goto no_fork_done; 41 + return; 42 42 } 43 43 44 44 if (pid == 0) { ··· 125 125 goto disable_pmu; 126 126 } 127 127 128 - err = CHECK(buf[0] != '2', test_name, "incorrect result\n"); 128 + CHECK(buf[0] != '2', test_name, "incorrect result\n"); 129 129 130 130 /* notify child safe to exit */ 131 131 write(pipe_p2c[1], buf, 1); ··· 138 138 close(pipe_c2p[0]); 139 139 close(pipe_p2c[1]); 140 140 wait(NULL); 141 - no_fork_done: 142 - return err; 143 141 } 144 142 145 - static int test_send_signal_tracepoint(void) 143 + static void test_send_signal_tracepoint(void) 146 144 { 147 145 const char *id_path = "/sys/kernel/debug/tracing/events/syscalls/sys_enter_nanosleep/id"; 148 146 struct perf_event_attr attr = { ··· 157 159 if (CHECK(efd < 0, "tracepoint", 158 160 "open syscalls/sys_enter_nanosleep/id failure: %s\n", 159 161 strerror(errno))) 160 - return -1; 162 + return; 161 163 162 164 bytes = read(efd, buf, sizeof(buf)); 163 165 close(efd); 164 166 if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "tracepoint", 165 167 "read syscalls/sys_enter_nanosleep/id failure: %s\n", 166 168 strerror(errno))) 167 - return -1; 169 + return; 168 170 169 171 attr.config = strtol(buf, NULL, 0); 170 172 171 - return test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint"); 173 + test_send_signal_common(&attr, BPF_PROG_TYPE_TRACEPOINT, "tracepoint"); 172 174 } 173 175 174 - static int test_send_signal_perf(void) 176 + static void test_send_signal_perf(void) 175 177 { 176 178 struct perf_event_attr attr = { 177 179 .sample_period = 1, ··· 179 181 .config = PERF_COUNT_SW_CPU_CLOCK, 180 182 }; 181 183 182 - return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, 183 - "perf_sw_event"); 184 + test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, 185 + "perf_sw_event"); 184 186 } 185 187 186 - static int test_send_signal_nmi(void) 188 + static void test_send_signal_nmi(void) 187 189 { 188 190 struct perf_event_attr attr = { 189 191 .sample_freq = 50, ··· 202 204 if (errno == ENOENT) { 203 205 printf("%s:SKIP:no PERF_COUNT_HW_CPU_CYCLES\n", 204 206 __func__); 205 - return 0; 207 + test__skip(); 208 + return; 206 209 } 207 210 /* Let the test fail with a more informative message */ 208 211 } else { 209 212 close(pmu_fd); 210 213 } 211 214 212 - return test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, 213 - "perf_hw_event"); 215 + test_send_signal_common(&attr, BPF_PROG_TYPE_PERF_EVENT, 216 + "perf_hw_event"); 214 217 } 215 218 216 219 void test_send_signal(void) 217 220 { 218 - int ret = 0; 219 - 220 221 if (test__start_subtest("send_signal_tracepoint")) 221 - ret |= test_send_signal_tracepoint(); 222 + test_send_signal_tracepoint(); 222 223 if (test__start_subtest("send_signal_perf")) 223 - ret |= test_send_signal_perf(); 224 + test_send_signal_perf(); 224 225 if (test__start_subtest("send_signal_nmi")) 
225 - ret |= test_send_signal_nmi(); 226 + test_send_signal_nmi(); 226 227 }
+8 -8
tools/testing/selftests/bpf/prog_tests/spinlock.c
··· 11 11 void *ret; 12 12 13 13 err = bpf_prog_load(file, BPF_PROG_TYPE_CGROUP_SKB, &obj, &prog_fd); 14 - if (err) { 14 + if (CHECK_FAIL(err)) { 15 15 printf("test_spin_lock:bpf_prog_load errno %d\n", errno); 16 16 goto close_prog; 17 17 } 18 18 for (i = 0; i < 4; i++) 19 - assert(pthread_create(&thread_id[i], NULL, 20 - &spin_lock_thread, &prog_fd) == 0); 19 + if (CHECK_FAIL(pthread_create(&thread_id[i], NULL, 20 + &spin_lock_thread, &prog_fd))) 21 + goto close_prog; 22 + 21 23 for (i = 0; i < 4; i++) 22 - assert(pthread_join(thread_id[i], &ret) == 0 && 23 - ret == (void *)&prog_fd); 24 - goto close_prog_noerr; 24 + if (CHECK_FAIL(pthread_join(thread_id[i], &ret) || 25 + ret != (void *)&prog_fd)) 26 + goto close_prog; 25 27 close_prog: 26 - error_cnt++; 27 - close_prog_noerr: 28 28 bpf_object__close(obj); 29 29 }
+4 -3
tools/testing/selftests/bpf/prog_tests/stacktrace_build_id.c
··· 51 51 "err %d errno %d\n", err, errno)) 52 52 goto disable_pmu; 53 53 54 - assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") 55 - == 0); 56 - assert(system("./urandom_read") == 0); 54 + if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null"))) 55 + goto disable_pmu; 56 + if (CHECK_FAIL(system("./urandom_read"))) 57 + goto disable_pmu; 57 58 /* disable stack trace collection */ 58 59 key = 0; 59 60 val = 1;
+4 -3
tools/testing/selftests/bpf/prog_tests/stacktrace_build_id_nmi.c
··· 82 82 "err %d errno %d\n", err, errno)) 83 83 goto disable_pmu; 84 84 85 - assert(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null") 86 - == 0); 87 - assert(system("taskset 0x1 ./urandom_read 100000") == 0); 85 + if (CHECK_FAIL(system("dd if=/dev/urandom of=/dev/zero count=4 2> /dev/null"))) 86 + goto disable_pmu; 87 + if (CHECK_FAIL(system("taskset 0x1 ./urandom_read 100000"))) 88 + goto disable_pmu; 88 89 /* disable stack trace collection */ 89 90 key = 0; 90 91 val = 1;
+7 -10
tools/testing/selftests/bpf/prog_tests/stacktrace_map.c
··· 26 26 27 27 /* find map fds */ 28 28 control_map_fd = bpf_find_map(__func__, obj, "control_map"); 29 - if (control_map_fd < 0) 29 + if (CHECK_FAIL(control_map_fd < 0)) 30 30 goto disable_pmu; 31 31 32 32 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); 33 - if (stackid_hmap_fd < 0) 33 + if (CHECK_FAIL(stackid_hmap_fd < 0)) 34 34 goto disable_pmu; 35 35 36 36 stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); 37 - if (stackmap_fd < 0) 37 + if (CHECK_FAIL(stackmap_fd < 0)) 38 38 goto disable_pmu; 39 39 40 40 stack_amap_fd = bpf_find_map(__func__, obj, "stack_amap"); 41 - if (stack_amap_fd < 0) 41 + if (CHECK_FAIL(stack_amap_fd < 0)) 42 42 goto disable_pmu; 43 43 44 44 /* give some time for bpf program run */ ··· 55 55 err = compare_map_keys(stackid_hmap_fd, stackmap_fd); 56 56 if (CHECK(err, "compare_map_keys stackid_hmap vs. stackmap", 57 57 "err %d errno %d\n", err, errno)) 58 - goto disable_pmu_noerr; 58 + goto disable_pmu; 59 59 60 60 err = compare_map_keys(stackmap_fd, stackid_hmap_fd); 61 61 if (CHECK(err, "compare_map_keys stackmap vs. stackid_hmap", 62 62 "err %d errno %d\n", err, errno)) 63 - goto disable_pmu_noerr; 63 + goto disable_pmu; 64 64 65 65 stack_trace_len = PERF_MAX_STACK_DEPTH * sizeof(__u64); 66 66 err = compare_stack_ips(stackmap_fd, stack_amap_fd, stack_trace_len); 67 67 if (CHECK(err, "compare_stack_ips stackmap vs. stack_amap", 68 68 "err %d errno %d\n", err, errno)) 69 - goto disable_pmu_noerr; 69 + goto disable_pmu; 70 70 71 - goto disable_pmu_noerr; 72 71 disable_pmu: 73 - error_cnt++; 74 - disable_pmu_noerr: 75 72 bpf_link__destroy(link); 76 73 close_prog: 77 74 bpf_object__close(obj);
+3 -6
tools/testing/selftests/bpf/prog_tests/stacktrace_map_raw_tp.c
··· 26 26 27 27 /* find map fds */ 28 28 control_map_fd = bpf_find_map(__func__, obj, "control_map"); 29 - if (control_map_fd < 0) 29 + if (CHECK_FAIL(control_map_fd < 0)) 30 30 goto close_prog; 31 31 32 32 stackid_hmap_fd = bpf_find_map(__func__, obj, "stackid_hmap"); 33 - if (stackid_hmap_fd < 0) 33 + if (CHECK_FAIL(stackid_hmap_fd < 0)) 34 34 goto close_prog; 35 35 36 36 stackmap_fd = bpf_find_map(__func__, obj, "stackmap"); 37 - if (stackmap_fd < 0) 37 + if (CHECK_FAIL(stackmap_fd < 0)) 38 38 goto close_prog; 39 39 40 40 /* give some time for bpf program run */ ··· 58 58 "err %d errno %d\n", err, errno)) 59 59 goto close_prog; 60 60 61 - goto close_prog_noerr; 62 61 close_prog: 63 - error_cnt++; 64 - close_prog_noerr: 65 62 if (!IS_ERR_OR_NULL(link)) 66 63 bpf_link__destroy(link); 67 64 bpf_object__close(obj);
-3
tools/testing/selftests/bpf/prog_tests/task_fd_query_rawtp.c
··· 70 70 if (CHECK(!err, "check_results", "fd_type %d len %u\n", fd_type, len)) 71 71 goto close_prog; 72 72 73 - goto close_prog_noerr; 74 73 close_prog: 75 - error_cnt++; 76 - close_prog_noerr: 77 74 bpf_object__close(obj); 78 75 }
-5
tools/testing/selftests/bpf/prog_tests/task_fd_query_tp.c
··· 62 62 fd_type, buf)) 63 63 goto close_pmu; 64 64 65 - close(pmu_fd); 66 - goto close_prog_noerr; 67 - 68 65 close_pmu: 69 66 close(pmu_fd); 70 67 close_prog: 71 - error_cnt++; 72 - close_prog_noerr: 73 68 bpf_object__close(obj); 74 69 } 75 70
+1 -3
tools/testing/selftests/bpf/prog_tests/tcp_estats.c
··· 10 10 11 11 err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd); 12 12 CHECK(err, "", "err %d errno %d\n", err, errno); 13 - if (err) { 14 - error_cnt++; 13 + if (err) 15 14 return; 16 - } 17 15 18 16 bpf_object__close(obj); 19 17 }
+1 -3
tools/testing/selftests/bpf/prog_tests/xdp.c
··· 16 16 int err, prog_fd, map_fd; 17 17 18 18 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 19 - if (err) { 20 - error_cnt++; 19 + if (CHECK_FAIL(err)) 21 20 return; 22 - } 23 21 24 22 map_fd = bpf_find_map(__func__, obj, "vip2tnl"); 25 23 if (map_fd < 0)
+1 -3
tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c
··· 10 10 int err, prog_fd; 11 11 12 12 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 13 - if (err) { 14 - error_cnt++; 13 + if (CHECK_FAIL(err)) 15 14 return; 16 - } 17 15 18 16 err = bpf_prog_test_run(prog_fd, 1, &pkt_v4, sizeof(pkt_v4), 19 17 buf, &size, &retval, &duration);
+3 -5
tools/testing/selftests/bpf/prog_tests/xdp_noinline.c
··· 31 31 u32 *magic = (u32 *)buf; 32 32 33 33 err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); 34 - if (err) { 35 - error_cnt++; 34 + if (CHECK_FAIL(err)) 36 35 return; 37 - } 38 36 39 37 map_fd = bpf_find_map(__func__, obj, "vip_map"); 40 38 if (map_fd < 0) ··· 71 73 bytes += stats[i].bytes; 72 74 pkts += stats[i].pkts; 73 75 } 74 - if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { 75 - error_cnt++; 76 + if (CHECK_FAIL(bytes != MAGIC_BYTES * NUM_ITER * 2 || 77 + pkts != NUM_ITER * 2)) { 76 78 printf("test_xdp_noinline:FAIL:stats %lld %lld\n", 77 79 bytes, pkts); 78 80 }
+97
tools/testing/selftests/bpf/progs/sockopt_inherit.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <linux/bpf.h> 3 + #include "bpf_helpers.h" 4 + 5 + char _license[] SEC("license") = "GPL"; 6 + __u32 _version SEC("version") = 1; 7 + 8 + #define SOL_CUSTOM 0xdeadbeef 9 + #define CUSTOM_INHERIT1 0 10 + #define CUSTOM_INHERIT2 1 11 + #define CUSTOM_LISTENER 2 12 + 13 + struct sockopt_inherit { 14 + __u8 val; 15 + }; 16 + 17 + struct { 18 + __uint(type, BPF_MAP_TYPE_SK_STORAGE); 19 + __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE); 20 + __type(key, int); 21 + __type(value, struct sockopt_inherit); 22 + } cloned1_map SEC(".maps"); 23 + 24 + struct { 25 + __uint(type, BPF_MAP_TYPE_SK_STORAGE); 26 + __uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE); 27 + __type(key, int); 28 + __type(value, struct sockopt_inherit); 29 + } cloned2_map SEC(".maps"); 30 + 31 + struct { 32 + __uint(type, BPF_MAP_TYPE_SK_STORAGE); 33 + __uint(map_flags, BPF_F_NO_PREALLOC); 34 + __type(key, int); 35 + __type(value, struct sockopt_inherit); 36 + } listener_only_map SEC(".maps"); 37 + 38 + static __inline struct sockopt_inherit *get_storage(struct bpf_sockopt *ctx) 39 + { 40 + if (ctx->optname == CUSTOM_INHERIT1) 41 + return bpf_sk_storage_get(&cloned1_map, ctx->sk, 0, 42 + BPF_SK_STORAGE_GET_F_CREATE); 43 + else if (ctx->optname == CUSTOM_INHERIT2) 44 + return bpf_sk_storage_get(&cloned2_map, ctx->sk, 0, 45 + BPF_SK_STORAGE_GET_F_CREATE); 46 + else 47 + return bpf_sk_storage_get(&listener_only_map, ctx->sk, 0, 48 + BPF_SK_STORAGE_GET_F_CREATE); 49 + } 50 + 51 + SEC("cgroup/getsockopt") 52 + int _getsockopt(struct bpf_sockopt *ctx) 53 + { 54 + __u8 *optval_end = ctx->optval_end; 55 + struct sockopt_inherit *storage; 56 + __u8 *optval = ctx->optval; 57 + 58 + if (ctx->level != SOL_CUSTOM) 59 + return 1; /* only interested in SOL_CUSTOM */ 60 + 61 + if (optval + 1 > optval_end) 62 + return 0; /* EPERM, bounds check */ 63 + 64 + storage = get_storage(ctx); 65 + if (!storage) 66 + return 0; /* EPERM, couldn't get sk storage */ 67 + 68 + ctx->retval = 0; /* Reset system call return value to zero */ 69 + 70 + optval[0] = storage->val; 71 + ctx->optlen = 1; 72 + 73 + return 1; 74 + } 75 + 76 + SEC("cgroup/setsockopt") 77 + int _setsockopt(struct bpf_sockopt *ctx) 78 + { 79 + __u8 *optval_end = ctx->optval_end; 80 + struct sockopt_inherit *storage; 81 + __u8 *optval = ctx->optval; 82 + 83 + if (ctx->level != SOL_CUSTOM) 84 + return 1; /* only interested in SOL_CUSTOM */ 85 + 86 + if (optval + 1 > optval_end) 87 + return 0; /* EPERM, bounds check */ 88 + 89 + storage = get_storage(ctx); 90 + if (!storage) 91 + return 0; /* EPERM, couldn't get sk storage */ 92 + 93 + storage->val = optval[0]; 94 + ctx->optlen = -1; 95 + 96 + return 1; 97 + }
+6 -10
tools/testing/selftests/bpf/progs/test_lwt_seg6local.c
··· 12 12 13 13 #define SR6_FLAG_ALERT (1 << 4) 14 14 15 - #define htonll(x) ((bpf_htonl(1)) == 1 ? (x) : ((uint64_t)bpf_htonl((x) & \ 16 - 0xFFFFFFFF) << 32) | bpf_htonl((x) >> 32)) 17 - #define ntohll(x) ((bpf_ntohl(1)) == 1 ? (x) : ((uint64_t)bpf_ntohl((x) & \ 18 - 0xFFFFFFFF) << 32) | bpf_ntohl((x) >> 32)) 19 15 #define BPF_PACKET_HEADER __attribute__((packed)) 20 16 21 17 struct ip6_t { ··· 272 276 return 0; 273 277 274 278 // check if egress TLV value is correct 275 - if (ntohll(egr_addr.hi) == 0xfd00000000000000 && 276 - ntohll(egr_addr.lo) == 0x4) 279 + if (bpf_be64_to_cpu(egr_addr.hi) == 0xfd00000000000000 && 280 + bpf_be64_to_cpu(egr_addr.lo) == 0x4) 277 281 return 1; 278 282 } 279 283 ··· 304 308 305 309 #pragma clang loop unroll(full) 306 310 for (unsigned long long lo = 0; lo < 4; lo++) { 307 - seg->lo = htonll(4 - lo); 308 - seg->hi = htonll(hi); 311 + seg->lo = bpf_cpu_to_be64(4 - lo); 312 + seg->hi = bpf_cpu_to_be64(hi); 309 313 seg = (struct ip6_addr_t *)((char *)seg + sizeof(*seg)); 310 314 } 311 315 ··· 345 349 if (err) 346 350 return BPF_DROP; 347 351 348 - addr.lo = htonll(lo); 349 - addr.hi = htonll(hi); 352 + addr.lo = bpf_cpu_to_be64(lo); 353 + addr.hi = bpf_cpu_to_be64(hi); 350 354 err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X, 351 355 (void *)&addr, sizeof(addr)); 352 356 if (err)
+2 -6
tools/testing/selftests/bpf/progs/test_seg6_loop.c
··· 12 12 13 13 #define SR6_FLAG_ALERT (1 << 4) 14 14 15 - #define htonll(x) ((bpf_htonl(1)) == 1 ? (x) : ((uint64_t)bpf_htonl((x) & \ 16 - 0xFFFFFFFF) << 32) | bpf_htonl((x) >> 32)) 17 - #define ntohll(x) ((bpf_ntohl(1)) == 1 ? (x) : ((uint64_t)bpf_ntohl((x) & \ 18 - 0xFFFFFFFF) << 32) | bpf_ntohl((x) >> 32)) 19 15 #define BPF_PACKET_HEADER __attribute__((packed)) 20 16 21 17 struct ip6_t { ··· 247 251 if (err) 248 252 return BPF_DROP; 249 253 250 - addr.lo = htonll(lo); 251 - addr.hi = htonll(hi); 254 + addr.lo = bpf_cpu_to_be64(lo); 255 + addr.hi = bpf_cpu_to_be64(hi); 252 256 err = bpf_lwt_seg6_action(skb, SEG6_LOCAL_ACTION_END_X, 253 257 (void *)&addr, sizeof(addr)); 254 258 if (err)
+143
tools/testing/selftests/bpf/test_bpftool_build.sh
··· 1 + #!/bin/bash 2 + # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 3 + 4 + ERROR=0 5 + TMPDIR= 6 + 7 + # If one build fails, continue but return non-0 on exit. 8 + return_value() { 9 + if [ -d "$TMPDIR" ] ; then 10 + rm -rf -- $TMPDIR 11 + fi 12 + exit $ERROR 13 + } 14 + trap return_value EXIT 15 + 16 + case $1 in 17 + -h|--help) 18 + echo -e "$0 [-j <n>]" 19 + echo -e "\tTest the different ways of building bpftool." 20 + echo -e "" 21 + echo -e "\tOptions:" 22 + echo -e "\t\t-j <n>:\tPass -j flag to 'make'." 23 + exit 24 + ;; 25 + esac 26 + 27 + J=$* 28 + 29 + # Assume script is located under tools/testing/selftests/bpf/. We want to start 30 + # build attempts from the top of kernel repository. 31 + SCRIPT_REL_PATH=$(realpath --relative-to=$PWD $0) 32 + SCRIPT_REL_DIR=$(dirname $SCRIPT_REL_PATH) 33 + KDIR_ROOT_DIR=$(realpath $PWD/$SCRIPT_REL_DIR/../../../../) 34 + cd $KDIR_ROOT_DIR 35 + 36 + check() { 37 + local dir=$(realpath $1) 38 + 39 + echo -n "binary: " 40 + # Returns non-null if file is found (and "false" is run) 41 + find $dir -type f -executable -name bpftool -print -exec false {} + && \ 42 + ERROR=1 && printf "FAILURE: Did not find bpftool\n" 43 + } 44 + 45 + make_and_clean() { 46 + echo -e "\$PWD: $PWD" 47 + echo -e "command: make -s $* >/dev/null" 48 + make $J -s $* >/dev/null 49 + if [ $? -ne 0 ] ; then 50 + ERROR=1 51 + fi 52 + if [ $# -ge 1 ] ; then 53 + check ${@: -1} 54 + else 55 + check . 56 + fi 57 + ( 58 + if [ $# -ge 1 ] ; then 59 + cd ${@: -1} 60 + fi 61 + make -s clean 62 + ) 63 + echo 64 + } 65 + 66 + make_with_tmpdir() { 67 + local ARGS 68 + 69 + TMPDIR=$(mktemp -d) 70 + if [ $# -ge 2 ] ; then 71 + ARGS=${@:1:(($# - 1))} 72 + fi 73 + echo -e "\$PWD: $PWD" 74 + echo -e "command: make -s $ARGS ${@: -1}=$TMPDIR/ >/dev/null" 75 + make $J -s $ARGS ${@: -1}=$TMPDIR/ >/dev/null 76 + if [ $? -ne 0 ] ; then 77 + ERROR=1 78 + fi 79 + check $TMPDIR 80 + rm -rf -- $TMPDIR 81 + echo 82 + } 83 + 84 + echo "Trying to build bpftool" 85 + echo -e "... through kbuild\n" 86 + 87 + if [ -f ".config" ] ; then 88 + make_and_clean tools/bpf 89 + 90 + ## $OUTPUT is overwritten in kbuild Makefile, and thus cannot be passed 91 + ## down from toplevel Makefile to bpftool's Makefile. 92 + 93 + # make_with_tmpdir tools/bpf OUTPUT 94 + echo -e "skip: make tools/bpf OUTPUT=<dir> (not supported)\n" 95 + 96 + make_with_tmpdir tools/bpf O 97 + else 98 + echo -e "skip: make tools/bpf (no .config found)\n" 99 + echo -e "skip: make tools/bpf OUTPUT=<dir> (not supported)\n" 100 + echo -e "skip: make tools/bpf O=<dir> (no .config found)\n" 101 + fi 102 + 103 + echo -e "... from kernel source tree\n" 104 + 105 + make_and_clean -C tools/bpf/bpftool 106 + 107 + make_with_tmpdir -C tools/bpf/bpftool OUTPUT 108 + 109 + make_with_tmpdir -C tools/bpf/bpftool O 110 + 111 + echo -e "... from tools/\n" 112 + cd tools/ 113 + 114 + make_and_clean bpf 115 + 116 + ## In tools/bpf/Makefile, function "descend" is called and passes $(O) and 117 + ## $(OUTPUT). We would like $(OUTPUT) to have "bpf/bpftool/" appended before 118 + ## calling bpftool's Makefile, but this is not the case as the "descend" 119 + ## function focuses on $(O)/$(subdir). However, in the present case, updating 120 + ## $(O) to have $(OUTPUT) recomputed from it in bpftool's Makefile does not 121 + ## work, because $(O) is not defined from command line and $(OUTPUT) is not 122 + ## updated in tools/scripts/Makefile.include. 
123 + ## 124 + ## Workarounds would require to a) edit "descend" or use an alternative way to 125 + ## call bpftool's Makefile, b) modify the conditions to update $(OUTPUT) and 126 + ## other variables in tools/scripts/Makefile.include (at the risk of breaking 127 + ## the build of other tools), or c) append manually the "bpf/bpftool" suffix to 128 + ## $(OUTPUT) in bpf's Makefile, which may break if targets for other directories 129 + ## use "descend" in the future. 130 + 131 + # make_with_tmpdir bpf OUTPUT 132 + echo -e "skip: make bpf OUTPUT=<dir> (not supported)\n" 133 + 134 + make_with_tmpdir bpf O 135 + 136 + echo -e "... from bpftool's dir\n" 137 + cd bpf/bpftool 138 + 139 + make_and_clean 140 + 141 + make_with_tmpdir OUTPUT 142 + 143 + make_with_tmpdir O
+1 -1
tools/testing/selftests/bpf/test_offload.py
··· 1353 1353 bpftool_prog_list_wait(expected=1) 1354 1354 1355 1355 ifnameB = bpftool("prog show %s" % (progB))[1]["dev"]["ifname"] 1356 - fail(ifnameB != simB1['ifname'], "program not bound to originial device") 1356 + fail(ifnameB != simB1['ifname'], "program not bound to original device") 1357 1357 simB1.remove() 1358 1358 bpftool_prog_list_wait(expected=1) 1359 1359
+28 -14
tools/testing/selftests/bpf/test_progs.c
··· 8 8 9 9 /* defined in test_progs.h */ 10 10 struct test_env env; 11 - int error_cnt, pass_cnt; 12 11 13 12 struct prog_test_def { 14 13 const char *test_name; 15 14 int test_num; 16 15 void (*run_test)(void); 17 16 bool force_log; 18 - int pass_cnt; 19 17 int error_cnt; 18 + int skip_cnt; 20 19 bool tested; 21 20 22 21 const char *subtest_name; 23 22 int subtest_num; 24 23 25 24 /* store counts before subtest started */ 26 - int old_pass_cnt; 27 25 int old_error_cnt; 28 26 }; 29 27 ··· 45 47 46 48 if (env.verbose || test->force_log || failed) { 47 49 if (env.log_cnt) { 50 + env.log_buf[env.log_cnt] = '\0'; 48 51 fprintf(env.stdout, "%s", env.log_buf); 49 52 if (env.log_buf[env.log_cnt - 1] != '\n') 50 53 fprintf(env.stdout, "\n"); ··· 55 56 fseeko(stdout, 0, SEEK_SET); /* rewind */ 56 57 } 57 58 59 + static void skip_account(void) 60 + { 61 + if (env.test->skip_cnt) { 62 + env.skip_cnt++; 63 + env.test->skip_cnt = 0; 64 + } 65 + } 66 + 58 67 void test__end_subtest() 59 68 { 60 69 struct prog_test_def *test = env.test; 61 - int sub_error_cnt = error_cnt - test->old_error_cnt; 70 + int sub_error_cnt = test->error_cnt - test->old_error_cnt; 62 71 63 72 if (sub_error_cnt) 64 73 env.fail_cnt++; 65 74 else 66 75 env.sub_succ_cnt++; 76 + skip_account(); 67 77 68 78 dump_test_log(test, sub_error_cnt); 69 79 ··· 103 95 return false; 104 96 105 97 test->subtest_name = name; 106 - env.test->old_pass_cnt = pass_cnt; 107 - env.test->old_error_cnt = error_cnt; 98 + env.test->old_error_cnt = env.test->error_cnt; 108 99 109 100 return true; 110 101 } 111 102 112 103 void test__force_log() { 113 104 env.test->force_log = true; 105 + } 106 + 107 + void test__skip(void) 108 + { 109 + env.test->skip_cnt++; 110 + } 111 + 112 + void test__fail(void) 113 + { 114 + env.test->error_cnt++; 114 115 } 115 116 116 117 struct ipv4_packet pkt_v4 = { ··· 146 129 map = bpf_object__find_map_by_name(obj, name); 147 130 if (!map) { 148 131 printf("%s:FAIL:map '%s' not found\n", test, name); 149 - error_cnt++; 132 + test__fail(); 150 133 return -1; 151 134 } 152 135 return bpf_map__fd(map); ··· 505 488 stdio_hijack(); 506 489 for (i = 0; i < prog_test_cnt; i++) { 507 490 struct prog_test_def *test = &prog_test_defs[i]; 508 - int old_pass_cnt = pass_cnt; 509 - int old_error_cnt = error_cnt; 510 491 511 492 env.test = test; 512 493 test->test_num = i + 1; ··· 519 504 test__end_subtest(); 520 505 521 506 test->tested = true; 522 - test->pass_cnt = pass_cnt - old_pass_cnt; 523 - test->error_cnt = error_cnt - old_error_cnt; 524 507 if (test->error_cnt) 525 508 env.fail_cnt++; 526 509 else 527 510 env.succ_cnt++; 511 + skip_account(); 528 512 529 513 dump_test_log(test, test->error_cnt); 530 514 ··· 532 518 test->error_cnt ? "FAIL" : "OK"); 533 519 } 534 520 stdio_restore(); 535 - printf("Summary: %d/%d PASSED, %d FAILED\n", 536 - env.succ_cnt, env.sub_succ_cnt, env.fail_cnt); 521 + printf("Summary: %d/%d PASSED, %d SKIPPED, %d FAILED\n", 522 + env.succ_cnt, env.sub_succ_cnt, env.skip_cnt, env.fail_cnt); 537 523 538 524 free(env.test_selector.num_set); 539 525 free(env.subtest_selector.num_set); 540 526 541 - return error_cnt ? EXIT_FAILURE : EXIT_SUCCESS; 527 + return env.fail_cnt ? EXIT_FAILURE : EXIT_SUCCESS; 542 528 }
+13 -6
tools/testing/selftests/bpf/test_progs.h
··· 38 38 #include "trace_helpers.h" 39 39 #include "flow_dissector_load.h" 40 40 41 - struct prog_test_def; 42 - 43 41 struct test_selector { 44 42 const char *name; 45 43 bool *num_set; ··· 62 64 int succ_cnt; /* successful tests */ 63 65 int sub_succ_cnt; /* successful sub-tests */ 64 66 int fail_cnt; /* total failed tests + sub-tests */ 67 + int skip_cnt; /* skipped tests */ 65 68 }; 66 69 67 - extern int error_cnt; 68 - extern int pass_cnt; 69 70 extern struct test_env env; 70 71 71 72 extern void test__force_log(); 72 73 extern bool test__start_subtest(const char *name); 74 + extern void test__skip(void); 75 + extern void test__fail(void); 73 76 74 77 #define MAGIC_BYTES 123 75 78 ··· 93 94 #define _CHECK(condition, tag, duration, format...) ({ \ 94 95 int __ret = !!(condition); \ 95 96 if (__ret) { \ 96 - error_cnt++; \ 97 + test__fail(); \ 97 98 printf("%s:FAIL:%s ", __func__, tag); \ 98 99 printf(format); \ 99 100 } else { \ 100 - pass_cnt++; \ 101 101 printf("%s:PASS:%s %d nsec\n", \ 102 102 __func__, tag, duration); \ 103 + } \ 104 + __ret; \ 105 + }) 106 + 107 + #define CHECK_FAIL(condition) ({ \ 108 + int __ret = !!(condition); \ 109 + if (__ret) { \ 110 + test__fail(); \ 111 + printf("%s:FAIL:%d\n", __func__, __LINE__); \ 103 112 } \ 104 113 __ret; \ 105 114 })
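Note: CHECK_FAIL() replaces the old pattern of bumping the global error_cnt by hand, and test__skip()/test__fail() feed the per-test counters that test_progs.c now sums up. A minimal sketch of the intended usage in a converted subtest (hypothetical test and probe names, not code from this series):

    void test_example_feature(void)
    {
            struct bpf_object *obj;
            int err, prog_fd;

            if (kernel_lacks_feature()) {   /* hypothetical probe */
                    test__skip();           /* counted as SKIPPED in the summary */
                    return;
            }

            err = bpf_prog_load("./example.o", BPF_PROG_TYPE_SCHED_CLS,
                                &obj, &prog_fd);
            if (CHECK_FAIL(err))            /* calls test__fail(), prints func:line */
                    return;

            /* ... exercise the program ... */

            bpf_object__close(obj);
    }

With these counters in place the summary line printed at the end of test_progs.c becomes "Summary: %d/%d PASSED, %d SKIPPED, %d FAILED".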
+253
tools/testing/selftests/bpf/test_sockopt_inherit.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + #include <error.h> 3 + #include <errno.h> 4 + #include <stdio.h> 5 + #include <unistd.h> 6 + #include <sys/types.h> 7 + #include <sys/socket.h> 8 + #include <netinet/in.h> 9 + #include <pthread.h> 10 + 11 + #include <linux/filter.h> 12 + #include <bpf/bpf.h> 13 + #include <bpf/libbpf.h> 14 + 15 + #include "bpf_rlimit.h" 16 + #include "bpf_util.h" 17 + #include "cgroup_helpers.h" 18 + 19 + #define CG_PATH "/sockopt_inherit" 20 + #define SOL_CUSTOM 0xdeadbeef 21 + #define CUSTOM_INHERIT1 0 22 + #define CUSTOM_INHERIT2 1 23 + #define CUSTOM_LISTENER 2 24 + 25 + static int connect_to_server(int server_fd) 26 + { 27 + struct sockaddr_storage addr; 28 + socklen_t len = sizeof(addr); 29 + int fd; 30 + 31 + fd = socket(AF_INET, SOCK_STREAM, 0); 32 + if (fd < 0) { 33 + log_err("Failed to create client socket"); 34 + return -1; 35 + } 36 + 37 + if (getsockname(server_fd, (struct sockaddr *)&addr, &len)) { 38 + log_err("Failed to get server addr"); 39 + goto out; 40 + } 41 + 42 + if (connect(fd, (const struct sockaddr *)&addr, len) < 0) { 43 + log_err("Fail to connect to server"); 44 + goto out; 45 + } 46 + 47 + return fd; 48 + 49 + out: 50 + close(fd); 51 + return -1; 52 + } 53 + 54 + static int verify_sockopt(int fd, int optname, const char *msg, char expected) 55 + { 56 + socklen_t optlen = 1; 57 + char buf = 0; 58 + int err; 59 + 60 + err = getsockopt(fd, SOL_CUSTOM, optname, &buf, &optlen); 61 + if (err) { 62 + log_err("%s: failed to call getsockopt", msg); 63 + return 1; 64 + } 65 + 66 + printf("%s %d: got=0x%x ? expected=0x%x\n", msg, optname, buf, expected); 67 + 68 + if (buf != expected) { 69 + log_err("%s: unexpected getsockopt value %d != %d", msg, 70 + buf, expected); 71 + return 1; 72 + } 73 + 74 + return 0; 75 + } 76 + 77 + static void *server_thread(void *arg) 78 + { 79 + struct sockaddr_storage addr; 80 + socklen_t len = sizeof(addr); 81 + int fd = *(int *)arg; 82 + int client_fd; 83 + int err = 0; 84 + 85 + if (listen(fd, 1) < 0) 86 + error(1, errno, "Failed to listed on socket"); 87 + 88 + err += verify_sockopt(fd, CUSTOM_INHERIT1, "listen", 1); 89 + err += verify_sockopt(fd, CUSTOM_INHERIT2, "listen", 1); 90 + err += verify_sockopt(fd, CUSTOM_LISTENER, "listen", 1); 91 + 92 + client_fd = accept(fd, (struct sockaddr *)&addr, &len); 93 + if (client_fd < 0) 94 + error(1, errno, "Failed to accept client"); 95 + 96 + err += verify_sockopt(client_fd, CUSTOM_INHERIT1, "accept", 1); 97 + err += verify_sockopt(client_fd, CUSTOM_INHERIT2, "accept", 1); 98 + err += verify_sockopt(client_fd, CUSTOM_LISTENER, "accept", 0); 99 + 100 + close(client_fd); 101 + 102 + return (void *)(long)err; 103 + } 104 + 105 + static int start_server(void) 106 + { 107 + struct sockaddr_in addr = { 108 + .sin_family = AF_INET, 109 + .sin_addr.s_addr = htonl(INADDR_LOOPBACK), 110 + }; 111 + char buf; 112 + int err; 113 + int fd; 114 + int i; 115 + 116 + fd = socket(AF_INET, SOCK_STREAM, 0); 117 + if (fd < 0) { 118 + log_err("Failed to create server socket"); 119 + return -1; 120 + } 121 + 122 + for (i = CUSTOM_INHERIT1; i <= CUSTOM_LISTENER; i++) { 123 + buf = 0x01; 124 + err = setsockopt(fd, SOL_CUSTOM, i, &buf, 1); 125 + if (err) { 126 + log_err("Failed to call setsockopt(%d)", i); 127 + close(fd); 128 + return -1; 129 + } 130 + } 131 + 132 + if (bind(fd, (const struct sockaddr *)&addr, sizeof(addr)) < 0) { 133 + log_err("Failed to bind socket"); 134 + close(fd); 135 + return -1; 136 + } 137 + 138 + return fd; 139 + } 140 + 141 + static int prog_attach(struct 
bpf_object *obj, int cgroup_fd, const char *title) 142 + { 143 + enum bpf_attach_type attach_type; 144 + enum bpf_prog_type prog_type; 145 + struct bpf_program *prog; 146 + int err; 147 + 148 + err = libbpf_prog_type_by_name(title, &prog_type, &attach_type); 149 + if (err) { 150 + log_err("Failed to deduct types for %s BPF program", title); 151 + return -1; 152 + } 153 + 154 + prog = bpf_object__find_program_by_title(obj, title); 155 + if (!prog) { 156 + log_err("Failed to find %s BPF program", title); 157 + return -1; 158 + } 159 + 160 + err = bpf_prog_attach(bpf_program__fd(prog), cgroup_fd, 161 + attach_type, 0); 162 + if (err) { 163 + log_err("Failed to attach %s BPF program", title); 164 + return -1; 165 + } 166 + 167 + return 0; 168 + } 169 + 170 + static int run_test(int cgroup_fd) 171 + { 172 + struct bpf_prog_load_attr attr = { 173 + .file = "./sockopt_inherit.o", 174 + }; 175 + int server_fd = -1, client_fd; 176 + struct bpf_object *obj; 177 + void *server_err; 178 + pthread_t tid; 179 + int ignored; 180 + int err; 181 + 182 + err = bpf_prog_load_xattr(&attr, &obj, &ignored); 183 + if (err) { 184 + log_err("Failed to load BPF object"); 185 + return -1; 186 + } 187 + 188 + err = prog_attach(obj, cgroup_fd, "cgroup/getsockopt"); 189 + if (err) 190 + goto close_bpf_object; 191 + 192 + err = prog_attach(obj, cgroup_fd, "cgroup/setsockopt"); 193 + if (err) 194 + goto close_bpf_object; 195 + 196 + server_fd = start_server(); 197 + if (server_fd < 0) { 198 + err = -1; 199 + goto close_bpf_object; 200 + } 201 + 202 + pthread_create(&tid, NULL, server_thread, (void *)&server_fd); 203 + 204 + client_fd = connect_to_server(server_fd); 205 + if (client_fd < 0) { 206 + err = -1; 207 + goto close_server_fd; 208 + } 209 + 210 + err += verify_sockopt(client_fd, CUSTOM_INHERIT1, "connect", 0); 211 + err += verify_sockopt(client_fd, CUSTOM_INHERIT2, "connect", 0); 212 + err += verify_sockopt(client_fd, CUSTOM_LISTENER, "connect", 0); 213 + 214 + pthread_join(tid, &server_err); 215 + 216 + err += (int)(long)server_err; 217 + 218 + close(client_fd); 219 + 220 + close_server_fd: 221 + close(server_fd); 222 + close_bpf_object: 223 + bpf_object__close(obj); 224 + return err; 225 + } 226 + 227 + int main(int args, char **argv) 228 + { 229 + int cgroup_fd; 230 + int err = EXIT_SUCCESS; 231 + 232 + if (setup_cgroup_environment()) 233 + return err; 234 + 235 + cgroup_fd = create_and_get_cgroup(CG_PATH); 236 + if (cgroup_fd < 0) 237 + goto cleanup_cgroup_env; 238 + 239 + if (join_cgroup(CG_PATH)) 240 + goto cleanup_cgroup; 241 + 242 + if (run_test(cgroup_fd)) 243 + err = EXIT_FAILURE; 244 + 245 + printf("test_sockopt_inherit: %s\n", 246 + err == EXIT_SUCCESS ? "PASSED" : "FAILED"); 247 + 248 + cleanup_cgroup: 249 + close(cgroup_fd); 250 + cleanup_cgroup_env: 251 + cleanup_cgroup_environment(); 252 + return err; 253 + }
+85 -45
tools/testing/selftests/bpf/test_sysctl.c
··· 13 13 #include <bpf/bpf.h> 14 14 #include <bpf/libbpf.h> 15 15 16 + #include "bpf_endian.h" 16 17 #include "bpf_rlimit.h" 17 18 #include "bpf_util.h" 18 19 #include "cgroup_helpers.h" ··· 101 100 .descr = "ctx:write sysctl:write read ok", 102 101 .insns = { 103 102 /* If (write) */ 104 - BPF_LDX_MEM(BPF_B, BPF_REG_7, BPF_REG_1, 103 + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 105 104 offsetof(struct bpf_sysctl, write)), 106 105 BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 1, 2), 107 106 ··· 215 214 /* if (ret == expected && */ 216 215 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, sizeof("tcp_mem") - 1, 6), 217 216 /* buf == "tcp_mem\0") */ 218 - BPF_LD_IMM64(BPF_REG_8, 0x006d656d5f706374ULL), 217 + BPF_LD_IMM64(BPF_REG_8, 218 + bpf_be64_to_cpu(0x7463705f6d656d00ULL)), 219 219 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0), 220 220 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2), 221 221 ··· 257 255 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 6), 258 256 259 257 /* buf[0:7] == "tcp_me\0") */ 260 - BPF_LD_IMM64(BPF_REG_8, 0x00656d5f706374ULL), 258 + BPF_LD_IMM64(BPF_REG_8, 259 + bpf_be64_to_cpu(0x7463705f6d650000ULL)), 261 260 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0), 262 261 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2), 263 262 ··· 301 298 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 16, 14), 302 299 303 300 /* buf[0:8] == "net/ipv4" && */ 304 - BPF_LD_IMM64(BPF_REG_8, 0x347670692f74656eULL), 301 + BPF_LD_IMM64(BPF_REG_8, 302 + bpf_be64_to_cpu(0x6e65742f69707634ULL)), 305 303 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0), 306 304 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 10), 307 305 308 306 /* buf[8:16] == "/tcp_mem" && */ 309 - BPF_LD_IMM64(BPF_REG_8, 0x6d656d5f7063742fULL), 307 + BPF_LD_IMM64(BPF_REG_8, 308 + bpf_be64_to_cpu(0x2f7463705f6d656dULL)), 310 309 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 8), 311 310 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 6), 312 311 ··· 355 350 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 10), 356 351 357 352 /* buf[0:8] == "net/ipv4" && */ 358 - BPF_LD_IMM64(BPF_REG_8, 0x347670692f74656eULL), 353 + BPF_LD_IMM64(BPF_REG_8, 354 + bpf_be64_to_cpu(0x6e65742f69707634ULL)), 359 355 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0), 360 356 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 6), 361 357 362 358 /* buf[8:16] == "/tcp_me\0") */ 363 - BPF_LD_IMM64(BPF_REG_8, 0x00656d5f7063742fULL), 359 + BPF_LD_IMM64(BPF_REG_8, 360 + bpf_be64_to_cpu(0x2f7463705f6d6500ULL)), 364 361 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 8), 365 362 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2), 366 363 ··· 403 396 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 6), 404 397 405 398 /* buf[0:8] == "net/ip\0") */ 406 - BPF_LD_IMM64(BPF_REG_8, 0x000070692f74656eULL), 399 + BPF_LD_IMM64(BPF_REG_8, 400 + bpf_be64_to_cpu(0x6e65742f69700000ULL)), 407 401 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0), 408 402 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2), 409 403 ··· 439 431 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 6, 6), 440 432 441 433 /* buf[0:6] == "Linux\n\0") */ 442 - BPF_LD_IMM64(BPF_REG_8, 0x000a78756e694cULL), 434 + BPF_LD_IMM64(BPF_REG_8, 435 + bpf_be64_to_cpu(0x4c696e75780a0000ULL)), 443 436 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0), 444 437 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2), 445 438 ··· 478 469 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 6, 6), 479 470 480 471 /* buf[0:6] == "Linux\n\0") */ 481 - BPF_LD_IMM64(BPF_REG_8, 0x000a78756e694cULL), 472 + BPF_LD_IMM64(BPF_REG_8, 473 + bpf_be64_to_cpu(0x4c696e75780a0000ULL)), 482 474 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0), 483 475 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2), 484 476 ··· 517 507 
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, -E2BIG, 6), 518 508 519 509 /* buf[0:6] == "Linux\0") */ 520 - BPF_LD_IMM64(BPF_REG_8, 0x000078756e694cULL), 510 + BPF_LD_IMM64(BPF_REG_8, 511 + bpf_be64_to_cpu(0x4c696e7578000000ULL)), 521 512 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0), 522 513 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2), 523 514 ··· 661 650 662 651 /* buf[0:4] == "606\0") */ 663 652 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_7, 0), 664 - BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0x00363036, 2), 653 + BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 654 + bpf_ntohl(0x36303600), 2), 665 655 666 656 /* return DENY; */ 667 657 BPF_MOV64_IMM(BPF_REG_0, 0), ··· 697 685 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 23, 14), 698 686 699 687 /* buf[0:8] == "3000000 " && */ 700 - BPF_LD_IMM64(BPF_REG_8, 0x2030303030303033ULL), 688 + BPF_LD_IMM64(BPF_REG_8, 689 + bpf_be64_to_cpu(0x3330303030303020ULL)), 701 690 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0), 702 691 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 10), 703 692 704 693 /* buf[8:16] == "4000000 " && */ 705 - BPF_LD_IMM64(BPF_REG_8, 0x2030303030303034ULL), 694 + BPF_LD_IMM64(BPF_REG_8, 695 + bpf_be64_to_cpu(0x3430303030303020ULL)), 706 696 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 8), 707 697 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 6), 708 698 709 699 /* buf[16:24] == "6000000\0") */ 710 - BPF_LD_IMM64(BPF_REG_8, 0x0030303030303036ULL), 700 + BPF_LD_IMM64(BPF_REG_8, 701 + bpf_be64_to_cpu(0x3630303030303000ULL)), 711 702 BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 16), 712 703 BPF_JMP_REG(BPF_JNE, BPF_REG_8, BPF_REG_9, 2), 713 704 ··· 750 735 751 736 /* buf[0:3] == "60\0") */ 752 737 BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_7, 0), 753 - BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0x003036, 2), 738 + BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 739 + bpf_ntohl(0x36300000), 2), 754 740 755 741 /* return DENY; */ 756 742 BPF_MOV64_IMM(BPF_REG_0, 0), ··· 773 757 /* sysctl_set_new_value arg2 (buf) */ 774 758 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 775 759 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 776 - BPF_MOV64_IMM(BPF_REG_0, 0x00303036), 760 + BPF_MOV64_IMM(BPF_REG_0, 761 + bpf_ntohl(0x36303000)), 777 762 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 778 763 779 764 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), ··· 808 791 /* sysctl_set_new_value arg2 (buf) */ 809 792 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 810 793 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 811 - BPF_MOV64_IMM(BPF_REG_0, FIXUP_SYSCTL_VALUE), 794 + BPF_LD_IMM64(BPF_REG_0, FIXUP_SYSCTL_VALUE), 812 795 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 813 796 814 797 BPF_MOV64_REG(BPF_REG_2, BPF_REG_7), ··· 842 825 /* arg1 (buf) */ 843 826 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 844 827 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 845 - BPF_MOV64_IMM(BPF_REG_0, 0x00303036), 846 - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 828 + BPF_MOV64_IMM(BPF_REG_0, 829 + bpf_ntohl(0x36303000)), 830 + BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0), 847 831 848 832 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 849 833 ··· 887 869 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 888 870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 889 871 /* "600 602\0" */ 890 - BPF_LD_IMM64(BPF_REG_0, 0x0032303620303036ULL), 872 + BPF_LD_IMM64(BPF_REG_0, 873 + bpf_be64_to_cpu(0x3630302036303200ULL)), 891 874 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 892 875 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 893 876 ··· 956 937 /* arg1 (buf) */ 957 938 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 958 939 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 959 - BPF_MOV64_IMM(BPF_REG_0, 0x00303036), 940 + BPF_MOV64_IMM(BPF_REG_0, 941 + bpf_ntohl(0x36303000)), 960 942 
BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 961 943 962 944 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), ··· 989 969 /* arg1 (buf) */ 990 970 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 991 971 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 992 - BPF_MOV64_IMM(BPF_REG_0, 0x00373730), 993 - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 972 + BPF_MOV64_IMM(BPF_REG_0, 973 + bpf_ntohl(0x30373700)), 974 + BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0), 994 975 995 976 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 996 977 ··· 1033 1012 /* arg1 (buf) */ 1034 1013 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 1035 1014 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 1036 - BPF_MOV64_IMM(BPF_REG_0, 0x00303036), 1015 + BPF_MOV64_IMM(BPF_REG_0, 1016 + bpf_ntohl(0x36303000)), 1037 1017 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1038 1018 1039 1019 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), ··· 1074 1052 /* arg1 (buf) */ 1075 1053 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 1076 1054 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 1077 - BPF_MOV64_IMM(BPF_REG_0, 0x090a0c0d), 1055 + BPF_MOV64_IMM(BPF_REG_0, 1056 + bpf_ntohl(0x0d0c0a09)), 1078 1057 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1079 1058 1080 1059 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), ··· 1115 1092 /* arg1 (buf) */ 1116 1093 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 1117 1094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 1118 - BPF_MOV64_IMM(BPF_REG_0, 0x00362d0a), /* " -6\0" */ 1095 + /* " -6\0" */ 1096 + BPF_MOV64_IMM(BPF_REG_0, 1097 + bpf_ntohl(0x0a2d3600)), 1119 1098 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1120 1099 1121 1100 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), ··· 1157 1132 /* arg1 (buf) */ 1158 1133 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 1159 1134 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 1160 - BPF_MOV64_IMM(BPF_REG_0, 0x00362d0a), /* " -6\0" */ 1161 - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1135 + /* " -6\0" */ 1136 + BPF_MOV64_IMM(BPF_REG_0, 1137 + bpf_ntohl(0x0a2d3600)), 1138 + BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0), 1162 1139 1163 1140 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 1164 1141 ··· 1202 1175 /* arg1 (buf) */ 1203 1176 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 1204 1177 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -8), 1205 - BPF_MOV64_IMM(BPF_REG_0, 0x65667830), /* "0xfe" */ 1206 - BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1178 + /* "0xfe" */ 1179 + BPF_MOV64_IMM(BPF_REG_0, 1180 + bpf_ntohl(0x30786665)), 1181 + BPF_STX_MEM(BPF_W, BPF_REG_7, BPF_REG_0, 0), 1207 1182 1208 1183 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), 1209 1184 ··· 1247 1218 /* arg1 (buf) 9223372036854775807 */ 1248 1219 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 1249 1220 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24), 1250 - BPF_LD_IMM64(BPF_REG_0, 0x3032373333323239ULL), 1221 + BPF_LD_IMM64(BPF_REG_0, 1222 + bpf_be64_to_cpu(0x3932323333373230ULL)), 1251 1223 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), 1252 - BPF_LD_IMM64(BPF_REG_0, 0x3537373435383633ULL), 1224 + BPF_LD_IMM64(BPF_REG_0, 1225 + bpf_be64_to_cpu(0x3336383534373735ULL)), 1253 1226 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8), 1254 - BPF_LD_IMM64(BPF_REG_0, 0x0000000000373038ULL), 1227 + BPF_LD_IMM64(BPF_REG_0, 1228 + bpf_be64_to_cpu(0x3830370000000000ULL)), 1255 1229 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 16), 1256 1230 1257 1231 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), ··· 1298 1266 /* arg1 (buf) 9223372036854775808 */ 1299 1267 BPF_MOV64_REG(BPF_REG_7, BPF_REG_10), 1300 1268 BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, -24), 1301 - BPF_LD_IMM64(BPF_REG_0, 0x3032373333323239ULL), 1269 + BPF_LD_IMM64(BPF_REG_0, 1270 + bpf_be64_to_cpu(0x3932323333373230ULL)), 1302 1271 BPF_STX_MEM(BPF_DW, 
BPF_REG_7, BPF_REG_0, 0), 1303 - BPF_LD_IMM64(BPF_REG_0, 0x3537373435383633ULL), 1272 + BPF_LD_IMM64(BPF_REG_0, 1273 + bpf_be64_to_cpu(0x3336383534373735ULL)), 1304 1274 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 8), 1305 - BPF_LD_IMM64(BPF_REG_0, 0x0000000000383038ULL), 1275 + BPF_LD_IMM64(BPF_REG_0, 1276 + bpf_be64_to_cpu(0x3830380000000000ULL)), 1306 1277 BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 16), 1307 1278 1308 1279 BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), ··· 1379 1344 static int fixup_sysctl_value(const char *buf, size_t buf_len, 1380 1345 struct bpf_insn *prog, size_t insn_num) 1381 1346 { 1382 - uint32_t value_num = 0; 1347 + union { 1348 + uint8_t raw[sizeof(uint64_t)]; 1349 + uint64_t num; 1350 + } value = {}; 1383 1351 uint8_t c, i; 1384 1352 1385 - if (buf_len > sizeof(value_num)) { 1353 + if (buf_len > sizeof(value)) { 1386 1354 log_err("Value is too big (%zd) to use in fixup", buf_len); 1387 1355 return -1; 1388 1356 } 1389 - 1390 - for (i = 0; i < buf_len; ++i) { 1391 - c = buf[i]; 1392 - value_num |= (c << i * 8); 1357 + if (prog[insn_num].code != (BPF_LD | BPF_DW | BPF_IMM)) { 1358 + log_err("Can fixup only BPF_LD_IMM64 insns"); 1359 + return -1; 1393 1360 } 1394 1361 1395 - prog[insn_num].imm = value_num; 1362 + memcpy(value.raw, buf, buf_len); 1363 + prog[insn_num].imm = (uint32_t)value.num; 1364 + prog[insn_num + 1].imm = (uint32_t)(value.num >> 32); 1396 1365 1397 1366 return 0; 1398 1367 } ··· 1538 1499 goto err; 1539 1500 } 1540 1501 1502 + errno = 0; 1541 1503 if (access_sysctl(sysctl_path, test) == -1) { 1542 1504 if (test->result == OP_EPERM && errno == EPERM) 1543 1505 goto out; ··· 1547 1507 } 1548 1508 1549 1509 if (test->result != SUCCESS) { 1550 - log_err("Unexpected failure"); 1510 + log_err("Unexpected success"); 1551 1511 goto err; 1552 1512 } 1553 1513
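Note: the magic numbers in the BPF_LD_IMM64()/BPF_MOV64_IMM() arguments are just the ASCII bytes of the strings being compared. Writing them in big-endian order and wrapping them in bpf_be64_to_cpu()/bpf_ntohl() keeps the byte sequence in memory identical on little- and big-endian hosts, which is what lets these cases pass on s390x. For example, "tcp_mem\0" is the bytes 74 63 70 5f 6d 65 6d 00, i.e. 0x7463705f6d656d00 as a big-endian u64. A small illustrative helper (not part of the patch) that computes such constants:

    #include <stdint.h>
    #include <string.h>

    /* Return the string's first 8 bytes as a big-endian u64, NUL padded;
     * str_to_be64("tcp_mem") == 0x7463705f6d656d00ULL.
     */
    static uint64_t str_to_be64(const char *s)
    {
            unsigned char buf[8] = {};
            uint64_t v = 0;
            int i;

            memcpy(buf, s, strnlen(s, 8));
            for (i = 0; i < 8; i++)
                    v = (v << 8) | buf[i];  /* first byte becomes most significant */
            return v;
    }

The related fixup_sysctl_value() change above patches both imm halves of the BPF_LD_IMM64 pair so that full 64-bit fixup values survive.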
+31
tools/testing/selftests/bpf/test_tcp_rtt.c
··· 6 6 #include <sys/types.h> 7 7 #include <sys/socket.h> 8 8 #include <netinet/in.h> 9 + #include <netinet/tcp.h> 9 10 #include <pthread.h> 10 11 11 12 #include <linux/filter.h> ··· 33 32 34 33 if (write(fd, &b, sizeof(b)) != 1) 35 34 error(1, errno, "Failed to send single byte"); 35 + } 36 + 37 + static int wait_for_ack(int fd, int retries) 38 + { 39 + struct tcp_info info; 40 + socklen_t optlen; 41 + int i, err; 42 + 43 + for (i = 0; i < retries; i++) { 44 + optlen = sizeof(info); 45 + err = getsockopt(fd, SOL_TCP, TCP_INFO, &info, &optlen); 46 + if (err < 0) { 47 + log_err("Failed to lookup TCP stats"); 48 + return err; 49 + } 50 + 51 + if (info.tcpi_unacked == 0) 52 + return 0; 53 + 54 + usleep(10); 55 + } 56 + 57 + log_err("Did not receive ACK"); 58 + return -1; 36 59 } 37 60 38 61 static int verify_sk(int map_fd, int client_fd, const char *msg, __u32 invoked, ··· 174 149 /*icsk_retransmits=*/0); 175 150 176 151 send_byte(client_fd); 152 + if (wait_for_ack(client_fd, 100) < 0) { 153 + err = -1; 154 + goto close_client_fd; 155 + } 156 + 177 157 178 158 err += verify_sk(map_fd, client_fd, "first payload byte", 179 159 /*invoked=*/2, ··· 187 157 /*delivered_ce=*/0, 188 158 /*icsk_retransmits=*/0); 189 159 160 + close_client_fd: 190 161 close(client_fd); 191 162 192 163 close_bpf_object:
+57 -11
tools/testing/selftests/bpf/test_verifier.c
··· 61 61 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled" 62 62 static bool unpriv_disabled = false; 63 63 static int skips; 64 + static bool verbose = false; 64 65 65 66 struct bpf_test { 66 67 const char *descr; ··· 93 92 enum { 94 93 UNDEF, 95 94 ACCEPT, 96 - REJECT 95 + REJECT, 96 + VERBOSE_ACCEPT, 97 97 } result, result_unpriv; 98 98 enum bpf_prog_type prog_type; 99 99 uint8_t flags; ··· 861 859 return 0; 862 860 } 863 861 862 + static bool cmp_str_seq(const char *log, const char *exp) 863 + { 864 + char needle[80]; 865 + const char *p, *q; 866 + int len; 867 + 868 + do { 869 + p = strchr(exp, '\t'); 870 + if (!p) 871 + p = exp + strlen(exp); 872 + 873 + len = p - exp; 874 + if (len >= sizeof(needle) || !len) { 875 + printf("FAIL\nTestcase bug\n"); 876 + return false; 877 + } 878 + strncpy(needle, exp, len); 879 + needle[len] = 0; 880 + q = strstr(log, needle); 881 + if (!q) { 882 + printf("FAIL\nUnexpected verifier log in successful load!\n" 883 + "EXP: %s\nRES:\n", needle); 884 + return false; 885 + } 886 + log = q + len; 887 + exp = p + 1; 888 + } while (*p); 889 + return true; 890 + } 891 + 864 892 static void do_test_single(struct bpf_test *test, bool unpriv, 865 893 int *passes, int *errors) 866 894 { ··· 929 897 pflags |= BPF_F_STRICT_ALIGNMENT; 930 898 if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) 931 899 pflags |= BPF_F_ANY_ALIGNMENT; 900 + if (test->flags & ~3) 901 + pflags |= test->flags; 932 902 903 + expected_ret = unpriv && test->result_unpriv != UNDEF ? 904 + test->result_unpriv : test->result; 905 + expected_err = unpriv && test->errstr_unpriv ? 906 + test->errstr_unpriv : test->errstr; 933 907 memset(&attr, 0, sizeof(attr)); 934 908 attr.prog_type = prog_type; 935 909 attr.expected_attach_type = test->expected_attach_type; 936 910 attr.insns = prog; 937 911 attr.insns_cnt = prog_len; 938 912 attr.license = "GPL"; 939 - attr.log_level = 4; 913 + attr.log_level = verbose || expected_ret == VERBOSE_ACCEPT ? 1 : 4; 940 914 attr.prog_flags = pflags; 941 915 942 916 fd_prog = bpf_load_program_xattr(&attr, bpf_vlog, sizeof(bpf_vlog)); ··· 952 914 goto close_fds; 953 915 } 954 916 955 - expected_ret = unpriv && test->result_unpriv != UNDEF ? 956 - test->result_unpriv : test->result; 957 - expected_err = unpriv && test->errstr_unpriv ? 
958 - test->errstr_unpriv : test->errstr; 959 - 960 917 alignment_prevented_execution = 0; 961 918 962 - if (expected_ret == ACCEPT) { 919 + if (expected_ret == ACCEPT || expected_ret == VERBOSE_ACCEPT) { 963 920 if (fd_prog < 0) { 964 921 printf("FAIL\nFailed to load prog '%s'!\n", 965 922 strerror(errno)); ··· 965 932 (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)) 966 933 alignment_prevented_execution = 1; 967 934 #endif 935 + if (expected_ret == VERBOSE_ACCEPT && !cmp_str_seq(bpf_vlog, expected_err)) { 936 + goto fail_log; 937 + } 968 938 } else { 969 939 if (fd_prog >= 0) { 970 940 printf("FAIL\nUnexpected success to load!\n"); ··· 992 956 goto fail_log; 993 957 } 994 958 } 959 + 960 + if (verbose) 961 + printf(", verifier log:\n%s", bpf_vlog); 995 962 996 963 run_errs = 0; 997 964 run_successes = 0; ··· 1136 1097 { 1137 1098 unsigned int from = 0, to = ARRAY_SIZE(tests); 1138 1099 bool unpriv = !is_admin(); 1100 + int arg = 1; 1101 + 1102 + if (argc > 1 && strcmp(argv[1], "-v") == 0) { 1103 + arg++; 1104 + verbose = true; 1105 + argc--; 1106 + } 1139 1107 1140 1108 if (argc == 3) { 1141 - unsigned int l = atoi(argv[argc - 2]); 1142 - unsigned int u = atoi(argv[argc - 1]); 1109 + unsigned int l = atoi(argv[arg]); 1110 + unsigned int u = atoi(argv[arg + 1]); 1143 1111 1144 1112 if (l < to && u < to) { 1145 1113 from = l; 1146 1114 to = u + 1; 1147 1115 } 1148 1116 } else if (argc == 2) { 1149 - unsigned int t = atoi(argv[argc - 1]); 1117 + unsigned int t = atoi(argv[arg]); 1150 1118 1151 1119 if (t < to) { 1152 1120 from = t;
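Note: with the new VERBOSE_ACCEPT result the program must load successfully and, in addition, cmp_str_seq() must find every fragment of .errstr in the verifier log, in order. Fragments are separated by '\t', which the precise.c cases below produce by ending each source line with a backslash so that the next line's leading tab becomes part of the string. A hypothetical entry showing the shape (the log fragments are illustrative, not taken from a real run):

    {
            "verbose accept: example",
            .insns = {
            BPF_MOV64_IMM(BPF_REG_0, 0),
            BPF_EXIT_INSN(),
            },
            .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
            .result = VERBOSE_ACCEPT,
            /* backslash continuation: the next line's leading tab is the
             * separator cmp_str_seq() splits on
             */
            .errstr = "0: (b7) r0 = 0\
            processed 2 insns",
    },

Passing -v to test_verifier now also dumps the verifier log for every test, independent of the expected result.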
+194
tools/testing/selftests/bpf/verifier/precise.c
··· 1 + { 2 + "precise: test 1", 3 + .insns = { 4 + BPF_MOV64_IMM(BPF_REG_0, 1), 5 + BPF_LD_MAP_FD(BPF_REG_6, 0), 6 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 7 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), 8 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 9 + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0), 10 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 11 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 12 + BPF_EXIT_INSN(), 13 + 14 + BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), 15 + 16 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 17 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), 18 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 19 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 20 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 21 + BPF_EXIT_INSN(), 22 + 23 + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 24 + 25 + BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */ 26 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), 27 + BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1), 28 + BPF_EXIT_INSN(), 29 + 30 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */ 31 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP), 32 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 33 + BPF_MOV64_IMM(BPF_REG_3, 0), 34 + BPF_EMIT_CALL(BPF_FUNC_probe_read), 35 + BPF_EXIT_INSN(), 36 + }, 37 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 38 + .fixup_map_array_48b = { 1 }, 39 + .result = VERBOSE_ACCEPT, 40 + .errstr = 41 + "26: (85) call bpf_probe_read#4\ 42 + last_idx 26 first_idx 20\ 43 + regs=4 stack=0 before 25\ 44 + regs=4 stack=0 before 24\ 45 + regs=4 stack=0 before 23\ 46 + regs=4 stack=0 before 22\ 47 + regs=4 stack=0 before 20\ 48 + parent didn't have regs=4 stack=0 marks\ 49 + last_idx 19 first_idx 10\ 50 + regs=4 stack=0 before 19\ 51 + regs=200 stack=0 before 18\ 52 + regs=300 stack=0 before 17\ 53 + regs=201 stack=0 before 15\ 54 + regs=201 stack=0 before 14\ 55 + regs=200 stack=0 before 13\ 56 + regs=200 stack=0 before 12\ 57 + regs=200 stack=0 before 11\ 58 + regs=200 stack=0 before 10\ 59 + parent already had regs=0 stack=0 marks", 60 + }, 61 + { 62 + "precise: test 2", 63 + .insns = { 64 + BPF_MOV64_IMM(BPF_REG_0, 1), 65 + BPF_LD_MAP_FD(BPF_REG_6, 0), 66 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 67 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), 68 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 69 + BPF_ST_MEM(BPF_DW, BPF_REG_FP, -8, 0), 70 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 71 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 72 + BPF_EXIT_INSN(), 73 + 74 + BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), 75 + 76 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), 77 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_FP), 78 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 79 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 80 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 81 + BPF_EXIT_INSN(), 82 + 83 + BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), 84 + 85 + BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8), /* map_value_ptr -= map_value_ptr */ 86 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_9), 87 + BPF_JMP_IMM(BPF_JLT, BPF_REG_2, 8, 1), 88 + BPF_EXIT_INSN(), 89 + 90 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1), /* R2=inv(umin=1, umax=8) */ 91 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_FP), 92 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 93 + BPF_MOV64_IMM(BPF_REG_3, 0), 94 + BPF_EMIT_CALL(BPF_FUNC_probe_read), 95 + BPF_EXIT_INSN(), 96 + }, 97 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 98 + .fixup_map_array_48b = { 1 }, 99 + .result = VERBOSE_ACCEPT, 100 + .flags = BPF_F_TEST_STATE_FREQ, 101 + .errstr = 102 + "26: (85) call bpf_probe_read#4\ 103 + last_idx 26 first_idx 22\ 104 + regs=4 stack=0 before 25\ 105 + regs=4 stack=0 before 24\ 106 + regs=4 stack=0 before 23\ 107 + regs=4 stack=0 before 22\ 
108 + parent didn't have regs=4 stack=0 marks\ 109 + last_idx 20 first_idx 20\ 110 + regs=4 stack=0 before 20\ 111 + parent didn't have regs=4 stack=0 marks\ 112 + last_idx 19 first_idx 17\ 113 + regs=4 stack=0 before 19\ 114 + regs=200 stack=0 before 18\ 115 + regs=300 stack=0 before 17\ 116 + parent already had regs=0 stack=0 marks", 117 + }, 118 + { 119 + "precise: cross frame pruning", 120 + .insns = { 121 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 122 + BPF_MOV64_IMM(BPF_REG_8, 0), 123 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 124 + BPF_MOV64_IMM(BPF_REG_8, 1), 125 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 126 + BPF_MOV64_IMM(BPF_REG_9, 0), 127 + BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1), 128 + BPF_MOV64_IMM(BPF_REG_9, 1), 129 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 130 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), 131 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_8, 1, 1), 132 + BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_2, 0), 133 + BPF_MOV64_IMM(BPF_REG_0, 0), 134 + BPF_EXIT_INSN(), 135 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 0), 136 + BPF_EXIT_INSN(), 137 + }, 138 + .prog_type = BPF_PROG_TYPE_XDP, 139 + .flags = BPF_F_TEST_STATE_FREQ, 140 + .errstr = "!read_ok", 141 + .result = REJECT, 142 + }, 143 + { 144 + "precise: ST insn causing spi > allocated_stack", 145 + .insns = { 146 + BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), 147 + BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0), 148 + BPF_ST_MEM(BPF_DW, BPF_REG_3, -8, 0), 149 + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 150 + BPF_MOV64_IMM(BPF_REG_0, -1), 151 + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0), 152 + BPF_EXIT_INSN(), 153 + }, 154 + .prog_type = BPF_PROG_TYPE_XDP, 155 + .flags = BPF_F_TEST_STATE_FREQ, 156 + .errstr = "5: (2d) if r4 > r0 goto pc+0\ 157 + last_idx 5 first_idx 5\ 158 + parent didn't have regs=10 stack=0 marks\ 159 + last_idx 4 first_idx 2\ 160 + regs=10 stack=0 before 4\ 161 + regs=10 stack=0 before 3\ 162 + regs=0 stack=1 before 2\ 163 + last_idx 5 first_idx 5\ 164 + parent didn't have regs=1 stack=0 marks", 165 + .result = VERBOSE_ACCEPT, 166 + .retval = -1, 167 + }, 168 + { 169 + "precise: STX insn causing spi > allocated_stack", 170 + .insns = { 171 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32), 172 + BPF_MOV64_REG(BPF_REG_3, BPF_REG_10), 173 + BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0), 174 + BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, -8), 175 + BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), 176 + BPF_MOV64_IMM(BPF_REG_0, -1), 177 + BPF_JMP_REG(BPF_JGT, BPF_REG_4, BPF_REG_0, 0), 178 + BPF_EXIT_INSN(), 179 + }, 180 + .prog_type = BPF_PROG_TYPE_XDP, 181 + .flags = BPF_F_TEST_STATE_FREQ, 182 + .errstr = "last_idx 6 first_idx 6\ 183 + parent didn't have regs=10 stack=0 marks\ 184 + last_idx 5 first_idx 3\ 185 + regs=10 stack=0 before 5\ 186 + regs=10 stack=0 before 4\ 187 + regs=0 stack=1 before 3\ 188 + last_idx 6 first_idx 6\ 189 + parent didn't have regs=1 stack=0 marks\ 190 + last_idx 5 first_idx 3\ 191 + regs=1 stack=0 before 5", 192 + .result = VERBOSE_ACCEPT, 193 + .retval = -1, 194 + },