Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf: add initial bpf tracepoints

This work adds a number of tracepoints to paths that are either
considered slow-path or exception-like states, where monitoring or
inspecting them would be desirable.

For bpf(2) syscall, tracepoints have been placed for main commands
when they succeed. In XDP case, tracepoint is for exceptions, that
is, e.g. on abnormal BPF program exit such as unknown or XDP_ABORTED
return code, or when error occurs during XDP_TX action and the packet
could not be forwarded.

Both have been split into separate event headers, and can be further
extended. Worst case, should they unexpectedly get in our way in the
future, they can also be removed [1]. Of course, these tracepoints (like
any other) can be analyzed by eBPF itself, etc. Example output:

# ./perf record -a -e bpf:* sleep 10
# ./perf script
sock_example 6197 [005] 283.980322: bpf:bpf_map_create: map type=ARRAY ufd=4 key=4 val=8 max=256 flags=0
sock_example 6197 [005] 283.980721: bpf:bpf_prog_load: prog=a5ea8fa30ea6849c type=SOCKET_FILTER ufd=5
sock_example 6197 [005] 283.988423: bpf:bpf_prog_get_type: prog=a5ea8fa30ea6849c type=SOCKET_FILTER
sock_example 6197 [005] 283.988443: bpf:bpf_map_lookup_elem: map type=ARRAY ufd=4 key=[06 00 00 00] val=[00 00 00 00 00 00 00 00]
[...]
sock_example 6197 [005] 288.990868: bpf:bpf_map_lookup_elem: map type=ARRAY ufd=4 key=[01 00 00 00] val=[14 00 00 00 00 00 00 00]
swapper 0 [005] 289.338243: bpf:bpf_prog_put_rcu: prog=a5ea8fa30ea6849c type=SOCKET_FILTER

[1] https://lwn.net/Articles/705270/

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Daniel Borkmann and committed by
David S. Miller
a67edbf4 0fe05591

+483 -15
+3
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 33 33 34 34 #include <net/busy_poll.h> 35 35 #include <linux/bpf.h> 36 + #include <linux/bpf_trace.h> 36 37 #include <linux/mlx4/cq.h> 37 38 #include <linux/slab.h> 38 39 #include <linux/mlx4/qp.h> ··· 927 926 length, cq->ring, 928 927 &doorbell_pending))) 929 928 goto consumed; 929 + trace_xdp_exception(dev, xdp_prog, act); 930 930 goto xdp_drop_no_cnt; /* Drop on xmit failure */ 931 931 default: 932 932 bpf_warn_invalid_xdp_action(act); 933 933 case XDP_ABORTED: 934 + trace_xdp_exception(dev, xdp_prog, act); 934 935 case XDP_DROP: 935 936 ring->xdp_drop++; 936 937 xdp_drop_no_cnt:
+8 -4
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 33 33 #include <linux/ip.h> 34 34 #include <linux/ipv6.h> 35 35 #include <linux/tcp.h> 36 + #include <linux/bpf_trace.h> 36 37 #include <net/busy_poll.h> 37 38 #include "en.h" 38 39 #include "en_tc.h" ··· 641 640 mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0); 642 641 } 643 642 644 - static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, 643 + static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, 645 644 struct mlx5e_dma_info *di, 646 645 const struct xdp_buff *xdp) 647 646 { ··· 663 662 MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) { 664 663 rq->stats.xdp_drop++; 665 664 mlx5e_page_release(rq, di, true); 666 - return; 665 + return false; 667 666 } 668 667 669 668 if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) { ··· 674 673 } 675 674 rq->stats.xdp_tx_full++; 676 675 mlx5e_page_release(rq, di, true); 677 - return; 676 + return false; 678 677 } 679 678 680 679 dma_len -= MLX5E_XDP_MIN_INLINE; ··· 704 703 705 704 sq->db.xdp.doorbell = true; 706 705 rq->stats.xdp_tx++; 706 + return true; 707 707 } 708 708 709 709 /* returns true if packet was consumed by xdp */ ··· 730 728 *len = xdp.data_end - xdp.data; 731 729 return false; 732 730 case XDP_TX: 733 - mlx5e_xmit_xdp_frame(rq, di, &xdp); 731 + if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) 732 + trace_xdp_exception(rq->netdev, prog, act); 734 733 return true; 735 734 default: 736 735 bpf_warn_invalid_xdp_action(act); 737 736 case XDP_ABORTED: 737 + trace_xdp_exception(rq->netdev, prog, act); 738 738 case XDP_DROP: 739 739 rq->stats.xdp_drop++; 740 740 mlx5e_page_release(rq, di, true);
+10 -5
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 42 42 */ 43 43 44 44 #include <linux/bpf.h> 45 + #include <linux/bpf_trace.h> 45 46 #include <linux/module.h> 46 47 #include <linux/kernel.h> 47 48 #include <linux/init.h> ··· 1460 1459 dev_kfree_skb_any(skb); 1461 1460 } 1462 1461 1463 - static void 1462 + static bool 1464 1463 nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring, 1465 1464 struct nfp_net_tx_ring *tx_ring, 1466 1465 struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off, ··· 1474 1473 1475 1474 if (unlikely(nfp_net_tx_full(tx_ring, 1))) { 1476 1475 nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); 1477 - return; 1476 + return false; 1478 1477 } 1479 1478 1480 1479 new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr); 1481 1480 if (unlikely(!new_frag)) { 1482 1481 nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL); 1483 - return; 1482 + return false; 1484 1483 } 1485 1484 nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr); 1486 1485 ··· 1510 1509 1511 1510 tx_ring->wr_p++; 1512 1511 tx_ring->wr_ptr_add++; 1512 + return true; 1513 1513 } 1514 1514 1515 1515 static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len) ··· 1615 1613 case XDP_PASS: 1616 1614 break; 1617 1615 case XDP_TX: 1618 - nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf, 1619 - pkt_off, pkt_len); 1616 + if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring, 1617 + tx_ring, rxbuf, 1618 + pkt_off, pkt_len))) 1619 + trace_xdp_exception(nn->netdev, xdp_prog, act); 1620 1620 continue; 1621 1621 default: 1622 1622 bpf_warn_invalid_xdp_action(act); 1623 1623 case XDP_ABORTED: 1624 + trace_xdp_exception(nn->netdev, xdp_prog, act); 1624 1625 case XDP_DROP: 1625 1626 nfp_net_rx_give_one(rx_ring, rxbuf->frag, 1626 1627 rxbuf->dma_addr);
+4
drivers/net/ethernet/qlogic/qede/qede_fp.c
··· 32 32 #include <linux/netdevice.h> 33 33 #include <linux/etherdevice.h> 34 34 #include <linux/skbuff.h> 35 + #include <linux/bpf_trace.h> 35 36 #include <net/udp_tunnel.h> 36 37 #include <linux/ip.h> 37 38 #include <net/ipv6.h> ··· 1017 1016 /* We need the replacement buffer before transmit. */ 1018 1017 if (qede_alloc_rx_buffer(rxq, true)) { 1019 1018 qede_recycle_rx_bd_ring(rxq, 1); 1019 + trace_xdp_exception(edev->ndev, prog, act); 1020 1020 return false; 1021 1021 } 1022 1022 ··· 1028 1026 dma_unmap_page(rxq->dev, bd->mapping, 1029 1027 PAGE_SIZE, DMA_BIDIRECTIONAL); 1030 1028 __free_page(bd->data); 1029 + trace_xdp_exception(edev->ndev, prog, act); 1031 1030 } 1032 1031 1033 1032 /* Regardless, we've consumed an Rx BD */ ··· 1038 1035 default: 1039 1036 bpf_warn_invalid_xdp_action(act); 1040 1037 case XDP_ABORTED: 1038 + trace_xdp_exception(edev->ndev, prog, act); 1041 1039 case XDP_DROP: 1042 1040 qede_recycle_rx_bd_ring(rxq, cqe->bd_num); 1043 1041 }
+9 -3
drivers/net/virtio_net.c
··· 23 23 #include <linux/virtio.h> 24 24 #include <linux/virtio_net.h> 25 25 #include <linux/bpf.h> 26 + #include <linux/bpf_trace.h> 26 27 #include <linux/scatterlist.h> 27 28 #include <linux/if_vlan.h> 28 29 #include <linux/slab.h> ··· 331 330 return skb; 332 331 } 333 332 334 - static void virtnet_xdp_xmit(struct virtnet_info *vi, 333 + static bool virtnet_xdp_xmit(struct virtnet_info *vi, 335 334 struct receive_queue *rq, 336 335 struct send_queue *sq, 337 336 struct xdp_buff *xdp, ··· 383 382 put_page(page); 384 383 } else /* small buffer */ 385 384 kfree_skb(data); 386 - return; // On error abort to avoid unnecessary kick 385 + /* On error abort to avoid unnecessary kick */ 386 + return false; 387 387 } 388 388 389 389 virtqueue_kick(sq->vq); 390 + return true; 390 391 } 391 392 392 393 static u32 do_xdp_prog(struct virtnet_info *vi, ··· 424 421 vi->xdp_queue_pairs + 425 422 smp_processor_id(); 426 423 xdp.data = buf; 427 - virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data); 424 + if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, 425 + data))) 426 + trace_xdp_exception(vi->dev, xdp_prog, act); 428 427 return XDP_TX; 429 428 default: 430 429 bpf_warn_invalid_xdp_action(act); 431 430 case XDP_ABORTED: 431 + trace_xdp_exception(vi->dev, xdp_prog, act); 432 432 case XDP_DROP: 433 433 return XDP_DROP; 434 434 }
+7
include/linux/bpf_trace.h
··· 1 + #ifndef __LINUX_BPF_TRACE_H__ 2 + #define __LINUX_BPF_TRACE_H__ 3 + 4 + #include <trace/events/bpf.h> 5 + #include <trace/events/xdp.h> 6 + 7 + #endif /* __LINUX_BPF_TRACE_H__ */
+347
include/trace/events/bpf.h
··· 1 + #undef TRACE_SYSTEM 2 + #define TRACE_SYSTEM bpf 3 + 4 + #if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _TRACE_BPF_H 6 + 7 + #include <linux/filter.h> 8 + #include <linux/bpf.h> 9 + #include <linux/fs.h> 10 + #include <linux/tracepoint.h> 11 + 12 + #define __PROG_TYPE_MAP(FN) \ 13 + FN(SOCKET_FILTER) \ 14 + FN(KPROBE) \ 15 + FN(SCHED_CLS) \ 16 + FN(SCHED_ACT) \ 17 + FN(TRACEPOINT) \ 18 + FN(XDP) \ 19 + FN(PERF_EVENT) \ 20 + FN(CGROUP_SKB) \ 21 + FN(CGROUP_SOCK) \ 22 + FN(LWT_IN) \ 23 + FN(LWT_OUT) \ 24 + FN(LWT_XMIT) 25 + 26 + #define __MAP_TYPE_MAP(FN) \ 27 + FN(HASH) \ 28 + FN(ARRAY) \ 29 + FN(PROG_ARRAY) \ 30 + FN(PERF_EVENT_ARRAY) \ 31 + FN(PERCPU_HASH) \ 32 + FN(PERCPU_ARRAY) \ 33 + FN(STACK_TRACE) \ 34 + FN(CGROUP_ARRAY) \ 35 + FN(LRU_HASH) \ 36 + FN(LRU_PERCPU_HASH) \ 37 + FN(LPM_TRIE) 38 + 39 + #define __PROG_TYPE_TP_FN(x) \ 40 + TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x); 41 + #define __PROG_TYPE_SYM_FN(x) \ 42 + { BPF_PROG_TYPE_##x, #x }, 43 + #define __PROG_TYPE_SYM_TAB \ 44 + __PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 } 45 + __PROG_TYPE_MAP(__PROG_TYPE_TP_FN) 46 + 47 + #define __MAP_TYPE_TP_FN(x) \ 48 + TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x); 49 + #define __MAP_TYPE_SYM_FN(x) \ 50 + { BPF_MAP_TYPE_##x, #x }, 51 + #define __MAP_TYPE_SYM_TAB \ 52 + __MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 0 } 53 + __MAP_TYPE_MAP(__MAP_TYPE_TP_FN) 54 + 55 + DECLARE_EVENT_CLASS(bpf_prog_event, 56 + 57 + TP_PROTO(const struct bpf_prog *prg), 58 + 59 + TP_ARGS(prg), 60 + 61 + TP_STRUCT__entry( 62 + __array(u8, prog_tag, 8) 63 + __field(u32, type) 64 + ), 65 + 66 + TP_fast_assign( 67 + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); 68 + memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); 69 + __entry->type = prg->type; 70 + ), 71 + 72 + TP_printk("prog=%s type=%s", 73 + __print_hex_str(__entry->prog_tag, 8), 74 + __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB)) 75 + ); 76 + 77 + DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type, 78 
+ 79 + TP_PROTO(const struct bpf_prog *prg), 80 + 81 + TP_ARGS(prg) 82 + ); 83 + 84 + DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu, 85 + 86 + TP_PROTO(const struct bpf_prog *prg), 87 + 88 + TP_ARGS(prg) 89 + ); 90 + 91 + TRACE_EVENT(bpf_prog_load, 92 + 93 + TP_PROTO(const struct bpf_prog *prg, int ufd), 94 + 95 + TP_ARGS(prg, ufd), 96 + 97 + TP_STRUCT__entry( 98 + __array(u8, prog_tag, 8) 99 + __field(u32, type) 100 + __field(int, ufd) 101 + ), 102 + 103 + TP_fast_assign( 104 + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); 105 + memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); 106 + __entry->type = prg->type; 107 + __entry->ufd = ufd; 108 + ), 109 + 110 + TP_printk("prog=%s type=%s ufd=%d", 111 + __print_hex_str(__entry->prog_tag, 8), 112 + __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB), 113 + __entry->ufd) 114 + ); 115 + 116 + TRACE_EVENT(bpf_map_create, 117 + 118 + TP_PROTO(const struct bpf_map *map, int ufd), 119 + 120 + TP_ARGS(map, ufd), 121 + 122 + TP_STRUCT__entry( 123 + __field(u32, type) 124 + __field(u32, size_key) 125 + __field(u32, size_value) 126 + __field(u32, max_entries) 127 + __field(u32, flags) 128 + __field(int, ufd) 129 + ), 130 + 131 + TP_fast_assign( 132 + __entry->type = map->map_type; 133 + __entry->size_key = map->key_size; 134 + __entry->size_value = map->value_size; 135 + __entry->max_entries = map->max_entries; 136 + __entry->flags = map->map_flags; 137 + __entry->ufd = ufd; 138 + ), 139 + 140 + TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x", 141 + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), 142 + __entry->ufd, __entry->size_key, __entry->size_value, 143 + __entry->max_entries, __entry->flags) 144 + ); 145 + 146 + DECLARE_EVENT_CLASS(bpf_obj_prog, 147 + 148 + TP_PROTO(const struct bpf_prog *prg, int ufd, 149 + const struct filename *pname), 150 + 151 + TP_ARGS(prg, ufd, pname), 152 + 153 + TP_STRUCT__entry( 154 + __array(u8, prog_tag, 8) 155 + __field(int, ufd) 156 + __string(path, 
pname->name) 157 + ), 158 + 159 + TP_fast_assign( 160 + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag)); 161 + memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag)); 162 + __assign_str(path, pname->name); 163 + __entry->ufd = ufd; 164 + ), 165 + 166 + TP_printk("prog=%s path=%s ufd=%d", 167 + __print_hex_str(__entry->prog_tag, 8), 168 + __get_str(path), __entry->ufd) 169 + ); 170 + 171 + DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog, 172 + 173 + TP_PROTO(const struct bpf_prog *prg, int ufd, 174 + const struct filename *pname), 175 + 176 + TP_ARGS(prg, ufd, pname) 177 + ); 178 + 179 + DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog, 180 + 181 + TP_PROTO(const struct bpf_prog *prg, int ufd, 182 + const struct filename *pname), 183 + 184 + TP_ARGS(prg, ufd, pname) 185 + ); 186 + 187 + DECLARE_EVENT_CLASS(bpf_obj_map, 188 + 189 + TP_PROTO(const struct bpf_map *map, int ufd, 190 + const struct filename *pname), 191 + 192 + TP_ARGS(map, ufd, pname), 193 + 194 + TP_STRUCT__entry( 195 + __field(u32, type) 196 + __field(int, ufd) 197 + __string(path, pname->name) 198 + ), 199 + 200 + TP_fast_assign( 201 + __assign_str(path, pname->name); 202 + __entry->type = map->map_type; 203 + __entry->ufd = ufd; 204 + ), 205 + 206 + TP_printk("map type=%s ufd=%d path=%s", 207 + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), 208 + __entry->ufd, __get_str(path)) 209 + ); 210 + 211 + DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map, 212 + 213 + TP_PROTO(const struct bpf_map *map, int ufd, 214 + const struct filename *pname), 215 + 216 + TP_ARGS(map, ufd, pname) 217 + ); 218 + 219 + DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map, 220 + 221 + TP_PROTO(const struct bpf_map *map, int ufd, 222 + const struct filename *pname), 223 + 224 + TP_ARGS(map, ufd, pname) 225 + ); 226 + 227 + DECLARE_EVENT_CLASS(bpf_map_keyval, 228 + 229 + TP_PROTO(const struct bpf_map *map, int ufd, 230 + const void *key, const void *val), 231 + 232 + TP_ARGS(map, ufd, key, val), 233 + 234 + TP_STRUCT__entry( 235 + 
__field(u32, type) 236 + __field(u32, key_len) 237 + __dynamic_array(u8, key, map->key_size) 238 + __field(bool, key_trunc) 239 + __field(u32, val_len) 240 + __dynamic_array(u8, val, map->value_size) 241 + __field(bool, val_trunc) 242 + __field(int, ufd) 243 + ), 244 + 245 + TP_fast_assign( 246 + memcpy(__get_dynamic_array(key), key, map->key_size); 247 + memcpy(__get_dynamic_array(val), val, map->value_size); 248 + __entry->type = map->map_type; 249 + __entry->key_len = min(map->key_size, 16U); 250 + __entry->key_trunc = map->key_size != __entry->key_len; 251 + __entry->val_len = min(map->value_size, 16U); 252 + __entry->val_trunc = map->value_size != __entry->val_len; 253 + __entry->ufd = ufd; 254 + ), 255 + 256 + TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]", 257 + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), 258 + __entry->ufd, 259 + __print_hex(__get_dynamic_array(key), __entry->key_len), 260 + __entry->key_trunc ? " ..." : "", 261 + __print_hex(__get_dynamic_array(val), __entry->val_len), 262 + __entry->val_trunc ? " ..." 
: "") 263 + ); 264 + 265 + DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem, 266 + 267 + TP_PROTO(const struct bpf_map *map, int ufd, 268 + const void *key, const void *val), 269 + 270 + TP_ARGS(map, ufd, key, val) 271 + ); 272 + 273 + DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem, 274 + 275 + TP_PROTO(const struct bpf_map *map, int ufd, 276 + const void *key, const void *val), 277 + 278 + TP_ARGS(map, ufd, key, val) 279 + ); 280 + 281 + TRACE_EVENT(bpf_map_delete_elem, 282 + 283 + TP_PROTO(const struct bpf_map *map, int ufd, 284 + const void *key), 285 + 286 + TP_ARGS(map, ufd, key), 287 + 288 + TP_STRUCT__entry( 289 + __field(u32, type) 290 + __field(u32, key_len) 291 + __dynamic_array(u8, key, map->key_size) 292 + __field(bool, key_trunc) 293 + __field(int, ufd) 294 + ), 295 + 296 + TP_fast_assign( 297 + memcpy(__get_dynamic_array(key), key, map->key_size); 298 + __entry->type = map->map_type; 299 + __entry->key_len = min(map->key_size, 16U); 300 + __entry->key_trunc = map->key_size != __entry->key_len; 301 + __entry->ufd = ufd; 302 + ), 303 + 304 + TP_printk("map type=%s ufd=%d key=[%s%s]", 305 + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), 306 + __entry->ufd, 307 + __print_hex(__get_dynamic_array(key), __entry->key_len), 308 + __entry->key_trunc ? " ..." 
: "") 309 + ); 310 + 311 + TRACE_EVENT(bpf_map_next_key, 312 + 313 + TP_PROTO(const struct bpf_map *map, int ufd, 314 + const void *key, const void *key_next), 315 + 316 + TP_ARGS(map, ufd, key, key_next), 317 + 318 + TP_STRUCT__entry( 319 + __field(u32, type) 320 + __field(u32, key_len) 321 + __dynamic_array(u8, key, map->key_size) 322 + __dynamic_array(u8, nxt, map->key_size) 323 + __field(bool, key_trunc) 324 + __field(int, ufd) 325 + ), 326 + 327 + TP_fast_assign( 328 + memcpy(__get_dynamic_array(key), key, map->key_size); 329 + memcpy(__get_dynamic_array(nxt), key_next, map->key_size); 330 + __entry->type = map->map_type; 331 + __entry->key_len = min(map->key_size, 16U); 332 + __entry->key_trunc = map->key_size != __entry->key_len; 333 + __entry->ufd = ufd; 334 + ), 335 + 336 + TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]", 337 + __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB), 338 + __entry->ufd, 339 + __print_hex(__get_dynamic_array(key), __entry->key_len), 340 + __entry->key_trunc ? " ..." : "", 341 + __print_hex(__get_dynamic_array(nxt), __entry->key_len), 342 + __entry->key_trunc ? " ..." : "") 343 + ); 344 + 345 + #endif /* _TRACE_BPF_H */ 346 + 347 + #include <trace/define_trace.h>
+53
include/trace/events/xdp.h
··· 1 + #undef TRACE_SYSTEM 2 + #define TRACE_SYSTEM xdp 3 + 4 + #if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _TRACE_XDP_H 6 + 7 + #include <linux/netdevice.h> 8 + #include <linux/filter.h> 9 + #include <linux/tracepoint.h> 10 + 11 + #define __XDP_ACT_MAP(FN) \ 12 + FN(ABORTED) \ 13 + FN(DROP) \ 14 + FN(PASS) \ 15 + FN(TX) 16 + 17 + #define __XDP_ACT_TP_FN(x) \ 18 + TRACE_DEFINE_ENUM(XDP_##x); 19 + #define __XDP_ACT_SYM_FN(x) \ 20 + { XDP_##x, #x }, 21 + #define __XDP_ACT_SYM_TAB \ 22 + __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 } 23 + __XDP_ACT_MAP(__XDP_ACT_TP_FN) 24 + 25 + TRACE_EVENT(xdp_exception, 26 + 27 + TP_PROTO(const struct net_device *dev, 28 + const struct bpf_prog *xdp, u32 act), 29 + 30 + TP_ARGS(dev, xdp, act), 31 + 32 + TP_STRUCT__entry( 33 + __string(name, dev->name) 34 + __array(u8, prog_tag, 8) 35 + __field(u32, act) 36 + ), 37 + 38 + TP_fast_assign( 39 + BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag)); 40 + memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag)); 41 + __assign_str(name, dev->name); 42 + __entry->act = act; 43 + ), 44 + 45 + TP_printk("prog=%s device=%s action=%s", 46 + __print_hex_str(__entry->prog_tag, 8), 47 + __get_str(name), 48 + __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB)) 49 + ); 50 + 51 + #endif /* _TRACE_XDP_H */ 52 + 53 + #include <trace/define_trace.h>
+9
kernel/bpf/core.c
··· 1173 1173 { 1174 1174 return -EFAULT; 1175 1175 } 1176 + 1177 + /* All definitions of tracepoints related to BPF. */ 1178 + #define CREATE_TRACE_POINTS 1179 + #include <linux/bpf_trace.h> 1180 + 1181 + EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception); 1182 + 1183 + EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type); 1184 + EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
+16 -1
kernel/bpf/inode.c
··· 21 21 #include <linux/parser.h> 22 22 #include <linux/filter.h> 23 23 #include <linux/bpf.h> 24 + #include <linux/bpf_trace.h> 24 25 25 26 enum bpf_type { 26 27 BPF_TYPE_UNSPEC = 0, ··· 282 281 ret = bpf_obj_do_pin(pname, raw, type); 283 282 if (ret != 0) 284 283 bpf_any_put(raw, type); 284 + if ((trace_bpf_obj_pin_prog_enabled() || 285 + trace_bpf_obj_pin_map_enabled()) && !ret) { 286 + if (type == BPF_TYPE_PROG) 287 + trace_bpf_obj_pin_prog(raw, ufd, pname); 288 + if (type == BPF_TYPE_MAP) 289 + trace_bpf_obj_pin_map(raw, ufd, pname); 290 + } 285 291 out: 286 292 putname(pname); 287 293 return ret; ··· 350 342 else 351 343 goto out; 352 344 353 - if (ret < 0) 345 + if (ret < 0) { 354 346 bpf_any_put(raw, type); 347 + } else if (trace_bpf_obj_get_prog_enabled() || 348 + trace_bpf_obj_get_map_enabled()) { 349 + if (type == BPF_TYPE_PROG) 350 + trace_bpf_obj_get_prog(raw, ret, pname); 351 + if (type == BPF_TYPE_MAP) 352 + trace_bpf_obj_get_map(raw, ret, pname); 353 + } 355 354 out: 356 355 putname(pname); 357 356 return ret;
+17 -2
kernel/bpf/syscall.c
··· 10 10 * General Public License for more details. 11 11 */ 12 12 #include <linux/bpf.h> 13 + #include <linux/bpf_trace.h> 13 14 #include <linux/syscalls.h> 14 15 #include <linux/slab.h> 15 16 #include <linux/anon_inodes.h> ··· 216 215 /* failed to allocate fd */ 217 216 goto free_map; 218 217 218 + trace_bpf_map_create(map, err); 219 219 return err; 220 220 221 221 free_map: ··· 341 339 if (copy_to_user(uvalue, value, value_size) != 0) 342 340 goto free_value; 343 341 342 + trace_bpf_map_lookup_elem(map, ufd, key, value); 344 343 err = 0; 345 344 346 345 free_value: ··· 424 421 __this_cpu_dec(bpf_prog_active); 425 422 preempt_enable(); 426 423 424 + if (!err) 425 + trace_bpf_map_update_elem(map, ufd, key, value); 427 426 free_value: 428 427 kfree(value); 429 428 free_key: ··· 471 466 __this_cpu_dec(bpf_prog_active); 472 467 preempt_enable(); 473 468 469 + if (!err) 470 + trace_bpf_map_delete_elem(map, ufd, key); 474 471 free_key: 475 472 kfree(key); 476 473 err_put: ··· 525 518 if (copy_to_user(unext_key, next_key, map->key_size) != 0) 526 519 goto free_next_key; 527 520 521 + trace_bpf_map_next_key(map, ufd, key, next_key); 528 522 err = 0; 529 523 530 524 free_next_key: ··· 679 671 680 672 void bpf_prog_put(struct bpf_prog *prog) 681 673 { 682 - if (atomic_dec_and_test(&prog->aux->refcnt)) 674 + if (atomic_dec_and_test(&prog->aux->refcnt)) { 675 + trace_bpf_prog_put_rcu(prog); 683 676 call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu); 677 + } 684 678 } 685 679 EXPORT_SYMBOL_GPL(bpf_prog_put); 686 680 ··· 791 781 792 782 struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) 793 783 { 794 - return __bpf_prog_get(ufd, &type); 784 + struct bpf_prog *prog = __bpf_prog_get(ufd, &type); 785 + 786 + if (!IS_ERR(prog)) 787 + trace_bpf_prog_get_type(prog); 788 + return prog; 795 789 } 796 790 EXPORT_SYMBOL_GPL(bpf_prog_get_type); 797 791 ··· 877 863 /* failed to allocate fd */ 878 864 goto free_used_maps; 879 865 866 + trace_bpf_prog_load(prog, err); 880 867 
return err; 881 868 882 869 free_used_maps: