Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next

Daniel Borkmann says:

====================
pull-request: bpf-next 2018-03-03

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Extend bpftool to build up CFG information of eBPF programs and add an
option to dump this in DOT format such that this can later be used with
DOT graphic tools (xdot, graphviz, etc) to visualize it. Part of the
analysis performed is sub-program detection and basic-block partitioning,
from Jiong.

2) Multiple enhancements for bpftool's batch mode, more specifically the
parser now understands comments (#), continuation lines (\), and arguments
enclosed between quotes. Also, allow reading from stdin via '-' as input
file, all from Quentin.

3) Improve BPF kselftests by i) unifying the rlimit handling into a helper
that is then used by all tests, and ii) add support for testing tail calls
to test_verifier plus add tests covering all corner cases. The latter is
especially useful for testing JITs, from Daniel.

4) Remove x64 JIT's bpf_flush_icache() since flush_icache_range() is a no-op
on x64, from Daniel.

5) Fix one more occasion in BPF samples where we do not detach the BPF program
from the cgroup after completion, from Prashant.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1258 -373
+2 -13
arch/x86/net/bpf_jit_comp.c
··· 11 11 #include <linux/netdevice.h> 12 12 #include <linux/filter.h> 13 13 #include <linux/if_vlan.h> 14 - #include <asm/cacheflush.h> 14 + #include <linux/bpf.h> 15 + 15 16 #include <asm/set_memory.h> 16 17 #include <asm/nospec-branch.h> 17 - #include <linux/bpf.h> 18 18 19 19 /* 20 20 * assembly code in arch/x86/net/bpf_jit.S ··· 102 102 #define X86_JGE 0x7D 103 103 #define X86_JLE 0x7E 104 104 #define X86_JG 0x7F 105 - 106 - static void bpf_flush_icache(void *start, void *end) 107 - { 108 - mm_segment_t old_fs = get_fs(); 109 - 110 - set_fs(KERNEL_DS); 111 - smp_wmb(); 112 - flush_icache_range((unsigned long)start, (unsigned long)end); 113 - set_fs(old_fs); 114 - } 115 105 116 106 #define CHOOSE_LOAD_FUNC(K, func) \ 117 107 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) ··· 1256 1266 bpf_jit_dump(prog->len, proglen, pass + 1, image); 1257 1267 1258 1268 if (image) { 1259 - bpf_flush_icache(header, image + proglen); 1260 1269 if (!prog->is_func || extra_pass) { 1261 1270 bpf_jit_binary_lock_ro(header); 1262 1271 } else {
+1
samples/bpf/test_cgrp2_sock.sh
··· 61 61 62 62 [ -n "$msg" ] && echo "ERROR: $msg" 63 63 64 + test_cgrp2_sock -d ${CGRP_MNT}/sockopts 64 65 ip li del cgrp2_sock 65 66 umount ${CGRP_MNT} 66 67
+3
samples/bpf/test_cgrp2_sock2.sh
··· 28 28 } 29 29 30 30 function cleanup { 31 + if [ -d /tmp/cgroupv2/foo ]; then 32 + test_cgrp2_sock -d /tmp/cgroupv2/foo 33 + fi 31 34 ip link del veth0b 32 35 ip netns delete at_ns0 33 36 umount /tmp/cgroupv2
+12 -6
tools/bpf/bpftool/Documentation/bpftool-prog.rst
··· 21 21 ============= 22 22 23 23 | **bpftool** **prog { show | list }** [*PROG*] 24 - | **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes**}] 24 + | **bpftool** **prog dump xlated** *PROG* [{**file** *FILE* | **opcodes** | **visual**}] 25 25 | **bpftool** **prog dump jited** *PROG* [{**file** *FILE* | **opcodes**}] 26 26 | **bpftool** **prog pin** *PROG* *FILE* 27 27 | **bpftool** **prog load** *OBJ* *FILE* ··· 39 39 Output will start with program ID followed by program type and 40 40 zero or more named attributes (depending on kernel version). 41 41 42 - **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** }] 43 - Dump eBPF instructions of the program from the kernel. 44 - If *FILE* is specified image will be written to a file, 45 - otherwise it will be disassembled and printed to stdout. 42 + **bpftool prog dump xlated** *PROG* [{ **file** *FILE* | **opcodes** | **visual** }] 43 + Dump eBPF instructions of the program from the kernel. By 44 + default, eBPF will be disassembled and printed to standard 45 + output in human-readable format. In this case, **opcodes** 46 + controls if raw opcodes should be printed as well. 46 47 47 - **opcodes** controls if raw opcodes will be printed. 48 + If **file** is specified, the binary image will instead be 49 + written to *FILE*. 50 + 51 + If **visual** is specified, control flow graph (CFG) will be 52 + built instead, and eBPF instructions will be presented with 53 + CFG in DOT format, on standard output. 48 54 49 55 **bpftool prog dump jited** *PROG* [{ **file** *FILE* | **opcodes** }] 50 56 Dump jited image (host machine code) of the program.
+9 -4
tools/bpf/bpftool/bash-completion/bpftool
··· 147 147 148 148 # Deal with simplest keywords 149 149 case $prev in 150 - help|key|opcodes) 150 + help|key|opcodes|visual) 151 151 return 0 152 152 ;; 153 153 tag) ··· 223 223 return 0 224 224 ;; 225 225 *) 226 - _bpftool_once_attr 'file' 226 + _bpftool_once_attr 'file' 227 + if _bpftool_search_list 'xlated'; then 228 + COMPREPLY+=( $( compgen -W 'opcodes visual' -- \ 229 + "$cur" ) ) 230 + else 227 231 COMPREPLY+=( $( compgen -W 'opcodes' -- \ 228 232 "$cur" ) ) 229 - return 0 230 - ;; 233 + fi 234 + return 0 235 + ;; 231 236 esac 232 237 ;; 233 238 pin)
+514
tools/bpf/bpftool/cfg.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* 3 + * Copyright (C) 2018 Netronome Systems, Inc. 4 + * 5 + * This software is dual licensed under the GNU General License Version 2, 6 + * June 1991 as shown in the file COPYING in the top-level directory of this 7 + * source tree or the BSD 2-Clause License provided below. You have the 8 + * option to license this software under the complete terms of either license. 9 + * 10 + * The BSD 2-Clause License: 11 + * 12 + * Redistribution and use in source and binary forms, with or 13 + * without modification, are permitted provided that the following 14 + * conditions are met: 15 + * 16 + * 1. Redistributions of source code must retain the above 17 + * copyright notice, this list of conditions and the following 18 + * disclaimer. 19 + * 20 + * 2. Redistributions in binary form must reproduce the above 21 + * copyright notice, this list of conditions and the following 22 + * disclaimer in the documentation and/or other materials 23 + * provided with the distribution. 24 + * 25 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 26 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 29 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 + * POSSIBILITY OF SUCH DAMAGE. 
36 + */ 37 + 38 + #include <linux/list.h> 39 + #include <stdlib.h> 40 + #include <string.h> 41 + 42 + #include "cfg.h" 43 + #include "main.h" 44 + #include "xlated_dumper.h" 45 + 46 + struct cfg { 47 + struct list_head funcs; 48 + int func_num; 49 + }; 50 + 51 + struct func_node { 52 + struct list_head l; 53 + struct list_head bbs; 54 + struct bpf_insn *start; 55 + struct bpf_insn *end; 56 + int idx; 57 + int bb_num; 58 + }; 59 + 60 + struct bb_node { 61 + struct list_head l; 62 + struct list_head e_prevs; 63 + struct list_head e_succs; 64 + struct bpf_insn *head; 65 + struct bpf_insn *tail; 66 + int idx; 67 + }; 68 + 69 + #define EDGE_FLAG_EMPTY 0x0 70 + #define EDGE_FLAG_FALLTHROUGH 0x1 71 + #define EDGE_FLAG_JUMP 0x2 72 + struct edge_node { 73 + struct list_head l; 74 + struct bb_node *src; 75 + struct bb_node *dst; 76 + int flags; 77 + }; 78 + 79 + #define ENTRY_BLOCK_INDEX 0 80 + #define EXIT_BLOCK_INDEX 1 81 + #define NUM_FIXED_BLOCKS 2 82 + #define func_prev(func) list_prev_entry(func, l) 83 + #define func_next(func) list_next_entry(func, l) 84 + #define bb_prev(bb) list_prev_entry(bb, l) 85 + #define bb_next(bb) list_next_entry(bb, l) 86 + #define entry_bb(func) func_first_bb(func) 87 + #define exit_bb(func) func_last_bb(func) 88 + #define cfg_first_func(cfg) \ 89 + list_first_entry(&cfg->funcs, struct func_node, l) 90 + #define cfg_last_func(cfg) \ 91 + list_last_entry(&cfg->funcs, struct func_node, l) 92 + #define func_first_bb(func) \ 93 + list_first_entry(&func->bbs, struct bb_node, l) 94 + #define func_last_bb(func) \ 95 + list_last_entry(&func->bbs, struct bb_node, l) 96 + 97 + static struct func_node *cfg_append_func(struct cfg *cfg, struct bpf_insn *insn) 98 + { 99 + struct func_node *new_func, *func; 100 + 101 + list_for_each_entry(func, &cfg->funcs, l) { 102 + if (func->start == insn) 103 + return func; 104 + else if (func->start > insn) 105 + break; 106 + } 107 + 108 + func = func_prev(func); 109 + new_func = calloc(1, sizeof(*new_func)); 110 + 
if (!new_func) { 111 + p_err("OOM when allocating FUNC node"); 112 + return NULL; 113 + } 114 + new_func->start = insn; 115 + new_func->idx = cfg->func_num; 116 + list_add(&new_func->l, &func->l); 117 + cfg->func_num++; 118 + 119 + return new_func; 120 + } 121 + 122 + static struct bb_node *func_append_bb(struct func_node *func, 123 + struct bpf_insn *insn) 124 + { 125 + struct bb_node *new_bb, *bb; 126 + 127 + list_for_each_entry(bb, &func->bbs, l) { 128 + if (bb->head == insn) 129 + return bb; 130 + else if (bb->head > insn) 131 + break; 132 + } 133 + 134 + bb = bb_prev(bb); 135 + new_bb = calloc(1, sizeof(*new_bb)); 136 + if (!new_bb) { 137 + p_err("OOM when allocating BB node"); 138 + return NULL; 139 + } 140 + new_bb->head = insn; 141 + INIT_LIST_HEAD(&new_bb->e_prevs); 142 + INIT_LIST_HEAD(&new_bb->e_succs); 143 + list_add(&new_bb->l, &bb->l); 144 + 145 + return new_bb; 146 + } 147 + 148 + static struct bb_node *func_insert_dummy_bb(struct list_head *after) 149 + { 150 + struct bb_node *bb; 151 + 152 + bb = calloc(1, sizeof(*bb)); 153 + if (!bb) { 154 + p_err("OOM when allocating BB node"); 155 + return NULL; 156 + } 157 + 158 + INIT_LIST_HEAD(&bb->e_prevs); 159 + INIT_LIST_HEAD(&bb->e_succs); 160 + list_add(&bb->l, after); 161 + 162 + return bb; 163 + } 164 + 165 + static bool cfg_partition_funcs(struct cfg *cfg, struct bpf_insn *cur, 166 + struct bpf_insn *end) 167 + { 168 + struct func_node *func, *last_func; 169 + 170 + func = cfg_append_func(cfg, cur); 171 + if (!func) 172 + return true; 173 + 174 + for (; cur < end; cur++) { 175 + if (cur->code != (BPF_JMP | BPF_CALL)) 176 + continue; 177 + if (cur->src_reg != BPF_PSEUDO_CALL) 178 + continue; 179 + func = cfg_append_func(cfg, cur + cur->off + 1); 180 + if (!func) 181 + return true; 182 + } 183 + 184 + last_func = cfg_last_func(cfg); 185 + last_func->end = end - 1; 186 + func = cfg_first_func(cfg); 187 + list_for_each_entry_from(func, &last_func->l, l) { 188 + func->end = func_next(func)->start - 1; 189 
+ } 190 + 191 + return false; 192 + } 193 + 194 + static bool func_partition_bb_head(struct func_node *func) 195 + { 196 + struct bpf_insn *cur, *end; 197 + struct bb_node *bb; 198 + 199 + cur = func->start; 200 + end = func->end; 201 + INIT_LIST_HEAD(&func->bbs); 202 + bb = func_append_bb(func, cur); 203 + if (!bb) 204 + return true; 205 + 206 + for (; cur <= end; cur++) { 207 + if (BPF_CLASS(cur->code) == BPF_JMP) { 208 + u8 opcode = BPF_OP(cur->code); 209 + 210 + if (opcode == BPF_EXIT || opcode == BPF_CALL) 211 + continue; 212 + 213 + bb = func_append_bb(func, cur + cur->off + 1); 214 + if (!bb) 215 + return true; 216 + 217 + if (opcode != BPF_JA) { 218 + bb = func_append_bb(func, cur + 1); 219 + if (!bb) 220 + return true; 221 + } 222 + } 223 + } 224 + 225 + return false; 226 + } 227 + 228 + static void func_partition_bb_tail(struct func_node *func) 229 + { 230 + unsigned int bb_idx = NUM_FIXED_BLOCKS; 231 + struct bb_node *bb, *last; 232 + 233 + last = func_last_bb(func); 234 + last->tail = func->end; 235 + bb = func_first_bb(func); 236 + list_for_each_entry_from(bb, &last->l, l) { 237 + bb->tail = bb_next(bb)->head - 1; 238 + bb->idx = bb_idx++; 239 + } 240 + 241 + last->idx = bb_idx++; 242 + func->bb_num = bb_idx; 243 + } 244 + 245 + static bool func_add_special_bb(struct func_node *func) 246 + { 247 + struct bb_node *bb; 248 + 249 + bb = func_insert_dummy_bb(&func->bbs); 250 + if (!bb) 251 + return true; 252 + bb->idx = ENTRY_BLOCK_INDEX; 253 + 254 + bb = func_insert_dummy_bb(&func_last_bb(func)->l); 255 + if (!bb) 256 + return true; 257 + bb->idx = EXIT_BLOCK_INDEX; 258 + 259 + return false; 260 + } 261 + 262 + static bool func_partition_bb(struct func_node *func) 263 + { 264 + if (func_partition_bb_head(func)) 265 + return true; 266 + 267 + func_partition_bb_tail(func); 268 + 269 + return false; 270 + } 271 + 272 + static struct bb_node *func_search_bb_with_head(struct func_node *func, 273 + struct bpf_insn *insn) 274 + { 275 + struct bb_node *bb; 276 + 
277 + list_for_each_entry(bb, &func->bbs, l) { 278 + if (bb->head == insn) 279 + return bb; 280 + } 281 + 282 + return NULL; 283 + } 284 + 285 + static struct edge_node *new_edge(struct bb_node *src, struct bb_node *dst, 286 + int flags) 287 + { 288 + struct edge_node *e; 289 + 290 + e = calloc(1, sizeof(*e)); 291 + if (!e) { 292 + p_err("OOM when allocating edge node"); 293 + return NULL; 294 + } 295 + 296 + if (src) 297 + e->src = src; 298 + if (dst) 299 + e->dst = dst; 300 + 301 + e->flags |= flags; 302 + 303 + return e; 304 + } 305 + 306 + static bool func_add_bb_edges(struct func_node *func) 307 + { 308 + struct bpf_insn *insn; 309 + struct edge_node *e; 310 + struct bb_node *bb; 311 + 312 + bb = entry_bb(func); 313 + e = new_edge(bb, bb_next(bb), EDGE_FLAG_FALLTHROUGH); 314 + if (!e) 315 + return true; 316 + list_add_tail(&e->l, &bb->e_succs); 317 + 318 + bb = exit_bb(func); 319 + e = new_edge(bb_prev(bb), bb, EDGE_FLAG_FALLTHROUGH); 320 + if (!e) 321 + return true; 322 + list_add_tail(&e->l, &bb->e_prevs); 323 + 324 + bb = entry_bb(func); 325 + bb = bb_next(bb); 326 + list_for_each_entry_from(bb, &exit_bb(func)->l, l) { 327 + e = new_edge(bb, NULL, EDGE_FLAG_EMPTY); 328 + if (!e) 329 + return true; 330 + e->src = bb; 331 + 332 + insn = bb->tail; 333 + if (BPF_CLASS(insn->code) != BPF_JMP || 334 + BPF_OP(insn->code) == BPF_EXIT) { 335 + e->dst = bb_next(bb); 336 + e->flags |= EDGE_FLAG_FALLTHROUGH; 337 + list_add_tail(&e->l, &bb->e_succs); 338 + continue; 339 + } else if (BPF_OP(insn->code) == BPF_JA) { 340 + e->dst = func_search_bb_with_head(func, 341 + insn + insn->off + 1); 342 + e->flags |= EDGE_FLAG_JUMP; 343 + list_add_tail(&e->l, &bb->e_succs); 344 + continue; 345 + } 346 + 347 + e->dst = bb_next(bb); 348 + e->flags |= EDGE_FLAG_FALLTHROUGH; 349 + list_add_tail(&e->l, &bb->e_succs); 350 + 351 + e = new_edge(bb, NULL, EDGE_FLAG_JUMP); 352 + if (!e) 353 + return true; 354 + e->src = bb; 355 + e->dst = func_search_bb_with_head(func, insn + insn->off + 1); 
356 + list_add_tail(&e->l, &bb->e_succs); 357 + } 358 + 359 + return false; 360 + } 361 + 362 + static bool cfg_build(struct cfg *cfg, struct bpf_insn *insn, unsigned int len) 363 + { 364 + int cnt = len / sizeof(*insn); 365 + struct func_node *func; 366 + 367 + INIT_LIST_HEAD(&cfg->funcs); 368 + 369 + if (cfg_partition_funcs(cfg, insn, insn + cnt)) 370 + return true; 371 + 372 + list_for_each_entry(func, &cfg->funcs, l) { 373 + if (func_partition_bb(func) || func_add_special_bb(func)) 374 + return true; 375 + 376 + if (func_add_bb_edges(func)) 377 + return true; 378 + } 379 + 380 + return false; 381 + } 382 + 383 + static void cfg_destroy(struct cfg *cfg) 384 + { 385 + struct func_node *func, *func2; 386 + 387 + list_for_each_entry_safe(func, func2, &cfg->funcs, l) { 388 + struct bb_node *bb, *bb2; 389 + 390 + list_for_each_entry_safe(bb, bb2, &func->bbs, l) { 391 + struct edge_node *e, *e2; 392 + 393 + list_for_each_entry_safe(e, e2, &bb->e_prevs, l) { 394 + list_del(&e->l); 395 + free(e); 396 + } 397 + 398 + list_for_each_entry_safe(e, e2, &bb->e_succs, l) { 399 + list_del(&e->l); 400 + free(e); 401 + } 402 + 403 + list_del(&bb->l); 404 + free(bb); 405 + } 406 + 407 + list_del(&func->l); 408 + free(func); 409 + } 410 + } 411 + 412 + static void draw_bb_node(struct func_node *func, struct bb_node *bb) 413 + { 414 + const char *shape; 415 + 416 + if (bb->idx == ENTRY_BLOCK_INDEX || bb->idx == EXIT_BLOCK_INDEX) 417 + shape = "Mdiamond"; 418 + else 419 + shape = "record"; 420 + 421 + printf("\tfn_%d_bb_%d [shape=%s,style=filled,label=\"", 422 + func->idx, bb->idx, shape); 423 + 424 + if (bb->idx == ENTRY_BLOCK_INDEX) { 425 + printf("ENTRY"); 426 + } else if (bb->idx == EXIT_BLOCK_INDEX) { 427 + printf("EXIT"); 428 + } else { 429 + unsigned int start_idx; 430 + struct dump_data dd = {}; 431 + 432 + printf("{"); 433 + kernel_syms_load(&dd); 434 + start_idx = bb->head - func->start; 435 + dump_xlated_for_graph(&dd, bb->head, bb->tail, start_idx); 436 + 
kernel_syms_destroy(&dd); 437 + printf("}"); 438 + } 439 + 440 + printf("\"];\n\n"); 441 + } 442 + 443 + static void draw_bb_succ_edges(struct func_node *func, struct bb_node *bb) 444 + { 445 + const char *style = "\"solid,bold\""; 446 + const char *color = "black"; 447 + int func_idx = func->idx; 448 + struct edge_node *e; 449 + int weight = 10; 450 + 451 + if (list_empty(&bb->e_succs)) 452 + return; 453 + 454 + list_for_each_entry(e, &bb->e_succs, l) { 455 + printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=%s, color=%s, weight=%d, constraint=true", 456 + func_idx, e->src->idx, func_idx, e->dst->idx, 457 + style, color, weight); 458 + printf("];\n"); 459 + } 460 + } 461 + 462 + static void func_output_bb_def(struct func_node *func) 463 + { 464 + struct bb_node *bb; 465 + 466 + list_for_each_entry(bb, &func->bbs, l) { 467 + draw_bb_node(func, bb); 468 + } 469 + } 470 + 471 + static void func_output_edges(struct func_node *func) 472 + { 473 + int func_idx = func->idx; 474 + struct bb_node *bb; 475 + 476 + list_for_each_entry(bb, &func->bbs, l) { 477 + draw_bb_succ_edges(func, bb); 478 + } 479 + 480 + /* Add an invisible edge from ENTRY to EXIT, this is to 481 + * improve the graph layout. 
482 + */ 483 + printf("\tfn_%d_bb_%d:s -> fn_%d_bb_%d:n [style=\"invis\", constraint=true];\n", 484 + func_idx, ENTRY_BLOCK_INDEX, func_idx, EXIT_BLOCK_INDEX); 485 + } 486 + 487 + static void cfg_dump(struct cfg *cfg) 488 + { 489 + struct func_node *func; 490 + 491 + printf("digraph \"DOT graph for eBPF program\" {\n"); 492 + list_for_each_entry(func, &cfg->funcs, l) { 493 + printf("subgraph \"cluster_%d\" {\n\tstyle=\"dashed\";\n\tcolor=\"black\";\n\tlabel=\"func_%d ()\";\n", 494 + func->idx, func->idx); 495 + func_output_bb_def(func); 496 + func_output_edges(func); 497 + printf("}\n"); 498 + } 499 + printf("}\n"); 500 + } 501 + 502 + void dump_xlated_cfg(void *buf, unsigned int len) 503 + { 504 + struct bpf_insn *insn = buf; 505 + struct cfg cfg; 506 + 507 + memset(&cfg, 0, sizeof(cfg)); 508 + if (cfg_build(&cfg, insn, len)) 509 + return; 510 + 511 + cfg_dump(&cfg); 512 + 513 + cfg_destroy(&cfg); 514 + }
+43
tools/bpf/bpftool/cfg.h
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* 3 + * Copyright (C) 2018 Netronome Systems, Inc. 4 + * 5 + * This software is dual licensed under the GNU General License Version 2, 6 + * June 1991 as shown in the file COPYING in the top-level directory of this 7 + * source tree or the BSD 2-Clause License provided below. You have the 8 + * option to license this software under the complete terms of either license. 9 + * 10 + * The BSD 2-Clause License: 11 + * 12 + * Redistribution and use in source and binary forms, with or 13 + * without modification, are permitted provided that the following 14 + * conditions are met: 15 + * 16 + * 1. Redistributions of source code must retain the above 17 + * copyright notice, this list of conditions and the following 18 + * disclaimer. 19 + * 20 + * 2. Redistributions in binary form must reproduce the above 21 + * copyright notice, this list of conditions and the following 22 + * disclaimer in the documentation and/or other materials 23 + * provided with the distribution. 24 + * 25 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 26 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 29 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 + * POSSIBILITY OF SUCH DAMAGE. 
36 + */ 37 + 38 + #ifndef __BPF_TOOL_CFG_H 39 + #define __BPF_TOOL_CFG_H 40 + 41 + void dump_xlated_cfg(void *buf, unsigned int len); 42 + 43 + #endif /* __BPF_TOOL_CFG_H */
+89 -15
tools/bpf/bpftool/main.c
··· 46 46 47 47 #include "main.h" 48 48 49 + #define BATCH_LINE_LEN_MAX 65536 50 + #define BATCH_ARG_NB_MAX 4096 51 + 49 52 const char *bin_name; 50 53 static int last_argc; 51 54 static char **last_argv; ··· 160 157 } 161 158 } 162 159 160 + /* Split command line into argument vector. */ 161 + static int make_args(char *line, char *n_argv[], int maxargs, int cmd_nb) 162 + { 163 + static const char ws[] = " \t\r\n"; 164 + char *cp = line; 165 + int n_argc = 0; 166 + 167 + while (*cp) { 168 + /* Skip leading whitespace. */ 169 + cp += strspn(cp, ws); 170 + 171 + if (*cp == '\0') 172 + break; 173 + 174 + if (n_argc >= (maxargs - 1)) { 175 + p_err("too many arguments to command %d", cmd_nb); 176 + return -1; 177 + } 178 + 179 + /* Word begins with quote. */ 180 + if (*cp == '\'' || *cp == '"') { 181 + char quote = *cp++; 182 + 183 + n_argv[n_argc++] = cp; 184 + /* Find ending quote. */ 185 + cp = strchr(cp, quote); 186 + if (!cp) { 187 + p_err("unterminated quoted string in command %d", 188 + cmd_nb); 189 + return -1; 190 + } 191 + } else { 192 + n_argv[n_argc++] = cp; 193 + 194 + /* Find end of word. */ 195 + cp += strcspn(cp, ws); 196 + if (*cp == '\0') 197 + break; 198 + } 199 + 200 + /* Separate words. 
*/ 201 + *cp++ = 0; 202 + } 203 + n_argv[n_argc] = NULL; 204 + 205 + return n_argc; 206 + } 207 + 163 208 static int do_batch(int argc, char **argv); 164 209 165 210 static const struct cmd cmds[] = { ··· 222 171 223 172 static int do_batch(int argc, char **argv) 224 173 { 174 + char buf[BATCH_LINE_LEN_MAX], contline[BATCH_LINE_LEN_MAX]; 175 + char *n_argv[BATCH_ARG_NB_MAX]; 225 176 unsigned int lines = 0; 226 - char *n_argv[4096]; 227 - char buf[65536]; 228 177 int n_argc; 229 178 FILE *fp; 179 + char *cp; 230 180 int err; 231 181 int i; 232 182 ··· 243 191 } 244 192 NEXT_ARG(); 245 193 246 - fp = fopen(*argv, "r"); 194 + if (!strcmp(*argv, "-")) 195 + fp = stdin; 196 + else 197 + fp = fopen(*argv, "r"); 247 198 if (!fp) { 248 199 p_err("Can't open file (%s): %s", *argv, strerror(errno)); 249 200 return -1; ··· 255 200 if (json_output) 256 201 jsonw_start_array(json_wtr); 257 202 while (fgets(buf, sizeof(buf), fp)) { 203 + cp = strchr(buf, '#'); 204 + if (cp) 205 + *cp = '\0'; 206 + 258 207 if (strlen(buf) == sizeof(buf) - 1) { 259 208 errno = E2BIG; 260 209 break; 261 210 } 262 211 263 - n_argc = 0; 264 - n_argv[n_argc] = strtok(buf, " \t\n"); 265 - 266 - while (n_argv[n_argc]) { 267 - n_argc++; 268 - if (n_argc == ARRAY_SIZE(n_argv)) { 269 - p_err("line %d has too many arguments, skip", 212 + /* Append continuation lines if any (coming after a line ending 213 + * with '\' in the batch file). 
214 + */ 215 + while ((cp = strstr(buf, "\\\n")) != NULL) { 216 + if (!fgets(contline, sizeof(contline), fp) || 217 + strlen(contline) == 0) { 218 + p_err("missing continuation line on command %d", 270 219 lines); 271 - n_argc = 0; 272 - break; 220 + err = -1; 221 + goto err_close; 273 222 } 274 - n_argv[n_argc] = strtok(NULL, " \t\n"); 223 + 224 + cp = strchr(contline, '#'); 225 + if (cp) 226 + *cp = '\0'; 227 + 228 + if (strlen(buf) + strlen(contline) + 1 > sizeof(buf)) { 229 + p_err("command %d is too long", lines); 230 + err = -1; 231 + goto err_close; 232 + } 233 + buf[strlen(buf) - 2] = '\0'; 234 + strcat(buf, contline); 275 235 } 276 236 237 + n_argc = make_args(buf, n_argv, BATCH_ARG_NB_MAX, lines); 277 238 if (!n_argc) 278 239 continue; 240 + if (n_argc < 0) 241 + goto err_close; 279 242 280 243 if (json_output) { 281 244 jsonw_start_object(json_wtr); ··· 320 247 p_err("reading batch file failed: %s", strerror(errno)); 321 248 err = -1; 322 249 } else { 323 - p_info("processed %d lines", lines); 250 + p_info("processed %d commands", lines); 324 251 err = 0; 325 252 } 326 253 err_close: 327 - fclose(fp); 254 + if (fp != stdin) 255 + fclose(fp); 328 256 329 257 if (json_output) 330 258 jsonw_end_array(json_wtr);
+29 -274
tools/bpf/bpftool/prog.c
··· 47 47 #include <bpf.h> 48 48 #include <libbpf.h> 49 49 50 + #include "cfg.h" 50 51 #include "main.h" 51 - #include "disasm.h" 52 + #include "xlated_dumper.h" 52 53 53 54 static const char * const prog_type_name[] = { 54 55 [BPF_PROG_TYPE_UNSPEC] = "unspec", ··· 408 407 return err; 409 408 } 410 409 411 - #define SYM_MAX_NAME 256 412 - 413 - struct kernel_sym { 414 - unsigned long address; 415 - char name[SYM_MAX_NAME]; 416 - }; 417 - 418 - struct dump_data { 419 - unsigned long address_call_base; 420 - struct kernel_sym *sym_mapping; 421 - __u32 sym_count; 422 - char scratch_buff[SYM_MAX_NAME]; 423 - }; 424 - 425 - static int kernel_syms_cmp(const void *sym_a, const void *sym_b) 426 - { 427 - return ((struct kernel_sym *)sym_a)->address - 428 - ((struct kernel_sym *)sym_b)->address; 429 - } 430 - 431 - static void kernel_syms_load(struct dump_data *dd) 432 - { 433 - struct kernel_sym *sym; 434 - char buff[256]; 435 - void *tmp, *address; 436 - FILE *fp; 437 - 438 - fp = fopen("/proc/kallsyms", "r"); 439 - if (!fp) 440 - return; 441 - 442 - while (!feof(fp)) { 443 - if (!fgets(buff, sizeof(buff), fp)) 444 - break; 445 - tmp = realloc(dd->sym_mapping, 446 - (dd->sym_count + 1) * 447 - sizeof(*dd->sym_mapping)); 448 - if (!tmp) { 449 - out: 450 - free(dd->sym_mapping); 451 - dd->sym_mapping = NULL; 452 - fclose(fp); 453 - return; 454 - } 455 - dd->sym_mapping = tmp; 456 - sym = &dd->sym_mapping[dd->sym_count]; 457 - if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2) 458 - continue; 459 - sym->address = (unsigned long)address; 460 - if (!strcmp(sym->name, "__bpf_call_base")) { 461 - dd->address_call_base = sym->address; 462 - /* sysctl kernel.kptr_restrict was set */ 463 - if (!sym->address) 464 - goto out; 465 - } 466 - if (sym->address) 467 - dd->sym_count++; 468 - } 469 - 470 - fclose(fp); 471 - 472 - qsort(dd->sym_mapping, dd->sym_count, 473 - sizeof(*dd->sym_mapping), kernel_syms_cmp); 474 - } 475 - 476 - static void kernel_syms_destroy(struct dump_data 
*dd) 477 - { 478 - free(dd->sym_mapping); 479 - } 480 - 481 - static struct kernel_sym *kernel_syms_search(struct dump_data *dd, 482 - unsigned long key) 483 - { 484 - struct kernel_sym sym = { 485 - .address = key, 486 - }; 487 - 488 - return dd->sym_mapping ? 489 - bsearch(&sym, dd->sym_mapping, dd->sym_count, 490 - sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL; 491 - } 492 - 493 - static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...) 494 - { 495 - va_list args; 496 - 497 - va_start(args, fmt); 498 - vprintf(fmt, args); 499 - va_end(args); 500 - } 501 - 502 - static const char *print_call_pcrel(struct dump_data *dd, 503 - struct kernel_sym *sym, 504 - unsigned long address, 505 - const struct bpf_insn *insn) 506 - { 507 - if (sym) 508 - snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 509 - "%+d#%s", insn->off, sym->name); 510 - else 511 - snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 512 - "%+d#0x%lx", insn->off, address); 513 - return dd->scratch_buff; 514 - } 515 - 516 - static const char *print_call_helper(struct dump_data *dd, 517 - struct kernel_sym *sym, 518 - unsigned long address) 519 - { 520 - if (sym) 521 - snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 522 - "%s", sym->name); 523 - else 524 - snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 525 - "0x%lx", address); 526 - return dd->scratch_buff; 527 - } 528 - 529 - static const char *print_call(void *private_data, 530 - const struct bpf_insn *insn) 531 - { 532 - struct dump_data *dd = private_data; 533 - unsigned long address = dd->address_call_base + insn->imm; 534 - struct kernel_sym *sym; 535 - 536 - sym = kernel_syms_search(dd, address); 537 - if (insn->src_reg == BPF_PSEUDO_CALL) 538 - return print_call_pcrel(dd, sym, address, insn); 539 - else 540 - return print_call_helper(dd, sym, address); 541 - } 542 - 543 - static const char *print_imm(void *private_data, 544 - const struct bpf_insn *insn, 545 - __u64 full_imm) 546 - { 547 - struct dump_data *dd 
= private_data; 548 - 549 - if (insn->src_reg == BPF_PSEUDO_MAP_FD) 550 - snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 551 - "map[id:%u]", insn->imm); 552 - else 553 - snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 554 - "0x%llx", (unsigned long long)full_imm); 555 - return dd->scratch_buff; 556 - } 557 - 558 - static void dump_xlated_plain(struct dump_data *dd, void *buf, 559 - unsigned int len, bool opcodes) 560 - { 561 - const struct bpf_insn_cbs cbs = { 562 - .cb_print = print_insn, 563 - .cb_call = print_call, 564 - .cb_imm = print_imm, 565 - .private_data = dd, 566 - }; 567 - struct bpf_insn *insn = buf; 568 - bool double_insn = false; 569 - unsigned int i; 570 - 571 - for (i = 0; i < len / sizeof(*insn); i++) { 572 - if (double_insn) { 573 - double_insn = false; 574 - continue; 575 - } 576 - 577 - double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW); 578 - 579 - printf("% 4d: ", i); 580 - print_bpf_insn(&cbs, NULL, insn + i, true); 581 - 582 - if (opcodes) { 583 - printf(" "); 584 - fprint_hex(stdout, insn + i, 8, " "); 585 - if (double_insn && i < len - 1) { 586 - printf(" "); 587 - fprint_hex(stdout, insn + i + 1, 8, " "); 588 - } 589 - printf("\n"); 590 - } 591 - } 592 - } 593 - 594 - static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...) 
595 - { 596 - unsigned int l = strlen(fmt); 597 - char chomped_fmt[l]; 598 - va_list args; 599 - 600 - va_start(args, fmt); 601 - if (l > 0) { 602 - strncpy(chomped_fmt, fmt, l - 1); 603 - chomped_fmt[l - 1] = '\0'; 604 - } 605 - jsonw_vprintf_enquote(json_wtr, chomped_fmt, args); 606 - va_end(args); 607 - } 608 - 609 - static void dump_xlated_json(struct dump_data *dd, void *buf, 610 - unsigned int len, bool opcodes) 611 - { 612 - const struct bpf_insn_cbs cbs = { 613 - .cb_print = print_insn_json, 614 - .cb_call = print_call, 615 - .cb_imm = print_imm, 616 - .private_data = dd, 617 - }; 618 - struct bpf_insn *insn = buf; 619 - bool double_insn = false; 620 - unsigned int i; 621 - 622 - jsonw_start_array(json_wtr); 623 - for (i = 0; i < len / sizeof(*insn); i++) { 624 - if (double_insn) { 625 - double_insn = false; 626 - continue; 627 - } 628 - double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW); 629 - 630 - jsonw_start_object(json_wtr); 631 - jsonw_name(json_wtr, "disasm"); 632 - print_bpf_insn(&cbs, NULL, insn + i, true); 633 - 634 - if (opcodes) { 635 - jsonw_name(json_wtr, "opcodes"); 636 - jsonw_start_object(json_wtr); 637 - 638 - jsonw_name(json_wtr, "code"); 639 - jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code); 640 - 641 - jsonw_name(json_wtr, "src_reg"); 642 - jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg); 643 - 644 - jsonw_name(json_wtr, "dst_reg"); 645 - jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg); 646 - 647 - jsonw_name(json_wtr, "off"); 648 - print_hex_data_json((uint8_t *)(&insn[i].off), 2); 649 - 650 - jsonw_name(json_wtr, "imm"); 651 - if (double_insn && i < len - 1) 652 - print_hex_data_json((uint8_t *)(&insn[i].imm), 653 - 12); 654 - else 655 - print_hex_data_json((uint8_t *)(&insn[i].imm), 656 - 4); 657 - jsonw_end_object(json_wtr); 658 - } 659 - jsonw_end_object(json_wtr); 660 - } 661 - jsonw_end_array(json_wtr); 662 - } 663 - 664 410 static int do_dump(int argc, char **argv) 665 411 { 666 412 struct bpf_prog_info 
info = {}; ··· 416 668 unsigned int buf_size; 417 669 char *filepath = NULL; 418 670 bool opcodes = false; 671 + bool visual = false; 419 672 unsigned char *buf; 420 673 __u32 *member_len; 421 674 __u64 *member_ptr; ··· 454 705 NEXT_ARG(); 455 706 } else if (is_prefix(*argv, "opcodes")) { 456 707 opcodes = true; 708 + NEXT_ARG(); 709 + } else if (is_prefix(*argv, "visual")) { 710 + visual = true; 457 711 NEXT_ARG(); 458 712 } 459 713 ··· 529 777 530 778 if (json_output) 531 779 jsonw_null(json_wtr); 532 - } else { 533 - if (member_len == &info.jited_prog_len) { 534 - const char *name = NULL; 780 + } else if (member_len == &info.jited_prog_len) { 781 + const char *name = NULL; 535 782 536 - if (info.ifindex) { 537 - name = ifindex_to_bfd_name_ns(info.ifindex, 538 - info.netns_dev, 539 - info.netns_ino); 540 - if (!name) 541 - goto err_free; 542 - } 543 - 544 - disasm_print_insn(buf, *member_len, opcodes, name); 545 - } else { 546 - kernel_syms_load(&dd); 547 - if (json_output) 548 - dump_xlated_json(&dd, buf, *member_len, opcodes); 549 - else 550 - dump_xlated_plain(&dd, buf, *member_len, opcodes); 551 - kernel_syms_destroy(&dd); 783 + if (info.ifindex) { 784 + name = ifindex_to_bfd_name_ns(info.ifindex, 785 + info.netns_dev, 786 + info.netns_ino); 787 + if (!name) 788 + goto err_free; 552 789 } 790 + 791 + disasm_print_insn(buf, *member_len, opcodes, name); 792 + } else if (visual) { 793 + if (json_output) 794 + jsonw_null(json_wtr); 795 + else 796 + dump_xlated_cfg(buf, *member_len); 797 + } else { 798 + kernel_syms_load(&dd); 799 + if (json_output) 800 + dump_xlated_json(&dd, buf, *member_len, opcodes); 801 + else 802 + dump_xlated_plain(&dd, buf, *member_len, opcodes); 803 + kernel_syms_destroy(&dd); 553 804 } 554 805 555 806 free(buf); ··· 606 851 607 852 fprintf(stderr, 608 853 "Usage: %s %s { show | list } [PROG]\n" 609 - " %s %s dump xlated PROG [{ file FILE | opcodes }]\n" 854 + " %s %s dump xlated PROG [{ file FILE | opcodes | visual }]\n" 610 855 " %s %s 
dump jited PROG [{ file FILE | opcodes }]\n" 611 856 " %s %s pin PROG FILE\n" 612 857 " %s %s load OBJ FILE\n"
+338
tools/bpf/bpftool/xlated_dumper.c
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* 3 + * Copyright (C) 2018 Netronome Systems, Inc. 4 + * 5 + * This software is dual licensed under the GNU General License Version 2, 6 + * June 1991 as shown in the file COPYING in the top-level directory of this 7 + * source tree or the BSD 2-Clause License provided below. You have the 8 + * option to license this software under the complete terms of either license. 9 + * 10 + * The BSD 2-Clause License: 11 + * 12 + * Redistribution and use in source and binary forms, with or 13 + * without modification, are permitted provided that the following 14 + * conditions are met: 15 + * 16 + * 1. Redistributions of source code must retain the above 17 + * copyright notice, this list of conditions and the following 18 + * disclaimer. 19 + * 20 + * 2. Redistributions in binary form must reproduce the above 21 + * copyright notice, this list of conditions and the following 22 + * disclaimer in the documentation and/or other materials 23 + * provided with the distribution. 24 + * 25 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 26 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 29 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 + * POSSIBILITY OF SUCH DAMAGE. 
36 + */ 37 + 38 + #include <stdarg.h> 39 + #include <stdio.h> 40 + #include <stdlib.h> 41 + #include <string.h> 42 + #include <sys/types.h> 43 + 44 + #include "disasm.h" 45 + #include "json_writer.h" 46 + #include "main.h" 47 + #include "xlated_dumper.h" 48 + 49 + static int kernel_syms_cmp(const void *sym_a, const void *sym_b) 50 + { 51 + return ((struct kernel_sym *)sym_a)->address - 52 + ((struct kernel_sym *)sym_b)->address; 53 + } 54 + 55 + void kernel_syms_load(struct dump_data *dd) 56 + { 57 + struct kernel_sym *sym; 58 + char buff[256]; 59 + void *tmp, *address; 60 + FILE *fp; 61 + 62 + fp = fopen("/proc/kallsyms", "r"); 63 + if (!fp) 64 + return; 65 + 66 + while (!feof(fp)) { 67 + if (!fgets(buff, sizeof(buff), fp)) 68 + break; 69 + tmp = realloc(dd->sym_mapping, 70 + (dd->sym_count + 1) * 71 + sizeof(*dd->sym_mapping)); 72 + if (!tmp) { 73 + out: 74 + free(dd->sym_mapping); 75 + dd->sym_mapping = NULL; 76 + fclose(fp); 77 + return; 78 + } 79 + dd->sym_mapping = tmp; 80 + sym = &dd->sym_mapping[dd->sym_count]; 81 + if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2) 82 + continue; 83 + sym->address = (unsigned long)address; 84 + if (!strcmp(sym->name, "__bpf_call_base")) { 85 + dd->address_call_base = sym->address; 86 + /* sysctl kernel.kptr_restrict was set */ 87 + if (!sym->address) 88 + goto out; 89 + } 90 + if (sym->address) 91 + dd->sym_count++; 92 + } 93 + 94 + fclose(fp); 95 + 96 + qsort(dd->sym_mapping, dd->sym_count, 97 + sizeof(*dd->sym_mapping), kernel_syms_cmp); 98 + } 99 + 100 + void kernel_syms_destroy(struct dump_data *dd) 101 + { 102 + free(dd->sym_mapping); 103 + } 104 + 105 + static struct kernel_sym *kernel_syms_search(struct dump_data *dd, 106 + unsigned long key) 107 + { 108 + struct kernel_sym sym = { 109 + .address = key, 110 + }; 111 + 112 + return dd->sym_mapping ? 
113 + bsearch(&sym, dd->sym_mapping, dd->sym_count, 114 + sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL; 115 + } 116 + 117 + static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...) 118 + { 119 + va_list args; 120 + 121 + va_start(args, fmt); 122 + vprintf(fmt, args); 123 + va_end(args); 124 + } 125 + 126 + static void 127 + print_insn_for_graph(struct bpf_verifier_env *env, const char *fmt, ...) 128 + { 129 + char buf[64], *p; 130 + va_list args; 131 + 132 + va_start(args, fmt); 133 + vsnprintf(buf, sizeof(buf), fmt, args); 134 + va_end(args); 135 + 136 + p = buf; 137 + while (*p != '\0') { 138 + if (*p == '\n') { 139 + memmove(p + 3, p, strlen(buf) + 1 - (p - buf)); 140 + /* Align each instruction dump row left. */ 141 + *p++ = '\\'; 142 + *p++ = 'l'; 143 + /* Output multiline concatenation. */ 144 + *p++ = '\\'; 145 + } else if (*p == '<' || *p == '>' || *p == '|' || *p == '&') { 146 + memmove(p + 1, p, strlen(buf) + 1 - (p - buf)); 147 + /* Escape special character. */ 148 + *p++ = '\\'; 149 + } 150 + 151 + p++; 152 + } 153 + 154 + printf("%s", buf); 155 + } 156 + 157 + static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...) 
158 + { 159 + unsigned int l = strlen(fmt); 160 + char chomped_fmt[l]; 161 + va_list args; 162 + 163 + va_start(args, fmt); 164 + if (l > 0) { 165 + strncpy(chomped_fmt, fmt, l - 1); 166 + chomped_fmt[l - 1] = '\0'; 167 + } 168 + jsonw_vprintf_enquote(json_wtr, chomped_fmt, args); 169 + va_end(args); 170 + } 171 + 172 + static const char *print_call_pcrel(struct dump_data *dd, 173 + struct kernel_sym *sym, 174 + unsigned long address, 175 + const struct bpf_insn *insn) 176 + { 177 + if (sym) 178 + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 179 + "%+d#%s", insn->off, sym->name); 180 + else 181 + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 182 + "%+d#0x%lx", insn->off, address); 183 + return dd->scratch_buff; 184 + } 185 + 186 + static const char *print_call_helper(struct dump_data *dd, 187 + struct kernel_sym *sym, 188 + unsigned long address) 189 + { 190 + if (sym) 191 + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 192 + "%s", sym->name); 193 + else 194 + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 195 + "0x%lx", address); 196 + return dd->scratch_buff; 197 + } 198 + 199 + static const char *print_call(void *private_data, 200 + const struct bpf_insn *insn) 201 + { 202 + struct dump_data *dd = private_data; 203 + unsigned long address = dd->address_call_base + insn->imm; 204 + struct kernel_sym *sym; 205 + 206 + sym = kernel_syms_search(dd, address); 207 + if (insn->src_reg == BPF_PSEUDO_CALL) 208 + return print_call_pcrel(dd, sym, address, insn); 209 + else 210 + return print_call_helper(dd, sym, address); 211 + } 212 + 213 + static const char *print_imm(void *private_data, 214 + const struct bpf_insn *insn, 215 + __u64 full_imm) 216 + { 217 + struct dump_data *dd = private_data; 218 + 219 + if (insn->src_reg == BPF_PSEUDO_MAP_FD) 220 + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 221 + "map[id:%u]", insn->imm); 222 + else 223 + snprintf(dd->scratch_buff, sizeof(dd->scratch_buff), 224 + "0x%llx", (unsigned long 
long)full_imm); 225 + return dd->scratch_buff; 226 + } 227 + 228 + void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len, 229 + bool opcodes) 230 + { 231 + const struct bpf_insn_cbs cbs = { 232 + .cb_print = print_insn_json, 233 + .cb_call = print_call, 234 + .cb_imm = print_imm, 235 + .private_data = dd, 236 + }; 237 + struct bpf_insn *insn = buf; 238 + bool double_insn = false; 239 + unsigned int i; 240 + 241 + jsonw_start_array(json_wtr); 242 + for (i = 0; i < len / sizeof(*insn); i++) { 243 + if (double_insn) { 244 + double_insn = false; 245 + continue; 246 + } 247 + double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW); 248 + 249 + jsonw_start_object(json_wtr); 250 + jsonw_name(json_wtr, "disasm"); 251 + print_bpf_insn(&cbs, NULL, insn + i, true); 252 + 253 + if (opcodes) { 254 + jsonw_name(json_wtr, "opcodes"); 255 + jsonw_start_object(json_wtr); 256 + 257 + jsonw_name(json_wtr, "code"); 258 + jsonw_printf(json_wtr, "\"0x%02hhx\"", insn[i].code); 259 + 260 + jsonw_name(json_wtr, "src_reg"); 261 + jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].src_reg); 262 + 263 + jsonw_name(json_wtr, "dst_reg"); 264 + jsonw_printf(json_wtr, "\"0x%hhx\"", insn[i].dst_reg); 265 + 266 + jsonw_name(json_wtr, "off"); 267 + print_hex_data_json((uint8_t *)(&insn[i].off), 2); 268 + 269 + jsonw_name(json_wtr, "imm"); 270 + if (double_insn && i < len - 1) 271 + print_hex_data_json((uint8_t *)(&insn[i].imm), 272 + 12); 273 + else 274 + print_hex_data_json((uint8_t *)(&insn[i].imm), 275 + 4); 276 + jsonw_end_object(json_wtr); 277 + } 278 + jsonw_end_object(json_wtr); 279 + } 280 + jsonw_end_array(json_wtr); 281 + } 282 + 283 + void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len, 284 + bool opcodes) 285 + { 286 + const struct bpf_insn_cbs cbs = { 287 + .cb_print = print_insn, 288 + .cb_call = print_call, 289 + .cb_imm = print_imm, 290 + .private_data = dd, 291 + }; 292 + struct bpf_insn *insn = buf; 293 + bool double_insn = false; 294 + unsigned 
int i; 295 + 296 + for (i = 0; i < len / sizeof(*insn); i++) { 297 + if (double_insn) { 298 + double_insn = false; 299 + continue; 300 + } 301 + 302 + double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW); 303 + 304 + printf("% 4d: ", i); 305 + print_bpf_insn(&cbs, NULL, insn + i, true); 306 + 307 + if (opcodes) { 308 + printf(" "); 309 + fprint_hex(stdout, insn + i, 8, " "); 310 + if (double_insn && i < len - 1) { 311 + printf(" "); 312 + fprint_hex(stdout, insn + i + 1, 8, " "); 313 + } 314 + printf("\n"); 315 + } 316 + } 317 + } 318 + 319 + void dump_xlated_for_graph(struct dump_data *dd, void *buf_start, void *buf_end, 320 + unsigned int start_idx) 321 + { 322 + const struct bpf_insn_cbs cbs = { 323 + .cb_print = print_insn_for_graph, 324 + .cb_call = print_call, 325 + .cb_imm = print_imm, 326 + .private_data = dd, 327 + }; 328 + struct bpf_insn *insn_start = buf_start; 329 + struct bpf_insn *insn_end = buf_end; 330 + struct bpf_insn *cur = insn_start; 331 + 332 + for (; cur <= insn_end; cur++) { 333 + printf("% 4d: ", (int)(cur - insn_start + start_idx)); 334 + print_bpf_insn(&cbs, NULL, cur, true); 335 + if (cur != insn_end) 336 + printf(" | "); 337 + } 338 + }
+64
tools/bpf/bpftool/xlated_dumper.h
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* 3 + * Copyright (C) 2018 Netronome Systems, Inc. 4 + * 5 + * This software is dual licensed under the GNU General License Version 2, 6 + * June 1991 as shown in the file COPYING in the top-level directory of this 7 + * source tree or the BSD 2-Clause License provided below. You have the 8 + * option to license this software under the complete terms of either license. 9 + * 10 + * The BSD 2-Clause License: 11 + * 12 + * Redistribution and use in source and binary forms, with or 13 + * without modification, are permitted provided that the following 14 + * conditions are met: 15 + * 16 + * 1. Redistributions of source code must retain the above 17 + * copyright notice, this list of conditions and the following 18 + * disclaimer. 19 + * 20 + * 2. Redistributions in binary form must reproduce the above 21 + * copyright notice, this list of conditions and the following 22 + * disclaimer in the documentation and/or other materials 23 + * provided with the distribution. 24 + * 25 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 26 + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE 29 + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 30 + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 31 + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 32 + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 33 + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 34 + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 35 + * POSSIBILITY OF SUCH DAMAGE. 
36 + */ 37 + 38 + #ifndef __BPF_TOOL_XLATED_DUMPER_H 39 + #define __BPF_TOOL_XLATED_DUMPER_H 40 + 41 + #define SYM_MAX_NAME 256 42 + 43 + struct kernel_sym { 44 + unsigned long address; 45 + char name[SYM_MAX_NAME]; 46 + }; 47 + 48 + struct dump_data { 49 + unsigned long address_call_base; 50 + struct kernel_sym *sym_mapping; 51 + __u32 sym_count; 52 + char scratch_buff[SYM_MAX_NAME]; 53 + }; 54 + 55 + void kernel_syms_load(struct dump_data *dd); 56 + void kernel_syms_destroy(struct dump_data *dd); 57 + void dump_xlated_json(struct dump_data *dd, void *buf, unsigned int len, 58 + bool opcodes); 59 + void dump_xlated_plain(struct dump_data *dd, void *buf, unsigned int len, 60 + bool opcodes); 61 + void dump_xlated_for_graph(struct dump_data *dd, void *buf, void *buf_end, 62 + unsigned int start_index); 63 + 64 + #endif
+28
tools/testing/selftests/bpf/bpf_rlimit.h
··· 1 + #include <sys/resource.h> 2 + #include <stdio.h> 3 + 4 + static __attribute__((constructor)) void bpf_rlimit_ctor(void) 5 + { 6 + struct rlimit rlim_old, rlim_new = { 7 + .rlim_cur = RLIM_INFINITY, 8 + .rlim_max = RLIM_INFINITY, 9 + }; 10 + 11 + getrlimit(RLIMIT_MEMLOCK, &rlim_old); 12 + /* For the sake of running the test cases, we temporarily 13 + * set rlimit to infinity in order for kernel to focus on 14 + * errors from actual test cases and not getting noise 15 + * from hitting memlock limits. The limit is on per-process 16 + * basis and not a global one, hence destructor not really 17 + * needed here. 18 + */ 19 + if (setrlimit(RLIMIT_MEMLOCK, &rlim_new) < 0) { 20 + perror("Unable to lift memlock rlimit"); 21 + /* Trying out lower limit, but expect potential test 22 + * case failures from this! 23 + */ 24 + rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20); 25 + rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20); 26 + setrlimit(RLIMIT_MEMLOCK, &rlim_new); 27 + } 28 + }
+1 -5
tools/testing/selftests/bpf/test_align.c
··· 9 9 #include <stddef.h> 10 10 #include <stdbool.h> 11 11 12 - #include <sys/resource.h> 13 - 14 12 #include <linux/unistd.h> 15 13 #include <linux/filter.h> 16 14 #include <linux/bpf_perf_event.h> ··· 17 19 #include <bpf/bpf.h> 18 20 19 21 #include "../../../include/linux/filter.h" 22 + #include "bpf_rlimit.h" 20 23 21 24 #ifndef ARRAY_SIZE 22 25 # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ··· 701 702 int main(int argc, char **argv) 702 703 { 703 704 unsigned int from = 0, to = ARRAY_SIZE(tests); 704 - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 705 - 706 - setrlimit(RLIMIT_MEMLOCK, &rinf); 707 705 708 706 if (argc == 3) { 709 707 unsigned int l = atoi(argv[argc - 2]);
+1 -5
tools/testing/selftests/bpf/test_dev_cgroup.c
··· 11 11 #include <errno.h> 12 12 #include <assert.h> 13 13 #include <sys/time.h> 14 - #include <sys/resource.h> 15 14 16 15 #include <linux/bpf.h> 17 16 #include <bpf/bpf.h> 18 17 #include <bpf/libbpf.h> 19 18 20 19 #include "cgroup_helpers.h" 20 + #include "bpf_rlimit.h" 21 21 22 22 #define DEV_CGROUP_PROG "./dev_cgroup.o" 23 23 ··· 25 25 26 26 int main(int argc, char **argv) 27 27 { 28 - struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; 29 28 struct bpf_object *obj; 30 29 int error = EXIT_FAILURE; 31 30 int prog_fd, cgroup_fd; 32 31 __u32 prog_cnt; 33 - 34 - if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0) 35 - perror("Unable to lift memlock rlimit"); 36 32 37 33 if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE, 38 34 &obj, &prog_fd)) {
+3 -11
tools/testing/selftests/bpf/test_lpm_map.c
··· 22 22 #include <unistd.h> 23 23 #include <arpa/inet.h> 24 24 #include <sys/time.h> 25 - #include <sys/resource.h> 26 25 27 26 #include <bpf/bpf.h> 27 + 28 28 #include "bpf_util.h" 29 + #include "bpf_rlimit.h" 29 30 30 31 struct tlpm_node { 31 32 struct tlpm_node *next; ··· 737 736 738 737 int main(void) 739 738 { 740 - struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; 741 - int i, ret; 739 + int i; 742 740 743 741 /* we want predictable, pseudo random tests */ 744 742 srand(0xf00ba1); 745 - 746 - /* allow unlimited locked memory */ 747 - ret = setrlimit(RLIMIT_MEMLOCK, &limit); 748 - if (ret < 0) 749 - perror("Unable to lift memlock rlimit"); 750 743 751 744 test_lpm_basic(); 752 745 test_lpm_order(); ··· 750 755 test_lpm_map(i); 751 756 752 757 test_lpm_ipaddr(); 753 - 754 758 test_lpm_delete(); 755 - 756 759 test_lpm_get_next_key(); 757 - 758 760 test_lpm_multi_thread(); 759 761 760 762 printf("test_lpm: OK\n");
+2 -4
tools/testing/selftests/bpf/test_lru_map.c
··· 16 16 #include <time.h> 17 17 18 18 #include <sys/wait.h> 19 - #include <sys/resource.h> 20 19 21 20 #include <bpf/bpf.h> 21 + 22 22 #include "bpf_util.h" 23 + #include "bpf_rlimit.h" 23 24 24 25 #define LOCAL_FREE_TARGET (128) 25 26 #define PERCPU_FREE_TARGET (4) ··· 614 613 615 614 int main(int argc, char **argv) 616 615 { 617 - struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY}; 618 616 int map_types[] = {BPF_MAP_TYPE_LRU_HASH, 619 617 BPF_MAP_TYPE_LRU_PERCPU_HASH}; 620 618 int map_flags[] = {0, BPF_F_NO_COMMON_LRU}; 621 619 int t, f; 622 620 623 621 setbuf(stdout, NULL); 624 - 625 - assert(!setrlimit(RLIMIT_MEMLOCK, &r)); 626 622 627 623 nr_cpus = bpf_num_possible_cpus(); 628 624 assert(nr_cpus != -1);
+2 -5
tools/testing/selftests/bpf/test_maps.c
··· 17 17 #include <stdlib.h> 18 18 19 19 #include <sys/wait.h> 20 - #include <sys/resource.h> 21 20 22 21 #include <linux/bpf.h> 23 22 24 23 #include <bpf/bpf.h> 25 24 #include <bpf/libbpf.h> 25 + 26 26 #include "bpf_util.h" 27 + #include "bpf_rlimit.h" 27 28 28 29 static int map_flags; 29 30 ··· 1127 1126 1128 1127 int main(void) 1129 1128 { 1130 - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 1131 - 1132 - setrlimit(RLIMIT_MEMLOCK, &rinf); 1133 - 1134 1129 map_flags = 0; 1135 1130 run_all_tests(); 1136 1131
+2 -5
tools/testing/selftests/bpf/test_progs.c
··· 26 26 27 27 #include <sys/ioctl.h> 28 28 #include <sys/wait.h> 29 - #include <sys/resource.h> 30 29 #include <sys/types.h> 31 30 #include <fcntl.h> 32 31 ··· 33 34 #include <linux/err.h> 34 35 #include <bpf/bpf.h> 35 36 #include <bpf/libbpf.h> 37 + 36 38 #include "test_iptunnel_common.h" 37 39 #include "bpf_util.h" 38 40 #include "bpf_endian.h" 41 + #include "bpf_rlimit.h" 39 42 40 43 static int error_cnt, pass_cnt; 41 44 ··· 966 965 967 966 int main(void) 968 967 { 969 - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 970 - 971 - setrlimit(RLIMIT_MEMLOCK, &rinf); 972 - 973 968 test_pkt_access(); 974 969 test_xdp(); 975 970 test_l4lb_all();
+1 -3
tools/testing/selftests/bpf/test_tag.c
··· 12 12 #include <assert.h> 13 13 14 14 #include <sys/socket.h> 15 - #include <sys/resource.h> 16 15 17 16 #include <linux/filter.h> 18 17 #include <linux/bpf.h> ··· 20 21 #include <bpf/bpf.h> 21 22 22 23 #include "../../../include/linux/filter.h" 24 + #include "bpf_rlimit.h" 23 25 24 26 static struct bpf_insn prog[BPF_MAXINSNS]; 25 27 ··· 184 184 185 185 int main(void) 186 186 { 187 - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 188 187 uint32_t tests = 0; 189 188 int i, fd_map; 190 189 191 - setrlimit(RLIMIT_MEMLOCK, &rinf); 192 190 fd_map = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(int), 193 191 sizeof(int), 1, BPF_F_NO_PREALLOC); 194 192 assert(fd_map > 0);
+1 -5
tools/testing/selftests/bpf/test_tcpbpf_user.c
··· 12 12 #include <linux/bpf.h> 13 13 #include <sys/ioctl.h> 14 14 #include <sys/time.h> 15 - #include <sys/resource.h> 16 15 #include <sys/types.h> 17 16 #include <sys/stat.h> 18 17 #include <fcntl.h> 19 18 #include <bpf/bpf.h> 20 19 #include <bpf/libbpf.h> 21 20 #include "bpf_util.h" 21 + #include "bpf_rlimit.h" 22 22 #include <linux/perf_event.h> 23 23 #include "test_tcpbpf.h" 24 24 ··· 44 44 45 45 int main(int argc, char **argv) 46 46 { 47 - struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; 48 47 const char *file = "test_tcpbpf_kern.o"; 49 48 struct tcpbpf_globals g = {0}; 50 49 int cg_fd, prog_fd, map_fd; ··· 55 56 __u32 key = 0; 56 57 int pid; 57 58 int rv; 58 - 59 - if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0) 60 - perror("Unable to lift memlock rlimit"); 61 59 62 60 if (argc > 1 && strcmp(argv[1], "-d") == 0) 63 61 debug_flag = true;
+111 -12
tools/testing/selftests/bpf/test_verifier.c
··· 24 24 #include <limits.h> 25 25 26 26 #include <sys/capability.h> 27 - #include <sys/resource.h> 28 27 29 28 #include <linux/unistd.h> 30 29 #include <linux/filter.h> ··· 40 41 # define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 1 41 42 # endif 42 43 #endif 43 - 44 + #include "bpf_rlimit.h" 44 45 #include "../../../include/linux/filter.h" 45 46 46 47 #ifndef ARRAY_SIZE ··· 2589 2590 .result = ACCEPT, 2590 2591 }, 2591 2592 { 2593 + "runtime/jit: tail_call within bounds, prog once", 2594 + .insns = { 2595 + BPF_MOV64_IMM(BPF_REG_3, 0), 2596 + BPF_LD_MAP_FD(BPF_REG_2, 0), 2597 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2598 + BPF_FUNC_tail_call), 2599 + BPF_MOV64_IMM(BPF_REG_0, 1), 2600 + BPF_EXIT_INSN(), 2601 + }, 2602 + .fixup_prog = { 1 }, 2603 + .result = ACCEPT, 2604 + .retval = 42, 2605 + }, 2606 + { 2607 + "runtime/jit: tail_call within bounds, prog loop", 2608 + .insns = { 2609 + BPF_MOV64_IMM(BPF_REG_3, 1), 2610 + BPF_LD_MAP_FD(BPF_REG_2, 0), 2611 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2612 + BPF_FUNC_tail_call), 2613 + BPF_MOV64_IMM(BPF_REG_0, 1), 2614 + BPF_EXIT_INSN(), 2615 + }, 2616 + .fixup_prog = { 1 }, 2617 + .result = ACCEPT, 2618 + .retval = 41, 2619 + }, 2620 + { 2621 + "runtime/jit: tail_call within bounds, no prog", 2622 + .insns = { 2623 + BPF_MOV64_IMM(BPF_REG_3, 2), 2624 + BPF_LD_MAP_FD(BPF_REG_2, 0), 2625 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2626 + BPF_FUNC_tail_call), 2627 + BPF_MOV64_IMM(BPF_REG_0, 1), 2628 + BPF_EXIT_INSN(), 2629 + }, 2630 + .fixup_prog = { 1 }, 2631 + .result = ACCEPT, 2632 + .retval = 1, 2633 + }, 2634 + { 2635 + "runtime/jit: tail_call out of bounds", 2636 + .insns = { 2637 + BPF_MOV64_IMM(BPF_REG_3, 256), 2638 + BPF_LD_MAP_FD(BPF_REG_2, 0), 2639 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2640 + BPF_FUNC_tail_call), 2641 + BPF_MOV64_IMM(BPF_REG_0, 2), 2642 + BPF_EXIT_INSN(), 2643 + }, 2644 + .fixup_prog = { 1 }, 2645 + .result = ACCEPT, 2646 + .retval = 2, 2647 + }, 2648 + { 2592 2649 "runtime/jit: pass 
negative index to tail_call", 2593 2650 .insns = { 2594 2651 BPF_MOV64_IMM(BPF_REG_3, -1), 2595 2652 BPF_LD_MAP_FD(BPF_REG_2, 0), 2596 2653 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2597 2654 BPF_FUNC_tail_call), 2598 - BPF_MOV64_IMM(BPF_REG_0, 0), 2655 + BPF_MOV64_IMM(BPF_REG_0, 2), 2599 2656 BPF_EXIT_INSN(), 2600 2657 }, 2601 2658 .fixup_prog = { 1 }, 2602 2659 .result = ACCEPT, 2660 + .retval = 2, 2603 2661 }, 2604 2662 { 2605 2663 "runtime/jit: pass > 32bit index to tail_call", ··· 2665 2609 BPF_LD_MAP_FD(BPF_REG_2, 0), 2666 2610 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 2667 2611 BPF_FUNC_tail_call), 2668 - BPF_MOV64_IMM(BPF_REG_0, 0), 2612 + BPF_MOV64_IMM(BPF_REG_0, 2), 2669 2613 BPF_EXIT_INSN(), 2670 2614 }, 2671 2615 .fixup_prog = { 2 }, 2672 2616 .result = ACCEPT, 2617 + .retval = 42, 2673 2618 }, 2674 2619 { 2675 2620 "stack pointer arithmetic", ··· 11336 11279 return fd; 11337 11280 } 11338 11281 11282 + static int create_prog_dummy1(void) 11283 + { 11284 + struct bpf_insn prog[] = { 11285 + BPF_MOV64_IMM(BPF_REG_0, 42), 11286 + BPF_EXIT_INSN(), 11287 + }; 11288 + 11289 + return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, 11290 + ARRAY_SIZE(prog), "GPL", 0, NULL, 0); 11291 + } 11292 + 11293 + static int create_prog_dummy2(int mfd, int idx) 11294 + { 11295 + struct bpf_insn prog[] = { 11296 + BPF_MOV64_IMM(BPF_REG_3, idx), 11297 + BPF_LD_MAP_FD(BPF_REG_2, mfd), 11298 + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 11299 + BPF_FUNC_tail_call), 11300 + BPF_MOV64_IMM(BPF_REG_0, 41), 11301 + BPF_EXIT_INSN(), 11302 + }; 11303 + 11304 + return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog, 11305 + ARRAY_SIZE(prog), "GPL", 0, NULL, 0); 11306 + } 11307 + 11339 11308 static int create_prog_array(void) 11340 11309 { 11341 - int fd; 11310 + int p1key = 0, p2key = 1; 11311 + int mfd, p1fd, p2fd; 11342 11312 11343 - fd = bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), 11344 - sizeof(int), 4, 0); 11345 - if (fd < 0) 11313 + mfd = 
bpf_create_map(BPF_MAP_TYPE_PROG_ARRAY, sizeof(int), 11314 + sizeof(int), 4, 0); 11315 + if (mfd < 0) { 11346 11316 printf("Failed to create prog array '%s'!\n", strerror(errno)); 11317 + return -1; 11318 + } 11347 11319 11348 - return fd; 11320 + p1fd = create_prog_dummy1(); 11321 + p2fd = create_prog_dummy2(mfd, p2key); 11322 + if (p1fd < 0 || p2fd < 0) 11323 + goto out; 11324 + if (bpf_map_update_elem(mfd, &p1key, &p1fd, BPF_ANY) < 0) 11325 + goto out; 11326 + if (bpf_map_update_elem(mfd, &p2key, &p2fd, BPF_ANY) < 0) 11327 + goto out; 11328 + close(p2fd); 11329 + close(p1fd); 11330 + 11331 + return mfd; 11332 + out: 11333 + close(p2fd); 11334 + close(p1fd); 11335 + close(mfd); 11336 + return -1; 11349 11337 } 11350 11338 11351 11339 static int create_map_in_map(void) ··· 11645 11543 11646 11544 int main(int argc, char **argv) 11647 11545 { 11648 - struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY }; 11649 - struct rlimit rlim = { 1 << 20, 1 << 20 }; 11650 11546 unsigned int from = 0, to = ARRAY_SIZE(tests); 11651 11547 bool unpriv = !is_admin(); 11652 11548 ··· 11672 11572 return EXIT_FAILURE; 11673 11573 } 11674 11574 11675 - setrlimit(RLIMIT_MEMLOCK, unpriv ? &rlim : &rinf); 11676 11575 return do_test(unpriv, from, to); 11677 11576 }
+2 -6
tools/testing/selftests/bpf/test_verifier_log.c
··· 4 4 #include <string.h> 5 5 #include <unistd.h> 6 6 #include <sys/time.h> 7 - #include <sys/resource.h> 8 7 9 8 #include <linux/bpf.h> 10 9 #include <linux/filter.h> 11 10 #include <linux/unistd.h> 12 11 13 12 #include <bpf/bpf.h> 13 + 14 + #include "bpf_rlimit.h" 14 15 15 16 #define LOG_SIZE (1 << 20) 16 17 ··· 134 133 135 134 int main(int argc, char **argv) 136 135 { 137 - struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY }; 138 136 char full_log[LOG_SIZE]; 139 137 char log[LOG_SIZE]; 140 138 size_t want_len; 141 139 int i; 142 - 143 - /* allow unlimited locked memory to have more consistent error code */ 144 - if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0) 145 - perror("Unable to lift memlock rlimit"); 146 140 147 141 memset(log, 1, LOG_SIZE); 148 142