Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf

Daniel Borkmann says:

====================
pull-request: bpf 2017-11-23

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Several BPF offloading fixes, from Jakub. Among others:

- Limit offload to cls_bpf and XDP program types only.
- Move device validation into the driver and don't make
any assumptions about the device in the classifier due
to shared blocks semantics.
- Don't pass offloaded XDP program into the driver when
it should be run in native XDP instead. Offloaded ones
are not JITed for the host in such cases.
- Don't destroy device offload state when moved to
another namespace.
- Revert dumping offload info into user space for now,
since ifindex alone is not sufficient. This will be
redone properly for bpf-next tree.

2) Fix test_verifier to avoid using bpf_probe_write_user()
helper in test cases, since it's dumping a warning into
kernel log which may confuse users when only running tests.
Switch to use bpf_trace_printk() instead, from Yonghong.

3) Several fixes for correcting ARG_CONST_SIZE_OR_ZERO semantics
before it becomes uabi, from Gianluca. More specifically:

- Add a type ARG_PTR_TO_MEM_OR_NULL that is used only
by bpf_csum_diff(), where the argument is either a
valid pointer or NULL. The subsequent ARG_CONST_SIZE_OR_ZERO
then enforces a valid pointer in case of non-0 size
or a valid pointer or NULL in case of size 0. Given
that, the semantics for ARG_PTR_TO_MEM in combination
with ARG_CONST_SIZE_OR_ZERO are now such that in case
of size 0, the pointer must always be valid and cannot
be NULL. This fix in semantics allows for bpf_probe_read()
to drop the recently added size == 0 check in the helper
that would become part of uabi otherwise once released.
At the same time we can then fix bpf_probe_read_str() and
bpf_perf_event_output() to use ARG_CONST_SIZE_OR_ZERO
instead of ARG_CONST_SIZE in order to fix recently
reported issues by Arnaldo et al, where LLVM optimizes
two boundary checks into a single one for unknown
variables where the verifier loses track of the variable
bounds and thus rejects valid programs otherwise.

4) A fix for the verifier for the case when it detects
comparison of two constants where the branch is guaranteed
to not be taken at runtime. Verifier will rightfully prune
the exploration of such paths, but we still pass the program
to JITs, where they would complain about using reserved
fields, etc. Track such dead instructions and sanitize
them with mov r0,r0. Rejection is not possible since LLVM
may generate them for valid C code and doesn't do as much
data flow analysis as verifier. For bpf-next we might
implement removal of such dead code and adjust branches
instead. Fix from Alexei.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+216 -152
+8 -2
drivers/net/ethernet/netronome/nfp/bpf/offload.c
··· 214 214 { 215 215 int err; 216 216 217 - if (prog && !prog->aux->offload) 218 - return -EINVAL; 217 + if (prog) { 218 + struct bpf_dev_offload *offload = prog->aux->offload; 219 + 220 + if (!offload) 221 + return -EINVAL; 222 + if (offload->netdev != nn->dp.netdev) 223 + return -EINVAL; 224 + } 219 225 220 226 if (prog && old_prog) { 221 227 u8 cap;
+9 -10
include/linux/bpf.h
··· 78 78 * functions that access data on eBPF program stack 79 79 */ 80 80 ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ 81 + ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */ 81 82 ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized, 82 83 * helper function must fill all bytes or clear 83 84 * them in error case. ··· 335 334 extern const struct bpf_verifier_ops xdp_analyzer_ops; 336 335 337 336 struct bpf_prog *bpf_prog_get(u32 ufd); 338 - struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type); 339 337 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 340 - struct net_device *netdev); 338 + bool attach_drv); 341 339 struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i); 342 340 void bpf_prog_sub(struct bpf_prog *prog, int i); 343 341 struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog); ··· 425 425 return ERR_PTR(-EOPNOTSUPP); 426 426 } 427 427 428 - static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 429 - enum bpf_prog_type type) 430 - { 431 - return ERR_PTR(-EOPNOTSUPP); 432 - } 433 - 434 428 static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, 435 429 enum bpf_prog_type type, 436 - struct net_device *netdev) 430 + bool attach_drv) 437 431 { 438 432 return ERR_PTR(-EOPNOTSUPP); 439 433 } ··· 508 514 } 509 515 #endif /* CONFIG_BPF_SYSCALL */ 510 516 517 + static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, 518 + enum bpf_prog_type type) 519 + { 520 + return bpf_prog_get_type_dev(ufd, type, false); 521 + } 522 + 511 523 int bpf_prog_offload_compile(struct bpf_prog *prog); 512 524 void bpf_prog_offload_destroy(struct bpf_prog *prog); 513 - u32 bpf_prog_offload_ifindex(struct bpf_prog *prog); 514 525 515 526 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 516 527 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
+2 -2
include/linux/bpf_verifier.h
··· 115 115 struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ 116 116 }; 117 117 int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ 118 - int converted_op_size; /* the valid value width after perceived conversion */ 118 + bool seen; /* this insn was processed by the verifier */ 119 119 }; 120 120 121 121 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ ··· 171 171 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) 172 172 int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); 173 173 #else 174 - int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) 174 + static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) 175 175 { 176 176 return -EOPNOTSUPP; 177 177 }
+1 -7
include/uapi/linux/bpf.h
··· 262 262 __u32 kern_version; /* checked when prog_type=kprobe */ 263 263 __u32 prog_flags; 264 264 char prog_name[BPF_OBJ_NAME_LEN]; 265 - __u32 prog_target_ifindex; /* ifindex of netdev to prep for */ 265 + __u32 prog_ifindex; /* ifindex of netdev to prep for */ 266 266 }; 267 267 268 268 struct { /* anonymous struct used by BPF_OBJ_* commands */ ··· 897 897 898 898 #define BPF_TAG_SIZE 8 899 899 900 - enum bpf_prog_status { 901 - BPF_PROG_STATUS_DEV_BOUND = (1 << 0), 902 - }; 903 - 904 900 struct bpf_prog_info { 905 901 __u32 type; 906 902 __u32 id; ··· 910 914 __u32 nr_map_ids; 911 915 __aligned_u64 map_ids; 912 916 char name[BPF_OBJ_NAME_LEN]; 913 - __u32 ifindex; 914 - __u32 status; 915 917 } __attribute__((aligned(8))); 916 918 917 919 struct bpf_map_info {
+12 -15
kernel/bpf/offload.c
··· 14 14 struct net *net = current->nsproxy->net_ns; 15 15 struct bpf_dev_offload *offload; 16 16 17 - if (!capable(CAP_SYS_ADMIN)) 18 - return -EPERM; 17 + if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS && 18 + attr->prog_type != BPF_PROG_TYPE_XDP) 19 + return -EINVAL; 19 20 20 21 if (attr->prog_flags) 21 22 return -EINVAL; ··· 29 28 init_waitqueue_head(&offload->verifier_done); 30 29 31 30 rtnl_lock(); 32 - offload->netdev = __dev_get_by_index(net, attr->prog_target_ifindex); 31 + offload->netdev = __dev_get_by_index(net, attr->prog_ifindex); 33 32 if (!offload->netdev) { 34 33 rtnl_unlock(); 35 34 kfree(offload); ··· 85 84 { 86 85 struct bpf_dev_offload *offload = prog->aux->offload; 87 86 struct netdev_bpf data = {}; 87 + 88 + /* Caution - if netdev is destroyed before the program, this function 89 + * will be called twice. 90 + */ 88 91 89 92 data.offload.prog = prog; 90 93 ··· 149 144 return bpf_prog_offload_translate(prog); 150 145 } 151 146 152 - u32 bpf_prog_offload_ifindex(struct bpf_prog *prog) 153 - { 154 - struct bpf_dev_offload *offload = prog->aux->offload; 155 - u32 ifindex; 156 - 157 - rtnl_lock(); 158 - ifindex = offload->netdev ? offload->netdev->ifindex : 0; 159 - rtnl_unlock(); 160 - 161 - return ifindex; 162 - } 163 - 164 147 const struct bpf_prog_ops bpf_offload_prog_ops = { 165 148 }; 166 149 ··· 162 169 163 170 switch (event) { 164 171 case NETDEV_UNREGISTER: 172 + /* ignore namespace changes */ 173 + if (netdev->reg_state != NETREG_UNREGISTERING) 174 + break; 175 + 165 176 list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs, 166 177 offloads) { 167 178 if (offload->netdev == netdev)
+13 -27
kernel/bpf/syscall.c
··· 1057 1057 } 1058 1058 EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero); 1059 1059 1060 - static bool bpf_prog_can_attach(struct bpf_prog *prog, 1061 - enum bpf_prog_type *attach_type, 1062 - struct net_device *netdev) 1060 + static bool bpf_prog_get_ok(struct bpf_prog *prog, 1061 + enum bpf_prog_type *attach_type, bool attach_drv) 1063 1062 { 1064 - struct bpf_dev_offload *offload = prog->aux->offload; 1063 + /* not an attachment, just a refcount inc, always allow */ 1064 + if (!attach_type) 1065 + return true; 1065 1066 1066 1067 if (prog->type != *attach_type) 1067 1068 return false; 1068 - if (offload && offload->netdev != netdev) 1069 + if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv) 1069 1070 return false; 1070 1071 1071 1072 return true; 1072 1073 } 1073 1074 1074 1075 static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type, 1075 - struct net_device *netdev) 1076 + bool attach_drv) 1076 1077 { 1077 1078 struct fd f = fdget(ufd); 1078 1079 struct bpf_prog *prog; ··· 1081 1080 prog = ____bpf_prog_get(f); 1082 1081 if (IS_ERR(prog)) 1083 1082 return prog; 1084 - if (attach_type && !bpf_prog_can_attach(prog, attach_type, netdev)) { 1083 + if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) { 1085 1084 prog = ERR_PTR(-EINVAL); 1086 1085 goto out; 1087 1086 } ··· 1094 1093 1095 1094 struct bpf_prog *bpf_prog_get(u32 ufd) 1096 1095 { 1097 - return __bpf_prog_get(ufd, NULL, NULL); 1096 + return __bpf_prog_get(ufd, NULL, false); 1098 1097 } 1099 - 1100 - struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) 1101 - { 1102 - struct bpf_prog *prog = __bpf_prog_get(ufd, &type, NULL); 1103 - 1104 - if (!IS_ERR(prog)) 1105 - trace_bpf_prog_get_type(prog); 1106 - return prog; 1107 - } 1108 - EXPORT_SYMBOL_GPL(bpf_prog_get_type); 1109 1098 1110 1099 struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, 1111 - struct net_device *netdev) 1100 + bool attach_drv) 1112 1101 { 1113 - struct bpf_prog *prog = 
__bpf_prog_get(ufd, &type, netdev); 1102 + struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv); 1114 1103 1115 1104 if (!IS_ERR(prog)) 1116 1105 trace_bpf_prog_get_type(prog); ··· 1109 1118 EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev); 1110 1119 1111 1120 /* last field in 'union bpf_attr' used by this command */ 1112 - #define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex 1121 + #define BPF_PROG_LOAD_LAST_FIELD prog_ifindex 1113 1122 1114 1123 static int bpf_prog_load(union bpf_attr *attr) 1115 1124 { ··· 1172 1181 atomic_set(&prog->aux->refcnt, 1); 1173 1182 prog->gpl_compatible = is_gpl ? 1 : 0; 1174 1183 1175 - if (attr->prog_target_ifindex) { 1184 + if (attr->prog_ifindex) { 1176 1185 err = bpf_prog_offload_init(prog, attr); 1177 1186 if (err) 1178 1187 goto free_prog; ··· 1614 1623 ulen = min_t(u32, info.xlated_prog_len, ulen); 1615 1624 if (copy_to_user(uinsns, prog->insnsi, ulen)) 1616 1625 return -EFAULT; 1617 - } 1618 - 1619 - if (bpf_prog_is_dev_bound(prog->aux)) { 1620 - info.status |= BPF_PROG_STATUS_DEV_BOUND; 1621 - info.ifindex = bpf_prog_offload_ifindex(prog); 1622 1626 } 1623 1627 1624 1628 done:
+30 -1
kernel/bpf/verifier.c
··· 1384 1384 if (type != expected_type) 1385 1385 goto err_type; 1386 1386 } else if (arg_type == ARG_PTR_TO_MEM || 1387 + arg_type == ARG_PTR_TO_MEM_OR_NULL || 1387 1388 arg_type == ARG_PTR_TO_UNINIT_MEM) { 1388 1389 expected_type = PTR_TO_STACK; 1389 1390 /* One exception here. In case function allows for NULL to be 1390 1391 * passed in as argument, it's a SCALAR_VALUE type. Final test 1391 1392 * happens during stack boundary checking. 1392 1393 */ 1393 - if (register_is_null(*reg)) 1394 + if (register_is_null(*reg) && 1395 + arg_type == ARG_PTR_TO_MEM_OR_NULL) 1394 1396 /* final test in check_stack_boundary() */; 1395 1397 else if (!type_is_pkt_pointer(type) && 1396 1398 type != PTR_TO_MAP_VALUE && ··· 3827 3825 return err; 3828 3826 3829 3827 regs = cur_regs(env); 3828 + env->insn_aux_data[insn_idx].seen = true; 3830 3829 if (class == BPF_ALU || class == BPF_ALU64) { 3831 3830 err = check_alu_op(env, insn); 3832 3831 if (err) ··· 4023 4020 return err; 4024 4021 4025 4022 insn_idx++; 4023 + env->insn_aux_data[insn_idx].seen = true; 4026 4024 } else { 4027 4025 verbose(env, "invalid BPF_LD mode\n"); 4028 4026 return -EINVAL; ··· 4206 4202 u32 off, u32 cnt) 4207 4203 { 4208 4204 struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data; 4205 + int i; 4209 4206 4210 4207 if (cnt == 1) 4211 4208 return 0; ··· 4216 4211 memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off); 4217 4212 memcpy(new_data + off + cnt - 1, old_data + off, 4218 4213 sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1)); 4214 + for (i = off; i < off + cnt - 1; i++) 4215 + new_data[i].seen = true; 4219 4216 env->insn_aux_data = new_data; 4220 4217 vfree(old_data); 4221 4218 return 0; ··· 4234 4227 if (adjust_insn_aux_data(env, new_prog->len, off, len)) 4235 4228 return NULL; 4236 4229 return new_prog; 4230 + } 4231 + 4232 + /* The verifier does more data flow analysis than llvm and will not explore 4233 + * branches that are dead at run time. 
Malicious programs can have dead code 4234 + * too. Therefore replace all dead at-run-time code with nops. 4235 + */ 4236 + static void sanitize_dead_code(struct bpf_verifier_env *env) 4237 + { 4238 + struct bpf_insn_aux_data *aux_data = env->insn_aux_data; 4239 + struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0); 4240 + struct bpf_insn *insn = env->prog->insnsi; 4241 + const int insn_cnt = env->prog->len; 4242 + int i; 4243 + 4244 + for (i = 0; i < insn_cnt; i++) { 4245 + if (aux_data[i].seen) 4246 + continue; 4247 + memcpy(insn + i, &nop, sizeof(nop)); 4248 + } 4237 4249 } 4238 4250 4239 4251 /* convert load instructions that access fields of 'struct __sk_buff' ··· 4580 4554 skip_full_check: 4581 4555 while (!pop_stack(env, NULL, NULL)); 4582 4556 free_states(env); 4557 + 4558 + if (ret == 0) 4559 + sanitize_dead_code(env); 4583 4560 4584 4561 if (ret == 0) 4585 4562 /* program is valid, convert *(u32*)(ctx + off) accesses */
+4 -8
kernel/trace/bpf_trace.c
··· 78 78 79 79 BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr) 80 80 { 81 - int ret = 0; 82 - 83 - if (unlikely(size == 0)) 84 - goto out; 81 + int ret; 85 82 86 83 ret = probe_kernel_read(dst, unsafe_ptr, size); 87 84 if (unlikely(ret < 0)) 88 85 memset(dst, 0, size); 89 86 90 - out: 91 87 return ret; 92 88 } 93 89 ··· 403 407 .arg2_type = ARG_CONST_MAP_PTR, 404 408 .arg3_type = ARG_ANYTHING, 405 409 .arg4_type = ARG_PTR_TO_MEM, 406 - .arg5_type = ARG_CONST_SIZE, 410 + .arg5_type = ARG_CONST_SIZE_OR_ZERO, 407 411 }; 408 412 409 413 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); ··· 494 498 .gpl_only = true, 495 499 .ret_type = RET_INTEGER, 496 500 .arg1_type = ARG_PTR_TO_UNINIT_MEM, 497 - .arg2_type = ARG_CONST_SIZE, 501 + .arg2_type = ARG_CONST_SIZE_OR_ZERO, 498 502 .arg3_type = ARG_ANYTHING, 499 503 }; 500 504 ··· 605 609 .arg2_type = ARG_CONST_MAP_PTR, 606 610 .arg3_type = ARG_ANYTHING, 607 611 .arg4_type = ARG_PTR_TO_MEM, 608 - .arg5_type = ARG_CONST_SIZE, 612 + .arg5_type = ARG_CONST_SIZE_OR_ZERO, 609 613 }; 610 614 611 615 BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
+9 -5
net/core/dev.c
··· 7140 7140 __dev_xdp_attached(dev, bpf_op, NULL)) 7141 7141 return -EBUSY; 7142 7142 7143 - if (bpf_op == ops->ndo_bpf) 7144 - prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 7145 - dev); 7146 - else 7147 - prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP); 7143 + prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP, 7144 + bpf_op == ops->ndo_bpf); 7148 7145 if (IS_ERR(prog)) 7149 7146 return PTR_ERR(prog); 7147 + 7148 + if (!(flags & XDP_FLAGS_HW_MODE) && 7149 + bpf_prog_is_dev_bound(prog->aux)) { 7150 + NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); 7151 + bpf_prog_put(prog); 7152 + return -EINVAL; 7153 + } 7150 7154 } 7151 7155 7152 7156 err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
+2 -2
net/core/filter.c
··· 1646 1646 .gpl_only = false, 1647 1647 .pkt_access = true, 1648 1648 .ret_type = RET_INTEGER, 1649 - .arg1_type = ARG_PTR_TO_MEM, 1649 + .arg1_type = ARG_PTR_TO_MEM_OR_NULL, 1650 1650 .arg2_type = ARG_CONST_SIZE_OR_ZERO, 1651 - .arg3_type = ARG_PTR_TO_MEM, 1651 + .arg3_type = ARG_PTR_TO_MEM_OR_NULL, 1652 1652 .arg4_type = ARG_CONST_SIZE_OR_ZERO, 1653 1653 .arg5_type = ARG_ANYTHING, 1654 1654 };
+3 -5
net/sched/cls_bpf.c
··· 382 382 { 383 383 struct bpf_prog *fp; 384 384 char *name = NULL; 385 + bool skip_sw; 385 386 u32 bpf_fd; 386 387 387 388 bpf_fd = nla_get_u32(tb[TCA_BPF_FD]); 389 + skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW; 388 390 389 - if (gen_flags & TCA_CLS_FLAGS_SKIP_SW) 390 - fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, 391 - qdisc_dev(tp->q)); 392 - else 393 - fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS); 391 + fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw); 394 392 if (IS_ERR(fp)) 395 393 return PTR_ERR(fp); 396 394
-31
tools/bpf/bpftool/prog.c
··· 41 41 #include <string.h> 42 42 #include <time.h> 43 43 #include <unistd.h> 44 - #include <net/if.h> 45 44 #include <sys/types.h> 46 45 #include <sys/stat.h> 47 46 ··· 229 230 info->tag[0], info->tag[1], info->tag[2], info->tag[3], 230 231 info->tag[4], info->tag[5], info->tag[6], info->tag[7]); 231 232 232 - if (info->status & BPF_PROG_STATUS_DEV_BOUND) { 233 - jsonw_name(json_wtr, "dev"); 234 - if (info->ifindex) { 235 - char name[IF_NAMESIZE]; 236 - 237 - if (!if_indextoname(info->ifindex, name)) 238 - jsonw_printf(json_wtr, "\"ifindex:%d\"", 239 - info->ifindex); 240 - else 241 - jsonw_printf(json_wtr, "\"%s\"", name); 242 - } else { 243 - jsonw_printf(json_wtr, "\"unknown\""); 244 - } 245 - } 246 - 247 233 if (info->load_time) { 248 234 char buf[32]; 249 235 ··· 286 302 287 303 printf("tag "); 288 304 fprint_hex(stdout, info->tag, BPF_TAG_SIZE, ""); 289 - printf(" "); 290 - 291 - if (info->status & BPF_PROG_STATUS_DEV_BOUND) { 292 - printf("dev "); 293 - if (info->ifindex) { 294 - char name[IF_NAMESIZE]; 295 - 296 - if (!if_indextoname(info->ifindex, name)) 297 - printf("ifindex:%d ", info->ifindex); 298 - else 299 - printf("%s ", name); 300 - } else { 301 - printf("unknown "); 302 - } 303 - } 304 305 printf("\n"); 305 306 306 307 if (info->load_time) {
+1 -7
tools/include/uapi/linux/bpf.h
··· 262 262 __u32 kern_version; /* checked when prog_type=kprobe */ 263 263 __u32 prog_flags; 264 264 char prog_name[BPF_OBJ_NAME_LEN]; 265 - __u32 prog_target_ifindex; /* ifindex of netdev to prep for */ 265 + __u32 prog_ifindex; /* ifindex of netdev to prep for */ 266 266 }; 267 267 268 268 struct { /* anonymous struct used by BPF_OBJ_* commands */ ··· 897 897 898 898 #define BPF_TAG_SIZE 8 899 899 900 - enum bpf_prog_status { 901 - BPF_PROG_STATUS_DEV_BOUND = (1 << 0), 902 - }; 903 - 904 900 struct bpf_prog_info { 905 901 __u32 type; 906 902 __u32 id; ··· 910 914 __u32 nr_map_ids; 911 915 __aligned_u64 map_ids; 912 916 char name[BPF_OBJ_NAME_LEN]; 913 - __u32 ifindex; 914 - __u32 status; 915 917 } __attribute__((aligned(8))); 916 918 917 919 struct bpf_map_info {
+122 -30
tools/testing/selftests/bpf/test_verifier.c
··· 4377 4377 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4378 4378 BPF_LD_MAP_FD(BPF_REG_1, 0), 4379 4379 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4380 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4381 - BPF_MOV64_IMM(BPF_REG_1, 0), 4382 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_0), 4383 - BPF_MOV64_IMM(BPF_REG_3, 0), 4384 - BPF_EMIT_CALL(BPF_FUNC_probe_write_user), 4380 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), 4381 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4382 + BPF_MOV64_IMM(BPF_REG_2, 0), 4383 + BPF_EMIT_CALL(BPF_FUNC_trace_printk), 4385 4384 BPF_EXIT_INSN(), 4386 4385 }, 4387 4386 .fixup_map2 = { 3 }, ··· 4480 4481 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4481 4482 BPF_LD_MAP_FD(BPF_REG_1, 0), 4482 4483 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4483 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4484 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 4484 4485 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4485 4486 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 4486 4487 offsetof(struct test_val, foo)), 4487 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 4488 - BPF_MOV64_IMM(BPF_REG_1, 0), 4489 - BPF_MOV64_IMM(BPF_REG_3, 0), 4490 - BPF_EMIT_CALL(BPF_FUNC_probe_write_user), 4488 + BPF_MOV64_IMM(BPF_REG_2, 0), 4489 + BPF_EMIT_CALL(BPF_FUNC_trace_printk), 4491 4490 BPF_EXIT_INSN(), 4492 4491 }, 4493 4492 .fixup_map2 = { 3 }, ··· 4615 4618 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4616 4619 BPF_LD_MAP_FD(BPF_REG_1, 0), 4617 4620 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4618 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4621 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 4619 4622 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4620 4623 BPF_MOV64_IMM(BPF_REG_3, 0), 4621 4624 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4622 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 4623 - BPF_MOV64_IMM(BPF_REG_1, 0), 4624 - BPF_MOV64_IMM(BPF_REG_3, 0), 4625 - BPF_EMIT_CALL(BPF_FUNC_probe_write_user), 4625 + BPF_MOV64_IMM(BPF_REG_2, 0), 4626 + BPF_EMIT_CALL(BPF_FUNC_trace_printk), 4626 4627 BPF_EXIT_INSN(), 4627 4628 }, 4628 4629 .fixup_map2 = { 3 }, 4629 - .errstr = "R2 min 
value is outside of the array range", 4630 + .errstr = "R1 min value is outside of the array range", 4630 4631 .result = REJECT, 4631 4632 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4632 4633 }, ··· 4755 4760 BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0), 4756 4761 BPF_LD_MAP_FD(BPF_REG_1, 0), 4757 4762 BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 4758 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7), 4763 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 4759 4764 BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 4760 4765 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0), 4761 4766 BPF_JMP_IMM(BPF_JGT, BPF_REG_3, 4762 - offsetof(struct test_val, foo), 4), 4767 + offsetof(struct test_val, foo), 3), 4763 4768 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3), 4764 - BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 4765 - BPF_MOV64_IMM(BPF_REG_1, 0), 4766 - BPF_MOV64_IMM(BPF_REG_3, 0), 4767 - BPF_EMIT_CALL(BPF_FUNC_probe_write_user), 4769 + BPF_MOV64_IMM(BPF_REG_2, 0), 4770 + BPF_EMIT_CALL(BPF_FUNC_trace_printk), 4768 4771 BPF_EXIT_INSN(), 4769 4772 }, 4770 4773 .fixup_map2 = { 3 }, 4771 - .errstr = "R2 min value is outside of the array range", 4774 + .errstr = "R1 min value is outside of the array range", 4772 4775 .result = REJECT, 4773 4776 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 4774 4777 }, ··· 5631 5638 .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5632 5639 }, 5633 5640 { 5634 - "helper access to variable memory: size = 0 allowed on NULL", 5641 + "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", 5635 5642 .insns = { 5636 5643 BPF_MOV64_IMM(BPF_REG_1, 0), 5637 5644 BPF_MOV64_IMM(BPF_REG_2, 0), ··· 5645 5652 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5646 5653 }, 5647 5654 { 5648 - "helper access to variable memory: size > 0 not allowed on NULL", 5655 + "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", 5649 5656 .insns = { 5650 5657 BPF_MOV64_IMM(BPF_REG_1, 0), 5651 5658 BPF_MOV64_IMM(BPF_REG_2, 0), ··· 5663 5670 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5664 5671 }, 5665 
5672 { 5666 - "helper access to variable memory: size = 0 allowed on != NULL stack pointer", 5673 + "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", 5667 5674 .insns = { 5668 5675 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5669 5676 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), ··· 5680 5687 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5681 5688 }, 5682 5689 { 5683 - "helper access to variable memory: size = 0 allowed on != NULL map pointer", 5690 + "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", 5684 5691 .insns = { 5685 5692 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5686 5693 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), ··· 5702 5709 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5703 5710 }, 5704 5711 { 5705 - "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer", 5712 + "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)", 5706 5713 .insns = { 5707 5714 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5708 5715 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), ··· 5727 5734 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5728 5735 }, 5729 5736 { 5730 - "helper access to variable memory: size possible = 0 allowed on != NULL map pointer", 5737 + "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)", 5731 5738 .insns = { 5732 5739 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5733 5740 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), ··· 5750 5757 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 5751 5758 }, 5752 5759 { 5753 - "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer", 5760 + "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)", 5754 5761 .insns = { 5755 5762 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, 5756 5763 offsetof(struct __sk_buff, data)), ··· 5770 5777 }, 5771 5778 .result = ACCEPT, 5772 5779 
.prog_type = BPF_PROG_TYPE_SCHED_CLS, 5780 + }, 5781 + { 5782 + "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", 5783 + .insns = { 5784 + BPF_MOV64_IMM(BPF_REG_1, 0), 5785 + BPF_MOV64_IMM(BPF_REG_2, 0), 5786 + BPF_MOV64_IMM(BPF_REG_3, 0), 5787 + BPF_EMIT_CALL(BPF_FUNC_probe_read), 5788 + BPF_EXIT_INSN(), 5789 + }, 5790 + .errstr = "R1 type=inv expected=fp", 5791 + .result = REJECT, 5792 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5793 + }, 5794 + { 5795 + "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)", 5796 + .insns = { 5797 + BPF_MOV64_IMM(BPF_REG_1, 0), 5798 + BPF_MOV64_IMM(BPF_REG_2, 1), 5799 + BPF_MOV64_IMM(BPF_REG_3, 0), 5800 + BPF_EMIT_CALL(BPF_FUNC_probe_read), 5801 + BPF_EXIT_INSN(), 5802 + }, 5803 + .errstr = "R1 type=inv expected=fp", 5804 + .result = REJECT, 5805 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5806 + }, 5807 + { 5808 + "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", 5809 + .insns = { 5810 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5811 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 5812 + BPF_MOV64_IMM(BPF_REG_2, 0), 5813 + BPF_MOV64_IMM(BPF_REG_3, 0), 5814 + BPF_EMIT_CALL(BPF_FUNC_probe_read), 5815 + BPF_EXIT_INSN(), 5816 + }, 5817 + .result = ACCEPT, 5818 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5819 + }, 5820 + { 5821 + "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", 5822 + .insns = { 5823 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5824 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5825 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5826 + BPF_LD_MAP_FD(BPF_REG_1, 0), 5827 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5828 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 5829 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5830 + BPF_MOV64_IMM(BPF_REG_2, 0), 5831 + BPF_MOV64_IMM(BPF_REG_3, 0), 5832 + BPF_EMIT_CALL(BPF_FUNC_probe_read), 5833 + BPF_EXIT_INSN(), 5834 + }, 5835 + .fixup_map1 = { 
3 }, 5836 + .result = ACCEPT, 5837 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5838 + }, 5839 + { 5840 + "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)", 5841 + .insns = { 5842 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5843 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5844 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5845 + BPF_LD_MAP_FD(BPF_REG_1, 0), 5846 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5847 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6), 5848 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 5849 + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4), 5850 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), 5851 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), 5852 + BPF_MOV64_IMM(BPF_REG_3, 0), 5853 + BPF_EMIT_CALL(BPF_FUNC_probe_read), 5854 + BPF_EXIT_INSN(), 5855 + }, 5856 + .fixup_map1 = { 3 }, 5857 + .result = ACCEPT, 5858 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5859 + }, 5860 + { 5861 + "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)", 5862 + .insns = { 5863 + BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), 5864 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 5865 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 5866 + BPF_LD_MAP_FD(BPF_REG_1, 0), 5867 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 5868 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5), 5869 + BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), 5870 + BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), 5871 + BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2), 5872 + BPF_MOV64_IMM(BPF_REG_3, 0), 5873 + BPF_EMIT_CALL(BPF_FUNC_probe_read), 5874 + BPF_EXIT_INSN(), 5875 + }, 5876 + .fixup_map1 = { 3 }, 5877 + .result = ACCEPT, 5878 + .prog_type = BPF_PROG_TYPE_TRACEPOINT, 5773 5879 }, 5774 5880 { 5775 5881 "helper access to variable memory: 8 bytes leak",