Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'misc-improvements'

Daniel Borkmann says:

====================
Last batch of misc patches I had in queue: first one removes some left-over
bits from ULP, second is a fix in the verifier where we wrongly use register
number as type to fetch the string for the dump, third disables xadd on flow
keys and subsequent one removes the flow key type from check_helper_mem_access()
as they cannot be passed into any helper as of today. The next one lets the
map push, pop and peek helpers avoid having to go through a retpoline, and
the last one has a couple of minor fixes and cleanups for the ring buffer walk.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>

+96 -73
-7
include/net/tcp.h
··· 2051 2051 #define TCP_ULP_MAX 128 2052 2052 #define TCP_ULP_BUF_MAX (TCP_ULP_NAME_MAX*TCP_ULP_MAX) 2053 2053 2054 - enum { 2055 - TCP_ULP_TLS, 2056 - TCP_ULP_BPF, 2057 - }; 2058 - 2059 2054 struct tcp_ulp_ops { 2060 2055 struct list_head list; 2061 2056 ··· 2059 2064 /* cleanup ulp */ 2060 2065 void (*release)(struct sock *sk); 2061 2066 2062 - int uid; 2063 2067 char name[TCP_ULP_NAME_MAX]; 2064 - bool user_visible; 2065 2068 struct module *owner; 2066 2069 }; 2067 2070 int tcp_register_ulp(struct tcp_ulp_ops *type);
+47 -10
kernel/bpf/verifier.c
··· 1528 1528 return reg->type != SCALAR_VALUE; 1529 1529 } 1530 1530 1531 + static struct bpf_reg_state *reg_state(struct bpf_verifier_env *env, int regno) 1532 + { 1533 + return cur_regs(env) + regno; 1534 + } 1535 + 1531 1536 static bool is_pointer_value(struct bpf_verifier_env *env, int regno) 1532 1537 { 1533 - return __is_pointer_value(env->allow_ptr_leaks, cur_regs(env) + regno); 1538 + return __is_pointer_value(env->allow_ptr_leaks, reg_state(env, regno)); 1534 1539 } 1535 1540 1536 1541 static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) 1537 1542 { 1538 - const struct bpf_reg_state *reg = cur_regs(env) + regno; 1543 + const struct bpf_reg_state *reg = reg_state(env, regno); 1539 1544 1540 1545 return reg->type == PTR_TO_CTX || 1541 1546 reg->type == PTR_TO_SOCKET; ··· 1548 1543 1549 1544 static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) 1550 1545 { 1551 - const struct bpf_reg_state *reg = cur_regs(env) + regno; 1546 + const struct bpf_reg_state *reg = reg_state(env, regno); 1552 1547 1553 1548 return type_is_pkt_pointer(reg->type); 1549 + } 1550 + 1551 + static bool is_flow_key_reg(struct bpf_verifier_env *env, int regno) 1552 + { 1553 + const struct bpf_reg_state *reg = reg_state(env, regno); 1554 + 1555 + /* Separate to is_ctx_reg() since we still want to allow BPF_ST here. 
*/ 1556 + return reg->type == PTR_TO_FLOW_KEYS; 1554 1557 } 1555 1558 1556 1559 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env, ··· 1969 1956 } 1970 1957 1971 1958 if (is_ctx_reg(env, insn->dst_reg) || 1972 - is_pkt_reg(env, insn->dst_reg)) { 1959 + is_pkt_reg(env, insn->dst_reg) || 1960 + is_flow_key_reg(env, insn->dst_reg)) { 1973 1961 verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", 1974 - insn->dst_reg, reg_type_str[insn->dst_reg]); 1962 + insn->dst_reg, 1963 + reg_type_str[reg_state(env, insn->dst_reg)->type]); 1975 1964 return -EACCES; 1976 1965 } 1977 1966 ··· 1998 1983 int access_size, bool zero_size_allowed, 1999 1984 struct bpf_call_arg_meta *meta) 2000 1985 { 2001 - struct bpf_reg_state *reg = cur_regs(env) + regno; 1986 + struct bpf_reg_state *reg = reg_state(env, regno); 2002 1987 struct bpf_func_state *state = func(env, reg); 2003 1988 int off, i, slot, spi; 2004 1989 ··· 2077 2062 case PTR_TO_PACKET_META: 2078 2063 return check_packet_access(env, regno, reg->off, access_size, 2079 2064 zero_size_allowed); 2080 - case PTR_TO_FLOW_KEYS: 2081 - return check_flow_keys_access(env, reg->off, access_size); 2082 2065 case PTR_TO_MAP_VALUE: 2083 2066 return check_map_access(env, regno, reg->off, access_size, 2084 2067 zero_size_allowed); ··· 5277 5264 5278 5265 if (is_ctx_reg(env, insn->dst_reg)) { 5279 5266 verbose(env, "BPF_ST stores into R%d %s is not allowed\n", 5280 - insn->dst_reg, reg_type_str[insn->dst_reg]); 5267 + insn->dst_reg, 5268 + reg_type_str[reg_state(env, insn->dst_reg)->type]); 5281 5269 return -EACCES; 5282 5270 } 5283 5271 ··· 6178 6164 if (prog->jit_requested && BITS_PER_LONG == 64 && 6179 6165 (insn->imm == BPF_FUNC_map_lookup_elem || 6180 6166 insn->imm == BPF_FUNC_map_update_elem || 6181 - insn->imm == BPF_FUNC_map_delete_elem)) { 6167 + insn->imm == BPF_FUNC_map_delete_elem || 6168 + insn->imm == BPF_FUNC_map_push_elem || 6169 + insn->imm == BPF_FUNC_map_pop_elem || 6170 + insn->imm == 
BPF_FUNC_map_peek_elem)) { 6182 6171 aux = &env->insn_aux_data[i + delta]; 6183 6172 if (bpf_map_ptr_poisoned(aux)) 6184 6173 goto patch_call_imm; ··· 6214 6197 BUILD_BUG_ON(!__same_type(ops->map_update_elem, 6215 6198 (int (*)(struct bpf_map *map, void *key, void *value, 6216 6199 u64 flags))NULL)); 6200 + BUILD_BUG_ON(!__same_type(ops->map_push_elem, 6201 + (int (*)(struct bpf_map *map, void *value, 6202 + u64 flags))NULL)); 6203 + BUILD_BUG_ON(!__same_type(ops->map_pop_elem, 6204 + (int (*)(struct bpf_map *map, void *value))NULL)); 6205 + BUILD_BUG_ON(!__same_type(ops->map_peek_elem, 6206 + (int (*)(struct bpf_map *map, void *value))NULL)); 6207 + 6217 6208 switch (insn->imm) { 6218 6209 case BPF_FUNC_map_lookup_elem: 6219 6210 insn->imm = BPF_CAST_CALL(ops->map_lookup_elem) - ··· 6233 6208 continue; 6234 6209 case BPF_FUNC_map_delete_elem: 6235 6210 insn->imm = BPF_CAST_CALL(ops->map_delete_elem) - 6211 + __bpf_call_base; 6212 + continue; 6213 + case BPF_FUNC_map_push_elem: 6214 + insn->imm = BPF_CAST_CALL(ops->map_push_elem) - 6215 + __bpf_call_base; 6216 + continue; 6217 + case BPF_FUNC_map_pop_elem: 6218 + insn->imm = BPF_CAST_CALL(ops->map_pop_elem) - 6219 + __bpf_call_base; 6220 + continue; 6221 + case BPF_FUNC_map_peek_elem: 6222 + insn->imm = BPF_CAST_CALL(ops->map_peek_elem) - 6236 6223 __bpf_call_base; 6237 6224 continue; 6238 6225 }
-2
net/tls/tls_main.c
··· 715 715 716 716 static struct tcp_ulp_ops tcp_tls_ulp_ops __read_mostly = { 717 717 .name = "tls", 718 - .uid = TCP_ULP_TLS, 719 - .user_visible = true, 720 718 .owner = THIS_MODULE, 721 719 .init = tls_init, 722 720 };
+6 -4
tools/bpf/bpftool/map_perf_ring.c
··· 50 50 stop = true; 51 51 } 52 52 53 - static enum bpf_perf_event_ret print_bpf_output(void *event, void *priv) 53 + static enum bpf_perf_event_ret 54 + print_bpf_output(struct perf_event_header *event, void *private_data) 54 55 { 55 - struct event_ring_info *ring = priv; 56 - struct perf_event_sample *e = event; 56 + struct perf_event_sample *e = container_of(event, struct perf_event_sample, 57 + header); 58 + struct event_ring_info *ring = private_data; 57 59 struct { 58 60 struct perf_event_header header; 59 61 __u64 id; 60 62 __u64 lost; 61 - } *lost = event; 63 + } *lost = (typeof(lost))event; 62 64 63 65 if (json_output) { 64 66 jsonw_start_object(json_wtr);
+26 -35
tools/lib/bpf/libbpf.c
··· 2415 2415 } 2416 2416 2417 2417 enum bpf_perf_event_ret 2418 - bpf_perf_event_read_simple(void *mem, unsigned long size, 2419 - unsigned long page_size, void **buf, size_t *buf_len, 2420 - bpf_perf_event_print_t fn, void *priv) 2418 + bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, 2419 + void **copy_mem, size_t *copy_size, 2420 + bpf_perf_event_print_t fn, void *private_data) 2421 2421 { 2422 - struct perf_event_mmap_page *header = mem; 2422 + struct perf_event_mmap_page *header = mmap_mem; 2423 2423 __u64 data_head = ring_buffer_read_head(header); 2424 2424 __u64 data_tail = header->data_tail; 2425 - int ret = LIBBPF_PERF_EVENT_ERROR; 2426 - void *base, *begin, *end; 2425 + void *base = ((__u8 *)header) + page_size; 2426 + int ret = LIBBPF_PERF_EVENT_CONT; 2427 + struct perf_event_header *ehdr; 2428 + size_t ehdr_size; 2427 2429 2428 - if (data_head == data_tail) 2429 - return LIBBPF_PERF_EVENT_CONT; 2430 + while (data_head != data_tail) { 2431 + ehdr = base + (data_tail & (mmap_size - 1)); 2432 + ehdr_size = ehdr->size; 2430 2433 2431 - base = ((char *)header) + page_size; 2434 + if (((void *)ehdr) + ehdr_size > base + mmap_size) { 2435 + void *copy_start = ehdr; 2436 + size_t len_first = base + mmap_size - copy_start; 2437 + size_t len_secnd = ehdr_size - len_first; 2432 2438 2433 - begin = base + data_tail % size; 2434 - end = base + data_head % size; 2435 - 2436 - while (begin != end) { 2437 - struct perf_event_header *ehdr; 2438 - 2439 - ehdr = begin; 2440 - if (begin + ehdr->size > base + size) { 2441 - long len = base + size - begin; 2442 - 2443 - if (*buf_len < ehdr->size) { 2444 - free(*buf); 2445 - *buf = malloc(ehdr->size); 2446 - if (!*buf) { 2439 + if (*copy_size < ehdr_size) { 2440 + free(*copy_mem); 2441 + *copy_mem = malloc(ehdr_size); 2442 + if (!*copy_mem) { 2443 + *copy_size = 0; 2447 2444 ret = LIBBPF_PERF_EVENT_ERROR; 2448 2445 break; 2449 2446 } 2450 - *buf_len = ehdr->size; 2447 + *copy_size = ehdr_size; 
2451 2448 } 2452 2449 2453 - memcpy(*buf, begin, len); 2454 - memcpy(*buf + len, base, ehdr->size - len); 2455 - ehdr = (void *)*buf; 2456 - begin = base + ehdr->size - len; 2457 - } else if (begin + ehdr->size == base + size) { 2458 - begin = base; 2459 - } else { 2460 - begin += ehdr->size; 2450 + memcpy(*copy_mem, copy_start, len_first); 2451 + memcpy(*copy_mem + len_first, base, len_secnd); 2452 + ehdr = *copy_mem; 2461 2453 } 2462 2454 2463 - ret = fn(ehdr, priv); 2455 + ret = fn(ehdr, private_data); 2456 + data_tail += ehdr_size; 2464 2457 if (ret != LIBBPF_PERF_EVENT_CONT) 2465 2458 break; 2466 - 2467 - data_tail += ehdr->size; 2468 2459 } 2469 2460 2470 2461 ring_buffer_write_tail(header, data_tail);
+8 -7
tools/lib/bpf/libbpf.h
··· 297 297 LIBBPF_PERF_EVENT_CONT = -2, 298 298 }; 299 299 300 - typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(void *event, 301 - void *priv); 302 - LIBBPF_API int bpf_perf_event_read_simple(void *mem, unsigned long size, 303 - unsigned long page_size, 304 - void **buf, size_t *buf_len, 305 - bpf_perf_event_print_t fn, 306 - void *priv); 300 + struct perf_event_header; 301 + typedef enum bpf_perf_event_ret 302 + (*bpf_perf_event_print_t)(struct perf_event_header *hdr, 303 + void *private_data); 304 + LIBBPF_API enum bpf_perf_event_ret 305 + bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size, 306 + void **copy_mem, size_t *copy_size, 307 + bpf_perf_event_print_t fn, void *private_data); 307 308 308 309 struct nlattr; 309 310 typedef int (*libbpf_dump_nlmsg_t)(void *cookie, void *msg, struct nlattr **tb);
+5 -5
tools/testing/selftests/bpf/test_verifier.c
··· 3430 3430 BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0), 3431 3431 BPF_EXIT_INSN(), 3432 3432 }, 3433 - .errstr = "BPF_ST stores into R1 inv is not allowed", 3433 + .errstr = "BPF_ST stores into R1 ctx is not allowed", 3434 3434 .result = REJECT, 3435 3435 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3436 3436 }, ··· 3442 3442 BPF_REG_0, offsetof(struct __sk_buff, mark), 0), 3443 3443 BPF_EXIT_INSN(), 3444 3444 }, 3445 - .errstr = "BPF_XADD stores into R1 inv is not allowed", 3445 + .errstr = "BPF_XADD stores into R1 ctx is not allowed", 3446 3446 .result = REJECT, 3447 3447 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 3448 3448 }, ··· 5670 5670 .errstr_unpriv = "R2 leaks addr into mem", 5671 5671 .result_unpriv = REJECT, 5672 5672 .result = REJECT, 5673 - .errstr = "BPF_XADD stores into R1 inv is not allowed", 5673 + .errstr = "BPF_XADD stores into R1 ctx is not allowed", 5674 5674 }, 5675 5675 { 5676 5676 "leak pointer into ctx 2", ··· 5685 5685 .errstr_unpriv = "R10 leaks addr into mem", 5686 5686 .result_unpriv = REJECT, 5687 5687 .result = REJECT, 5688 - .errstr = "BPF_XADD stores into R1 inv is not allowed", 5688 + .errstr = "BPF_XADD stores into R1 ctx is not allowed", 5689 5689 }, 5690 5690 { 5691 5691 "leak pointer into ctx 3", ··· 12634 12634 BPF_EXIT_INSN(), 12635 12635 }, 12636 12636 .result = REJECT, 12637 - .errstr = "BPF_XADD stores into R2 ctx", 12637 + .errstr = "BPF_XADD stores into R2 pkt is not allowed", 12638 12638 .prog_type = BPF_PROG_TYPE_XDP, 12639 12639 }, 12640 12640 {
+4 -3
tools/testing/selftests/bpf/trace_helpers.c
··· 125 125 char data[]; 126 126 }; 127 127 128 - static enum bpf_perf_event_ret bpf_perf_event_print(void *event, void *priv) 128 + static enum bpf_perf_event_ret 129 + bpf_perf_event_print(struct perf_event_header *hdr, void *private_data) 129 130 { 130 - struct perf_event_sample *e = event; 131 - perf_event_print_fn fn = priv; 131 + struct perf_event_sample *e = (struct perf_event_sample *)hdr; 132 + perf_event_print_fn fn = private_data; 132 133 int ret; 133 134 134 135 if (e->header.type == PERF_RECORD_SAMPLE) {