Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ebpf: misc core cleanup

Among other changes, move bpf_tail_call_proto next to the remaining
definitions of other protos, improve comments a bit (i.e. remove some
obvious ones where the code is already self-documenting, and add
objectives for others), and simplify bpf_prog_array_compatible() a bit.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Daniel Borkmann; committed by David S. Miller.
3324b584 17ca8cbf

+58 -48
+41 -31
kernel/bpf/core.c
··· 26 26 #include <linux/vmalloc.h> 27 27 #include <linux/random.h> 28 28 #include <linux/moduleloader.h> 29 - #include <asm/unaligned.h> 30 29 #include <linux/bpf.h> 30 + 31 + #include <asm/unaligned.h> 31 32 32 33 /* Registers */ 33 34 #define BPF_R0 regs[BPF_REG_0] ··· 63 62 ptr = skb_network_header(skb) + k - SKF_NET_OFF; 64 63 else if (k >= SKF_LL_OFF) 65 64 ptr = skb_mac_header(skb) + k - SKF_LL_OFF; 65 + 66 66 if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) 67 67 return ptr; 68 68 ··· 177 175 { 178 176 return 0; 179 177 } 180 - 181 - const struct bpf_func_proto bpf_tail_call_proto = { 182 - .func = NULL, 183 - .gpl_only = false, 184 - .ret_type = RET_VOID, 185 - .arg1_type = ARG_PTR_TO_CTX, 186 - .arg2_type = ARG_CONST_MAP_PTR, 187 - .arg3_type = ARG_ANYTHING, 188 - }; 189 178 190 179 /** 191 180 * __bpf_prog_run - run eBPF program on a given context ··· 643 650 return 0; 644 651 } 645 652 646 - void __weak bpf_int_jit_compile(struct bpf_prog *prog) 653 + bool bpf_prog_array_compatible(struct bpf_array *array, 654 + const struct bpf_prog *fp) 647 655 { 648 - } 649 - 650 - bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp) 651 - { 652 - if (array->owner_prog_type) { 653 - if (array->owner_prog_type != fp->type) 654 - return false; 655 - if (array->owner_jited != fp->jited) 656 - return false; 657 - } else { 656 + if (!array->owner_prog_type) { 657 + /* There's no owner yet where we could check for 658 + * compatibility. 
659 + */ 658 660 array->owner_prog_type = fp->type; 659 661 array->owner_jited = fp->jited; 662 + 663 + return true; 660 664 } 661 - return true; 665 + 666 + return array->owner_prog_type == fp->type && 667 + array->owner_jited == fp->jited; 662 668 } 663 669 664 - static int check_tail_call(const struct bpf_prog *fp) 670 + static int bpf_check_tail_call(const struct bpf_prog *fp) 665 671 { 666 672 struct bpf_prog_aux *aux = fp->aux; 667 673 int i; 668 674 669 675 for (i = 0; i < aux->used_map_cnt; i++) { 676 + struct bpf_map *map = aux->used_maps[i]; 670 677 struct bpf_array *array; 671 - struct bpf_map *map; 672 678 673 - map = aux->used_maps[i]; 674 679 if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY) 675 680 continue; 681 + 676 682 array = container_of(map, struct bpf_array, map); 677 683 if (!bpf_prog_array_compatible(array, fp)) 678 684 return -EINVAL; ··· 681 689 } 682 690 683 691 /** 684 - * bpf_prog_select_runtime - select execution runtime for BPF program 692 + * bpf_prog_select_runtime - select exec runtime for BPF program 685 693 * @fp: bpf_prog populated with internal BPF program 686 694 * 687 - * try to JIT internal BPF program, if JIT is not available select interpreter 688 - * BPF program will be executed via BPF_PROG_RUN() macro 695 + * Try to JIT eBPF program, if JIT is not available, use interpreter. 696 + * The BPF program will be executed via BPF_PROG_RUN() macro. 689 697 */ 690 698 int bpf_prog_select_runtime(struct bpf_prog *fp) 691 699 { 692 700 fp->bpf_func = (void *) __bpf_prog_run; 693 701 694 - /* Probe if internal BPF can be JITed */ 695 702 bpf_int_jit_compile(fp); 696 - /* Lock whole bpf_prog as read-only */ 697 703 bpf_prog_lock_ro(fp); 698 704 699 - return check_tail_call(fp); 705 + /* The tail call compatibility check can only be done at 706 + * this late stage as we need to determine, if we deal 707 + * with JITed or non JITed program concatenations and not 708 + * all eBPF JITs might immediately support all features. 
709 + */ 710 + return bpf_check_tail_call(fp); 700 711 } 701 712 EXPORT_SYMBOL_GPL(bpf_prog_select_runtime); 702 713 ··· 730 735 const struct bpf_func_proto bpf_get_prandom_u32_proto __weak; 731 736 const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak; 732 737 const struct bpf_func_proto bpf_ktime_get_ns_proto __weak; 738 + 739 + /* Always built-in helper functions. */ 740 + const struct bpf_func_proto bpf_tail_call_proto = { 741 + .func = NULL, 742 + .gpl_only = false, 743 + .ret_type = RET_VOID, 744 + .arg1_type = ARG_PTR_TO_CTX, 745 + .arg2_type = ARG_CONST_MAP_PTR, 746 + .arg3_type = ARG_ANYTHING, 747 + }; 748 + 749 + /* For classic BPF JITs that don't implement bpf_int_jit_compile(). */ 750 + void __weak bpf_int_jit_compile(struct bpf_prog *prog) 751 + { 752 + } 733 753 734 754 /* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call 735 755 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
+17 -17
kernel/bpf/helpers.c
··· 45 45 } 46 46 47 47 const struct bpf_func_proto bpf_map_lookup_elem_proto = { 48 - .func = bpf_map_lookup_elem, 49 - .gpl_only = false, 50 - .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 51 - .arg1_type = ARG_CONST_MAP_PTR, 52 - .arg2_type = ARG_PTR_TO_MAP_KEY, 48 + .func = bpf_map_lookup_elem, 49 + .gpl_only = false, 50 + .ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL, 51 + .arg1_type = ARG_CONST_MAP_PTR, 52 + .arg2_type = ARG_PTR_TO_MAP_KEY, 53 53 }; 54 54 55 55 static u64 bpf_map_update_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ··· 64 64 } 65 65 66 66 const struct bpf_func_proto bpf_map_update_elem_proto = { 67 - .func = bpf_map_update_elem, 68 - .gpl_only = false, 69 - .ret_type = RET_INTEGER, 70 - .arg1_type = ARG_CONST_MAP_PTR, 71 - .arg2_type = ARG_PTR_TO_MAP_KEY, 72 - .arg3_type = ARG_PTR_TO_MAP_VALUE, 73 - .arg4_type = ARG_ANYTHING, 67 + .func = bpf_map_update_elem, 68 + .gpl_only = false, 69 + .ret_type = RET_INTEGER, 70 + .arg1_type = ARG_CONST_MAP_PTR, 71 + .arg2_type = ARG_PTR_TO_MAP_KEY, 72 + .arg3_type = ARG_PTR_TO_MAP_VALUE, 73 + .arg4_type = ARG_ANYTHING, 74 74 }; 75 75 76 76 static u64 bpf_map_delete_elem(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5) ··· 84 84 } 85 85 86 86 const struct bpf_func_proto bpf_map_delete_elem_proto = { 87 - .func = bpf_map_delete_elem, 88 - .gpl_only = false, 89 - .ret_type = RET_INTEGER, 90 - .arg1_type = ARG_CONST_MAP_PTR, 91 - .arg2_type = ARG_PTR_TO_MAP_KEY, 87 + .func = bpf_map_delete_elem, 88 + .gpl_only = false, 89 + .ret_type = RET_INTEGER, 90 + .arg1_type = ARG_CONST_MAP_PTR, 91 + .arg2_type = ARG_PTR_TO_MAP_KEY, 92 92 }; 93 93 94 94 static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)