Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bpf, x86: add new map type: instructions array

On bpf(BPF_PROG_LOAD) syscall user-supplied BPF programs are
translated by the verifier into "xlated" BPF programs. During this
process the original instruction offsets might be adjusted and/or
individual instructions might be replaced by new sets of instructions,
or deleted.

Add a new BPF map type which is aimed to keep track of how, for a
given program, the original instructions were relocated during the
verification. Also, besides keeping track of the original -> xlated
mapping, make x86 JIT to build the xlated -> jitted mapping for every
instruction listed in an instruction array. This is required for every
future application of instruction arrays: static keys, indirect jumps
and indirect calls.

A map of the BPF_MAP_TYPE_INSN_ARRAY type must be created with u32
keys and values of type struct bpf_insn_array_value. The values have
different semantics for userspace and for BPF space. For userspace a
value exposes the original, xlated and jitted offsets of the tracked
instruction. For the BPF side the value is a real pointer to a jitted
instruction.

On map creation/initialization, before loading the program, each
element of the map should be initialized to point to an instruction
offset within the program. Before the program is loaded, such maps
should be frozen. After the program verification xlated and jitted
offsets can be read via the bpf(2) syscall.

If a tracked instruction is removed by the verifier, then the xlated
offset is set to (u32)-1 which is considered to be too big for a valid
BPF program offset.

One such map can, obviously, be used to track one and only one BPF
program. If the verification process was unsuccessful, then the same
map can be re-used to verify the program with a different log level.
However, if the program was loaded fine, then such a map, being
frozen in any case, can't be reused by other programs even after the
program release.

Example. Consider the following original and xlated programs:

Original prog:                     Xlated prog:

 0: r1 = 0x0                        0: r1 = 0
 1: *(u32 *)(r10 - 0x4) = r1        1: *(u32 *)(r10 -4) = r1
 2: r2 = r10                        2: r2 = r10
 3: r2 += -0x4                      3: r2 += -4
 4: r1 = 0x0 ll                     4: r1 = map[id:88]
 6: call 0x1                        6: r1 += 272
                                    7: r0 = *(u32 *)(r2 +0)
                                    8: if r0 >= 0x1 goto pc+3
                                    9: r0 <<= 3
                                   10: r0 += r1
                                   11: goto pc+1
                                   12: r0 = 0
 7: r6 = r0                        13: r6 = r0
 8: if r6 == 0x0 goto +0x2         14: if r6 == 0x0 goto pc+4
 9: call 0x76                      15: r0 = 0xffffffff8d2079c0
                                   17: r0 = *(u64 *)(r0 +0)
10: *(u64 *)(r6 + 0x0) = r0        18: *(u64 *)(r6 +0) = r0
11: r0 = 0x0                       19: r0 = 0x0
12: exit                           20: exit

An instruction array map, containing, e.g., instructions [0,4,7,12]
will be translated by the verifier to [0,4,13,20]. A map with
index 5 (the middle of a 16-byte instruction) or indexes greater than 12
(outside the program boundaries) would be rejected.

The functionality provided by this patch will be extended in subsequent
patches to implement BPF Static Keys, indirect jumps, and indirect calls.

Signed-off-by: Anton Protopopov <a.s.protopopov@gmail.com>
Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20251105090410.1250500-2-a.s.protopopov@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>

authored by

Anton Protopopov and committed by
Alexei Starovoitov
b4ce5923 4cb4897b

+423 -1
+9
arch/x86/net/bpf_jit_comp.c
··· 3827 3827 jit_data->header = header; 3828 3828 jit_data->rw_header = rw_header; 3829 3829 } 3830 + 3831 + /* 3832 + * The bpf_prog_update_insn_ptrs function expects addrs to 3833 + * point to the first byte of the jitted instruction (unlike 3834 + * the bpf_prog_fill_jited_linfo below, which, for historical 3835 + * reasons, expects to point to the next instruction) 3836 + */ 3837 + bpf_prog_update_insn_ptrs(prog, addrs, image); 3838 + 3830 3839 /* 3831 3840 * ctx.prog_offset is used when CFI preambles put code *before* 3832 3841 * the function. See emit_cfi(). For FineIBT specifically this code
+15
include/linux/bpf.h
··· 3797 3797 const char **linep, int *nump); 3798 3798 struct bpf_prog *bpf_prog_find_from_stack(void); 3799 3799 3800 + int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog); 3801 + int bpf_insn_array_ready(struct bpf_map *map); 3802 + void bpf_insn_array_release(struct bpf_map *map); 3803 + void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len); 3804 + void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len); 3805 + 3806 + #ifdef CONFIG_BPF_SYSCALL 3807 + void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image); 3808 + #else 3809 + static inline void 3810 + bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image) 3811 + { 3812 + } 3813 + #endif 3814 + 3800 3815 #endif /* _LINUX_BPF_H */
+1
include/linux/bpf_types.h
··· 133 133 BPF_MAP_TYPE(BPF_MAP_TYPE_BLOOM_FILTER, bloom_filter_map_ops) 134 134 BPF_MAP_TYPE(BPF_MAP_TYPE_USER_RINGBUF, user_ringbuf_map_ops) 135 135 BPF_MAP_TYPE(BPF_MAP_TYPE_ARENA, arena_map_ops) 136 + BPF_MAP_TYPE(BPF_MAP_TYPE_INSN_ARRAY, insn_array_map_ops) 136 137 137 138 BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint) 138 139 BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
+2
include/linux/bpf_verifier.h
··· 754 754 struct list_head free_list; /* list of struct bpf_verifier_state_list */ 755 755 struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ 756 756 struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTF's used by BPF program */ 757 + struct bpf_map *insn_array_maps[MAX_USED_MAPS]; /* array of INSN_ARRAY map's to be relocated */ 757 758 u32 used_map_cnt; /* number of used maps */ 758 759 u32 used_btf_cnt; /* number of used BTF objects */ 760 + u32 insn_array_map_cnt; /* number of used maps of type BPF_MAP_TYPE_INSN_ARRAY */ 759 761 u32 id_gen; /* used to generate unique reg IDs */ 760 762 u32 hidden_subprog_cnt; /* number of hidden subprogs */ 761 763 int exception_callback_subprog;
+21
include/uapi/linux/bpf.h
··· 1026 1026 BPF_MAP_TYPE_USER_RINGBUF, 1027 1027 BPF_MAP_TYPE_CGRP_STORAGE, 1028 1028 BPF_MAP_TYPE_ARENA, 1029 + BPF_MAP_TYPE_INSN_ARRAY, 1029 1030 __MAX_BPF_MAP_TYPE 1030 1031 }; 1031 1032 ··· 7648 7647 */ 7649 7648 enum bpf_kfunc_flags { 7650 7649 BPF_F_PAD_ZEROS = (1ULL << 0), 7650 + }; 7651 + 7652 + /* 7653 + * Values of a BPF_MAP_TYPE_INSN_ARRAY entry must be of this type. 7654 + * 7655 + * Before the map is used the orig_off field should point to an 7656 + * instruction inside the program being loaded. The other fields 7657 + * must be set to 0. 7658 + * 7659 + * After the program is loaded, the xlated_off will be adjusted 7660 + * by the verifier to point to the index of the original instruction 7661 + * in the xlated program. If the instruction is deleted, it will 7662 + * be set to (u32)-1. The jitted_off will be set to the corresponding 7663 + * offset in the jitted image of the program. 7664 + */ 7665 + struct bpf_insn_array_value { 7666 + __u32 orig_off; 7667 + __u32 xlated_off; 7668 + __u32 jitted_off; 7669 + __u32 :32; 7651 7670 }; 7652 7671 7653 7672 #endif /* _UAPI__LINUX_BPF_H__ */
+1 -1
kernel/bpf/Makefile
··· 9 9 obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o token.o liveness.o 10 10 obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o 11 11 obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o 12 - obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o 12 + obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o bpf_insn_array.o 13 13 obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o 14 14 obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o 15 15 obj-$(CONFIG_BPF_SYSCALL) += disasm.o mprog.o
+286
kernel/bpf/bpf_insn_array.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only 2 + /* Copyright (c) 2025 Isovalent */ 3 + 4 + #include <linux/bpf.h> 5 + 6 + struct bpf_insn_array { 7 + struct bpf_map map; 8 + atomic_t used; 9 + long *ips; 10 + DECLARE_FLEX_ARRAY(struct bpf_insn_array_value, values); 11 + }; 12 + 13 + #define cast_insn_array(MAP_PTR) \ 14 + container_of((MAP_PTR), struct bpf_insn_array, map) 15 + 16 + #define INSN_DELETED ((u32)-1) 17 + 18 + static inline u64 insn_array_alloc_size(u32 max_entries) 19 + { 20 + const u64 base_size = sizeof(struct bpf_insn_array); 21 + const u64 entry_size = sizeof(struct bpf_insn_array_value); 22 + 23 + return base_size + max_entries * (entry_size + sizeof(long)); 24 + } 25 + 26 + static int insn_array_alloc_check(union bpf_attr *attr) 27 + { 28 + u32 value_size = sizeof(struct bpf_insn_array_value); 29 + 30 + if (attr->max_entries == 0 || attr->key_size != 4 || 31 + attr->value_size != value_size || attr->map_flags != 0) 32 + return -EINVAL; 33 + 34 + return 0; 35 + } 36 + 37 + static void insn_array_free(struct bpf_map *map) 38 + { 39 + struct bpf_insn_array *insn_array = cast_insn_array(map); 40 + 41 + bpf_map_area_free(insn_array); 42 + } 43 + 44 + static struct bpf_map *insn_array_alloc(union bpf_attr *attr) 45 + { 46 + u64 size = insn_array_alloc_size(attr->max_entries); 47 + struct bpf_insn_array *insn_array; 48 + 49 + insn_array = bpf_map_area_alloc(size, NUMA_NO_NODE); 50 + if (!insn_array) 51 + return ERR_PTR(-ENOMEM); 52 + 53 + /* ips are allocated right after the insn_array->values[] array */ 54 + insn_array->ips = (void *)&insn_array->values[attr->max_entries]; 55 + 56 + bpf_map_init_from_attr(&insn_array->map, attr); 57 + 58 + return &insn_array->map; 59 + } 60 + 61 + static void *insn_array_lookup_elem(struct bpf_map *map, void *key) 62 + { 63 + struct bpf_insn_array *insn_array = cast_insn_array(map); 64 + u32 index = *(u32 *)key; 65 + 66 + if (unlikely(index >= insn_array->map.max_entries)) 67 + return NULL; 68 + 69 + return 
&insn_array->values[index]; 70 + } 71 + 72 + static long insn_array_update_elem(struct bpf_map *map, void *key, void *value, u64 map_flags) 73 + { 74 + struct bpf_insn_array *insn_array = cast_insn_array(map); 75 + u32 index = *(u32 *)key; 76 + struct bpf_insn_array_value val = {}; 77 + 78 + if (unlikely(index >= insn_array->map.max_entries)) 79 + return -E2BIG; 80 + 81 + if (unlikely(map_flags & BPF_NOEXIST)) 82 + return -EEXIST; 83 + 84 + copy_map_value(map, &val, value); 85 + if (val.jitted_off || val.xlated_off) 86 + return -EINVAL; 87 + 88 + insn_array->values[index].orig_off = val.orig_off; 89 + 90 + return 0; 91 + } 92 + 93 + static long insn_array_delete_elem(struct bpf_map *map, void *key) 94 + { 95 + return -EINVAL; 96 + } 97 + 98 + static int insn_array_check_btf(const struct bpf_map *map, 99 + const struct btf *btf, 100 + const struct btf_type *key_type, 101 + const struct btf_type *value_type) 102 + { 103 + if (!btf_type_is_i32(key_type)) 104 + return -EINVAL; 105 + 106 + if (!btf_type_is_i64(value_type)) 107 + return -EINVAL; 108 + 109 + return 0; 110 + } 111 + 112 + static u64 insn_array_mem_usage(const struct bpf_map *map) 113 + { 114 + return insn_array_alloc_size(map->max_entries); 115 + } 116 + 117 + BTF_ID_LIST_SINGLE(insn_array_btf_ids, struct, bpf_insn_array) 118 + 119 + const struct bpf_map_ops insn_array_map_ops = { 120 + .map_alloc_check = insn_array_alloc_check, 121 + .map_alloc = insn_array_alloc, 122 + .map_free = insn_array_free, 123 + .map_get_next_key = bpf_array_get_next_key, 124 + .map_lookup_elem = insn_array_lookup_elem, 125 + .map_update_elem = insn_array_update_elem, 126 + .map_delete_elem = insn_array_delete_elem, 127 + .map_check_btf = insn_array_check_btf, 128 + .map_mem_usage = insn_array_mem_usage, 129 + .map_btf_id = &insn_array_btf_ids[0], 130 + }; 131 + 132 + static inline bool is_frozen(struct bpf_map *map) 133 + { 134 + guard(mutex)(&map->freeze_mutex); 135 + 136 + return map->frozen; 137 + } 138 + 139 + static bool 
is_insn_array(const struct bpf_map *map) 140 + { 141 + return map->map_type == BPF_MAP_TYPE_INSN_ARRAY; 142 + } 143 + 144 + static inline bool valid_offsets(const struct bpf_insn_array *insn_array, 145 + const struct bpf_prog *prog) 146 + { 147 + u32 off; 148 + int i; 149 + 150 + for (i = 0; i < insn_array->map.max_entries; i++) { 151 + off = insn_array->values[i].orig_off; 152 + 153 + if (off >= prog->len) 154 + return false; 155 + 156 + if (off > 0) { 157 + if (prog->insnsi[off-1].code == (BPF_LD | BPF_DW | BPF_IMM)) 158 + return false; 159 + } 160 + } 161 + 162 + return true; 163 + } 164 + 165 + int bpf_insn_array_init(struct bpf_map *map, const struct bpf_prog *prog) 166 + { 167 + struct bpf_insn_array *insn_array = cast_insn_array(map); 168 + struct bpf_insn_array_value *values = insn_array->values; 169 + int i; 170 + 171 + if (!is_frozen(map)) 172 + return -EINVAL; 173 + 174 + if (!valid_offsets(insn_array, prog)) 175 + return -EINVAL; 176 + 177 + /* 178 + * There can be only one program using the map 179 + */ 180 + if (atomic_xchg(&insn_array->used, 1)) 181 + return -EBUSY; 182 + 183 + /* 184 + * Reset all the map indexes to the original values. This is needed, 185 + * e.g., when a replay of verification with different log level should 186 + * be performed. 
187 + */ 188 + for (i = 0; i < map->max_entries; i++) 189 + values[i].xlated_off = values[i].orig_off; 190 + 191 + return 0; 192 + } 193 + 194 + int bpf_insn_array_ready(struct bpf_map *map) 195 + { 196 + struct bpf_insn_array *insn_array = cast_insn_array(map); 197 + int i; 198 + 199 + for (i = 0; i < map->max_entries; i++) { 200 + if (insn_array->values[i].xlated_off == INSN_DELETED) 201 + continue; 202 + if (!insn_array->ips[i]) 203 + return -EFAULT; 204 + } 205 + 206 + return 0; 207 + } 208 + 209 + void bpf_insn_array_release(struct bpf_map *map) 210 + { 211 + struct bpf_insn_array *insn_array = cast_insn_array(map); 212 + 213 + atomic_set(&insn_array->used, 0); 214 + } 215 + 216 + void bpf_insn_array_adjust(struct bpf_map *map, u32 off, u32 len) 217 + { 218 + struct bpf_insn_array *insn_array = cast_insn_array(map); 219 + int i; 220 + 221 + if (len <= 1) 222 + return; 223 + 224 + for (i = 0; i < map->max_entries; i++) { 225 + if (insn_array->values[i].xlated_off <= off) 226 + continue; 227 + if (insn_array->values[i].xlated_off == INSN_DELETED) 228 + continue; 229 + insn_array->values[i].xlated_off += len - 1; 230 + } 231 + } 232 + 233 + void bpf_insn_array_adjust_after_remove(struct bpf_map *map, u32 off, u32 len) 234 + { 235 + struct bpf_insn_array *insn_array = cast_insn_array(map); 236 + int i; 237 + 238 + for (i = 0; i < map->max_entries; i++) { 239 + if (insn_array->values[i].xlated_off < off) 240 + continue; 241 + if (insn_array->values[i].xlated_off == INSN_DELETED) 242 + continue; 243 + if (insn_array->values[i].xlated_off < off + len) 244 + insn_array->values[i].xlated_off = INSN_DELETED; 245 + else 246 + insn_array->values[i].xlated_off -= len; 247 + } 248 + } 249 + 250 + /* 251 + * This function is called by JITs. The image is the real program 252 + * image, the offsets array set up the xlated -> jitted mapping. 253 + * The offsets[xlated] offset should point to the beginning of 254 + * the jitted instruction. 
255 + */ 256 + void bpf_prog_update_insn_ptrs(struct bpf_prog *prog, u32 *offsets, void *image) 257 + { 258 + struct bpf_insn_array *insn_array; 259 + struct bpf_map *map; 260 + u32 xlated_off; 261 + int i, j; 262 + 263 + if (!offsets || !image) 264 + return; 265 + 266 + for (i = 0; i < prog->aux->used_map_cnt; i++) { 267 + map = prog->aux->used_maps[i]; 268 + if (!is_insn_array(map)) 269 + continue; 270 + 271 + insn_array = cast_insn_array(map); 272 + for (j = 0; j < map->max_entries; j++) { 273 + xlated_off = insn_array->values[j].xlated_off; 274 + if (xlated_off == INSN_DELETED) 275 + continue; 276 + if (xlated_off < prog->aux->subprog_start) 277 + continue; 278 + xlated_off -= prog->aux->subprog_start; 279 + if (xlated_off >= prog->len) 280 + continue; 281 + 282 + insn_array->values[j].jitted_off = offsets[xlated_off]; 283 + insn_array->ips[j] = (long)(image + offsets[xlated_off]); 284 + } 285 + } 286 + }
+22
kernel/bpf/syscall.c
··· 1493 1493 case BPF_MAP_TYPE_STRUCT_OPS: 1494 1494 case BPF_MAP_TYPE_CPUMAP: 1495 1495 case BPF_MAP_TYPE_ARENA: 1496 + case BPF_MAP_TYPE_INSN_ARRAY: 1496 1497 if (!bpf_token_capable(token, CAP_BPF)) 1497 1498 goto put_token; 1498 1499 break; ··· 2854 2853 return err; 2855 2854 } 2856 2855 2856 + static int bpf_prog_mark_insn_arrays_ready(struct bpf_prog *prog) 2857 + { 2858 + int err; 2859 + int i; 2860 + 2861 + for (i = 0; i < prog->aux->used_map_cnt; i++) { 2862 + if (prog->aux->used_maps[i]->map_type != BPF_MAP_TYPE_INSN_ARRAY) 2863 + continue; 2864 + 2865 + err = bpf_insn_array_ready(prog->aux->used_maps[i]); 2866 + if (err) 2867 + return err; 2868 + } 2869 + 2870 + return 0; 2871 + } 2872 + 2857 2873 /* last field in 'union bpf_attr' used by this command */ 2858 2874 #define BPF_PROG_LOAD_LAST_FIELD keyring_id 2859 2875 ··· 3097 3079 goto free_used_maps; 3098 3080 3099 3081 prog = bpf_prog_select_runtime(prog, &err); 3082 + if (err < 0) 3083 + goto free_used_maps; 3084 + 3085 + err = bpf_prog_mark_insn_arrays_ready(prog); 3100 3086 if (err < 0) 3101 3087 goto free_used_maps; 3102 3088
+45
kernel/bpf/verifier.c
··· 10086 10086 func_id != BPF_FUNC_map_push_elem) 10087 10087 goto error; 10088 10088 break; 10089 + case BPF_MAP_TYPE_INSN_ARRAY: 10090 + goto error; 10089 10091 default: 10090 10092 break; 10091 10093 } ··· 20584 20582 20585 20583 env->used_maps[env->used_map_cnt++] = map; 20586 20584 20585 + if (map->map_type == BPF_MAP_TYPE_INSN_ARRAY) { 20586 + err = bpf_insn_array_init(map, env->prog); 20587 + if (err) { 20588 + verbose(env, "Failed to properly initialize insn array\n"); 20589 + return err; 20590 + } 20591 + env->insn_array_maps[env->insn_array_map_cnt++] = map; 20592 + } 20593 + 20587 20594 return env->used_map_cnt - 1; 20588 20595 } 20589 20596 ··· 20839 20828 } 20840 20829 } 20841 20830 20831 + static void release_insn_arrays(struct bpf_verifier_env *env) 20832 + { 20833 + int i; 20834 + 20835 + for (i = 0; i < env->insn_array_map_cnt; i++) 20836 + bpf_insn_array_release(env->insn_array_maps[i]); 20837 + } 20838 + 20839 + static void adjust_insn_arrays(struct bpf_verifier_env *env, u32 off, u32 len) 20840 + { 20841 + int i; 20842 + 20843 + if (len == 1) 20844 + return; 20845 + 20846 + for (i = 0; i < env->insn_array_map_cnt; i++) 20847 + bpf_insn_array_adjust(env->insn_array_maps[i], off, len); 20848 + } 20849 + 20850 + static void adjust_insn_arrays_after_remove(struct bpf_verifier_env *env, u32 off, u32 len) 20851 + { 20852 + int i; 20853 + 20854 + for (i = 0; i < env->insn_array_map_cnt; i++) 20855 + bpf_insn_array_adjust_after_remove(env->insn_array_maps[i], off, len); 20856 + } 20857 + 20842 20858 static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len) 20843 20859 { 20844 20860 struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab; ··· 20907 20869 } 20908 20870 adjust_insn_aux_data(env, new_prog, off, len); 20909 20871 adjust_subprog_starts(env, off, len); 20872 + adjust_insn_arrays(env, off, len); 20910 20873 adjust_poke_descs(new_prog, off, len); 20911 20874 return new_prog; 20912 20875 } ··· 21090 21051 err = 
bpf_adj_linfo_after_remove(env, off, cnt); 21091 21052 if (err) 21092 21053 return err; 21054 + 21055 + adjust_insn_arrays_after_remove(env, off, cnt); 21093 21056 21094 21057 memmove(aux_data + off, aux_data + off + cnt, 21095 21058 sizeof(*aux_data) * (orig_prog_len - off - cnt)); ··· 21736 21695 func[i]->aux->jited_linfo = prog->aux->jited_linfo; 21737 21696 func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx; 21738 21697 func[i]->aux->arena = prog->aux->arena; 21698 + func[i]->aux->used_maps = env->used_maps; 21699 + func[i]->aux->used_map_cnt = env->used_map_cnt; 21739 21700 num_exentries = 0; 21740 21701 insn = func[i]->insnsi; 21741 21702 for (j = 0; j < func[i]->len; j++, insn++) { ··· 24914 24871 adjust_btf_func(env); 24915 24872 24916 24873 err_release_maps: 24874 + if (ret) 24875 + release_insn_arrays(env); 24917 24876 if (!env->prog->aux->used_maps) 24918 24877 /* if we didn't copy map pointers into bpf_prog_info, release 24919 24878 * them now. Otherwise free_used_maps() will release them.
+21
tools/include/uapi/linux/bpf.h
··· 1026 1026 BPF_MAP_TYPE_USER_RINGBUF, 1027 1027 BPF_MAP_TYPE_CGRP_STORAGE, 1028 1028 BPF_MAP_TYPE_ARENA, 1029 + BPF_MAP_TYPE_INSN_ARRAY, 1029 1030 __MAX_BPF_MAP_TYPE 1030 1031 }; 1031 1032 ··· 7648 7647 */ 7649 7648 enum bpf_kfunc_flags { 7650 7649 BPF_F_PAD_ZEROS = (1ULL << 0), 7650 + }; 7651 + 7652 + /* 7653 + * Values of a BPF_MAP_TYPE_INSN_ARRAY entry must be of this type. 7654 + * 7655 + * Before the map is used the orig_off field should point to an 7656 + * instruction inside the program being loaded. The other fields 7657 + * must be set to 0. 7658 + * 7659 + * After the program is loaded, the xlated_off will be adjusted 7660 + * by the verifier to point to the index of the original instruction 7661 + * in the xlated program. If the instruction is deleted, it will 7662 + * be set to (u32)-1. The jitted_off will be set to the corresponding 7663 + * offset in the jitted image of the program. 7664 + */ 7665 + struct bpf_insn_array_value { 7666 + __u32 orig_off; 7667 + __u32 xlated_off; 7668 + __u32 jitted_off; 7669 + __u32 :32; 7651 7670 }; 7652 7671 7653 7672 #endif /* _UAPI__LINUX_BPF_H__ */