Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libbpf: Add "map_extra" as a per-map-type extra flag

This patch adds the libbpf infrastructure for supporting a
per-map-type "map_extra" field, whose definition will be
idiosyncratic depending on map type.

For example, for the bloom filter map, the lower 4 bits of
map_extra is used to denote the number of hash functions.

Please note that until libbpf 1.0 is here, the
"bpf_create_map_params" struct is used as a temporary
means for propagating the map_extra field to the kernel.

Signed-off-by: Joanne Koong <joannekoong@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20211027234504.30744-3-joannekoong@fb.com

Authored by Joanne Koong and committed by Alexei Starovoitov
47512102 9330986c

+91 -9
+26 -1
tools/lib/bpf/bpf.c
··· 77 77 return fd; 78 78 } 79 79 80 - int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) 80 + int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr) 81 81 { 82 82 union bpf_attr attr; 83 83 int fd; ··· 102 102 create_attr->btf_vmlinux_value_type_id; 103 103 else 104 104 attr.inner_map_fd = create_attr->inner_map_fd; 105 + attr.map_extra = create_attr->map_extra; 105 106 106 107 fd = sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); 107 108 return libbpf_err_errno(fd); 109 + } 110 + 111 + int bpf_create_map_xattr(const struct bpf_create_map_attr *create_attr) 112 + { 113 + struct bpf_create_map_params p = {}; 114 + 115 + p.map_type = create_attr->map_type; 116 + p.key_size = create_attr->key_size; 117 + p.value_size = create_attr->value_size; 118 + p.max_entries = create_attr->max_entries; 119 + p.map_flags = create_attr->map_flags; 120 + p.name = create_attr->name; 121 + p.numa_node = create_attr->numa_node; 122 + p.btf_fd = create_attr->btf_fd; 123 + p.btf_key_type_id = create_attr->btf_key_type_id; 124 + p.btf_value_type_id = create_attr->btf_value_type_id; 125 + p.map_ifindex = create_attr->map_ifindex; 126 + if (p.map_type == BPF_MAP_TYPE_STRUCT_OPS) 127 + p.btf_vmlinux_value_type_id = 128 + create_attr->btf_vmlinux_value_type_id; 129 + else 130 + p.inner_map_fd = create_attr->inner_map_fd; 131 + 132 + return libbpf__bpf_create_map_xattr(&p); 108 133 } 109 134 110 135 int bpf_create_map_node(enum bpf_map_type map_type, const char *name,
+1 -1
tools/lib/bpf/bpf_gen_internal.h
··· 43 43 int bpf_gen__finish(struct bpf_gen *gen); 44 44 void bpf_gen__free(struct bpf_gen *gen); 45 45 void bpf_gen__load_btf(struct bpf_gen *gen, const void *raw_data, __u32 raw_size); 46 - void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_attr *map_attr, int map_idx); 46 + void bpf_gen__map_create(struct bpf_gen *gen, struct bpf_create_map_params *map_attr, int map_idx); 47 47 struct bpf_prog_load_params; 48 48 void bpf_gen__prog_load(struct bpf_gen *gen, struct bpf_prog_load_params *load_attr, int prog_idx); 49 49 void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *value, __u32 value_size);
+2 -1
tools/lib/bpf/gen_loader.c
··· 431 431 } 432 432 433 433 void bpf_gen__map_create(struct bpf_gen *gen, 434 - struct bpf_create_map_attr *map_attr, int map_idx) 434 + struct bpf_create_map_params *map_attr, int map_idx) 435 435 { 436 436 int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id); 437 437 bool close_inner_map_fd = false; ··· 443 443 attr.key_size = map_attr->key_size; 444 444 attr.value_size = map_attr->value_size; 445 445 attr.map_flags = map_attr->map_flags; 446 + attr.map_extra = map_attr->map_extra; 446 447 memcpy(attr.map_name, map_attr->name, 447 448 min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1)); 448 449 attr.numa_node = map_attr->numa_node;
+33 -5
tools/lib/bpf/libbpf.c
··· 400 400 char *pin_path; 401 401 bool pinned; 402 402 bool reused; 403 + __u64 map_extra; 403 404 }; 404 405 405 406 enum extern_type { ··· 2325 2324 } 2326 2325 map_def->pinning = val; 2327 2326 map_def->parts |= MAP_DEF_PINNING; 2327 + } else if (strcmp(name, "map_extra") == 0) { 2328 + __u32 map_extra; 2329 + 2330 + if (!get_map_field_int(map_name, btf, m, &map_extra)) 2331 + return -EINVAL; 2332 + map_def->map_extra = map_extra; 2333 + map_def->parts |= MAP_DEF_MAP_EXTRA; 2328 2334 } else { 2329 2335 if (strict) { 2330 2336 pr_warn("map '%s': unknown field '%s'.\n", map_name, name); ··· 2356 2348 map->def.value_size = def->value_size; 2357 2349 map->def.max_entries = def->max_entries; 2358 2350 map->def.map_flags = def->map_flags; 2351 + map->map_extra = def->map_extra; 2359 2352 2360 2353 map->numa_node = def->numa_node; 2361 2354 map->btf_key_type_id = def->key_type_id; ··· 2380 2371 if (def->parts & MAP_DEF_MAX_ENTRIES) 2381 2372 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries); 2382 2373 if (def->parts & MAP_DEF_MAP_FLAGS) 2383 - pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags); 2374 + pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags); 2375 + if (def->parts & MAP_DEF_MAP_EXTRA) 2376 + pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name, 2377 + (unsigned long long)def->map_extra); 2384 2378 if (def->parts & MAP_DEF_PINNING) 2385 2379 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning); 2386 2380 if (def->parts & MAP_DEF_NUMA_NODE) ··· 4222 4210 map->btf_key_type_id = info.btf_key_type_id; 4223 4211 map->btf_value_type_id = info.btf_value_type_id; 4224 4212 map->reused = true; 4213 + map->map_extra = info.map_extra; 4225 4214 4226 4215 return 0; 4227 4216 ··· 4737 4724 map_info.key_size == map->def.key_size && 4738 4725 map_info.value_size == map->def.value_size && 4739 4726 map_info.max_entries == map->def.max_entries && 4740 - map_info.map_flags == map->def.map_flags); 4727 + map_info.map_flags == map->def.map_flags && 4728 + map_info.map_extra == map->map_extra); 4741 4729 } 4742 4730 4743 4731 static int ··· 4821 4807 4822 4808 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner) 4823 4809 { 4824 - struct bpf_create_map_attr create_attr; 4810 + struct bpf_create_map_params create_attr; 4825 4811 struct bpf_map_def *def = &map->def; 4826 4812 int err = 0; 4827 4813 ··· 4835 4821 create_attr.key_size = def->key_size; 4836 4822 create_attr.value_size = def->value_size; 4837 4823 create_attr.numa_node = map->numa_node; 4824 + create_attr.map_extra = map->map_extra; 4838 4825 4839 4826 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) { 4840 4827 int nr_cpus; ··· 4910 4895 */ 4911 4896 map->fd = 0; 4912 4897 } else { 4913 - map->fd = bpf_create_map_xattr(&create_attr); 4898 + map->fd = libbpf__bpf_create_map_xattr(&create_attr); 4914 4899 } 4915 4900 if (map->fd < 0 && (create_attr.btf_key_type_id || 4916 4901 create_attr.btf_value_type_id)) { ··· 4925 4910 create_attr.btf_value_type_id = 0; 4926 4911 map->btf_key_type_id = 0; 4927 4912 map->btf_value_type_id = 0; 4928 - map->fd = bpf_create_map_xattr(&create_attr); 4913 + map->fd = libbpf__bpf_create_map_xattr(&create_attr); 4929 4914 } 4930 4915 4931 4916 err = map->fd < 0 ? -errno : 0; ··· 8892 8877 if (map->fd >= 0) 8893 8878 return libbpf_err(-EBUSY); 8894 8879 map->def.map_flags = flags; 8880 + return 0; 8881 + } 8882 + 8883 + __u64 bpf_map__map_extra(const struct bpf_map *map) 8884 + { 8885 + return map->map_extra; 8886 + } 8887 + 8888 + int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra) 8889 + { 8890 + if (map->fd >= 0) 8891 + return libbpf_err(-EBUSY); 8892 + map->map_extra = map_extra; 8895 8893 return 0; 8896 8894 } 8897 8895
+3
tools/lib/bpf/libbpf.h
··· 600 600 /* get/set map if_index */ 601 601 LIBBPF_API __u32 bpf_map__ifindex(const struct bpf_map *map); 602 602 LIBBPF_API int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex); 603 + /* get/set map map_extra flags */ 604 + LIBBPF_API __u64 bpf_map__map_extra(const struct bpf_map *map); 605 + LIBBPF_API int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra); 603 606 604 607 typedef void (*bpf_map_clear_priv_t)(struct bpf_map *, void *); 605 608 LIBBPF_API int bpf_map__set_priv(struct bpf_map *map, void *priv,
+2
tools/lib/bpf/libbpf.map
··· 389 389 390 390 LIBBPF_0.6.0 { 391 391 global: 392 + bpf_map__map_extra; 393 + bpf_map__set_map_extra; 392 394 bpf_object__next_map; 393 395 bpf_object__next_program; 394 396 bpf_object__prev_map;
+24 -1
tools/lib/bpf/libbpf_internal.h
··· 193 193 MAP_DEF_NUMA_NODE = 0x080, 194 194 MAP_DEF_PINNING = 0x100, 195 195 MAP_DEF_INNER_MAP = 0x200, 196 + MAP_DEF_MAP_EXTRA = 0x400, 196 197 197 - MAP_DEF_ALL = 0x3ff, /* combination of all above */ 198 + MAP_DEF_ALL = 0x7ff, /* combination of all above */ 198 199 }; 199 200 200 201 struct btf_map_def { ··· 209 208 __u32 map_flags; 210 209 __u32 numa_node; 211 210 __u32 pinning; 211 + __u64 map_extra; 212 212 }; 213 213 214 214 int parse_btf_map_def(const char *map_name, struct btf *btf, ··· 304 302 }; 305 303 306 304 int libbpf__bpf_prog_load(const struct bpf_prog_load_params *load_attr); 305 + 306 + struct bpf_create_map_params { 307 + const char *name; 308 + enum bpf_map_type map_type; 309 + __u32 map_flags; 310 + __u32 key_size; 311 + __u32 value_size; 312 + __u32 max_entries; 313 + __u32 numa_node; 314 + __u32 btf_fd; 315 + __u32 btf_key_type_id; 316 + __u32 btf_value_type_id; 317 + __u32 map_ifindex; 318 + union { 319 + __u32 inner_map_fd; 320 + __u32 btf_vmlinux_value_type_id; 321 + }; 322 + __u64 map_extra; 323 + }; 324 + 325 + int libbpf__bpf_create_map_xattr(const struct bpf_create_map_params *create_attr); 307 326 308 327 struct btf *btf_get_from_fd(int btf_fd, struct btf *base_btf); 309 328 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,