Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libbpf: Hashmap interface update to allow both long and void* keys/values

An update for libbpf's hashmap interface from void* -> void* to a
polymorphic one, allowing both long and void* keys and values.

This simplifies many use cases in libbpf as hashmaps there are mostly
integer to integer.

Perf copies hashmap implementation from libbpf and has to be
updated as well.

Changes to libbpf, selftests/bpf and perf are packed as a single
commit to avoid compilation issues with any future bisect.

Polymorphic interface is achieved by hiding hashmap interface
functions behind auxiliary macros that take care of necessary
type casts, for example:

#define hashmap_cast_ptr(p) \
({ \
_Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long),\
#p " pointee should be a long-sized integer or a pointer"); \
(long *)(p); \
})

bool hashmap_find(const struct hashmap *map, long key, long *value);

#define hashmap__find(map, key, value) \
hashmap_find((map), (long)(key), hashmap_cast_ptr(value))

- hashmap__find macro casts key and value parameters to long
and long* respectively
- hashmap_cast_ptr ensures that value pointer points to a memory
of appropriate size.

This hack was suggested by Andrii Nakryiko in [1].
This is a follow up for [2].

[1] https://lore.kernel.org/bpf/CAEf4BzZ8KFneEJxFAaNCCFPGqp20hSpS2aCj76uRk3-qZUH5xg@mail.gmail.com/
[2] https://lore.kernel.org/bpf/af1facf9-7bc8-8a3d-0db4-7b3f333589a2@meta.com/T/#m65b28f1d6d969fcd318b556db6a3ad499a42607d

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20221109142611.879983-2-eddyz87@gmail.com

authored by

Eduard Zingerman and committed by
Andrii Nakryiko
c302378b e5659e4e

+410 -340
+9 -16
tools/bpf/bpftool/btf.c
··· 815 815 if (!btf_id) 816 816 continue; 817 817 818 - err = hashmap__append(tab, u32_as_hash_field(btf_id), 819 - u32_as_hash_field(id)); 818 + err = hashmap__append(tab, btf_id, id); 820 819 if (err) { 821 820 p_err("failed to append entry to hashmap for BTF ID %u, object ID %u: %s", 822 821 btf_id, id, strerror(-err)); ··· 874 875 printf("size %uB", info->btf_size); 875 876 876 877 n = 0; 877 - hashmap__for_each_key_entry(btf_prog_table, entry, 878 - u32_as_hash_field(info->id)) { 879 - printf("%s%u", n++ == 0 ? " prog_ids " : ",", 880 - hash_field_as_u32(entry->value)); 878 + hashmap__for_each_key_entry(btf_prog_table, entry, info->id) { 879 + printf("%s%lu", n++ == 0 ? " prog_ids " : ",", entry->value); 881 880 } 882 881 883 882 n = 0; 884 - hashmap__for_each_key_entry(btf_map_table, entry, 885 - u32_as_hash_field(info->id)) { 886 - printf("%s%u", n++ == 0 ? " map_ids " : ",", 887 - hash_field_as_u32(entry->value)); 883 + hashmap__for_each_key_entry(btf_map_table, entry, info->id) { 884 + printf("%s%lu", n++ == 0 ? 
" map_ids " : ",", entry->value); 888 885 } 889 886 890 887 emit_obj_refs_plain(refs_table, info->id, "\n\tpids "); ··· 902 907 903 908 jsonw_name(json_wtr, "prog_ids"); 904 909 jsonw_start_array(json_wtr); /* prog_ids */ 905 - hashmap__for_each_key_entry(btf_prog_table, entry, 906 - u32_as_hash_field(info->id)) { 907 - jsonw_uint(json_wtr, hash_field_as_u32(entry->value)); 910 + hashmap__for_each_key_entry(btf_prog_table, entry, info->id) { 911 + jsonw_uint(json_wtr, entry->value); 908 912 } 909 913 jsonw_end_array(json_wtr); /* prog_ids */ 910 914 911 915 jsonw_name(json_wtr, "map_ids"); 912 916 jsonw_start_array(json_wtr); /* map_ids */ 913 - hashmap__for_each_key_entry(btf_map_table, entry, 914 - u32_as_hash_field(info->id)) { 915 - jsonw_uint(json_wtr, hash_field_as_u32(entry->value)); 917 + hashmap__for_each_key_entry(btf_map_table, entry, info->id) { 918 + jsonw_uint(json_wtr, entry->value); 916 919 } 917 920 jsonw_end_array(json_wtr); /* map_ids */ 918 921
+5 -5
tools/bpf/bpftool/common.c
··· 494 494 goto out_close; 495 495 } 496 496 497 - err = hashmap__append(build_fn_table, u32_as_hash_field(pinned_info.id), path); 497 + err = hashmap__append(build_fn_table, pinned_info.id, path); 498 498 if (err) { 499 499 p_err("failed to append entry to hashmap for ID %u, path '%s': %s", 500 500 pinned_info.id, path, strerror(errno)); ··· 545 545 return; 546 546 547 547 hashmap__for_each_entry(map, entry, bkt) 548 - free(entry->value); 548 + free(entry->pvalue); 549 549 550 550 hashmap__free(map); 551 551 } ··· 1041 1041 return fd; 1042 1042 } 1043 1043 1044 - size_t hash_fn_for_key_as_id(const void *key, void *ctx) 1044 + size_t hash_fn_for_key_as_id(long key, void *ctx) 1045 1045 { 1046 - return (size_t)key; 1046 + return key; 1047 1047 } 1048 1048 1049 - bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx) 1049 + bool equal_fn_for_key_as_id(long k1, long k2, void *ctx) 1050 1050 { 1051 1051 return k1 == k2; 1052 1052 }
+7 -12
tools/bpf/bpftool/gen.c
··· 1660 1660 struct btf *marked_btf; /* btf structure used to mark used types */ 1661 1661 }; 1662 1662 1663 - static size_t btfgen_hash_fn(const void *key, void *ctx) 1663 + static size_t btfgen_hash_fn(long key, void *ctx) 1664 1664 { 1665 - return (size_t)key; 1665 + return key; 1666 1666 } 1667 1667 1668 - static bool btfgen_equal_fn(const void *k1, const void *k2, void *ctx) 1668 + static bool btfgen_equal_fn(long k1, long k2, void *ctx) 1669 1669 { 1670 1670 return k1 == k2; 1671 - } 1672 - 1673 - static void *u32_as_hash_key(__u32 x) 1674 - { 1675 - return (void *)(uintptr_t)x; 1676 1671 } 1677 1672 1678 1673 static void btfgen_free_info(struct btfgen_info *info) ··· 2081 2086 struct bpf_core_spec specs_scratch[3] = {}; 2082 2087 struct bpf_core_relo_res targ_res = {}; 2083 2088 struct bpf_core_cand_list *cands = NULL; 2084 - const void *type_key = u32_as_hash_key(relo->type_id); 2085 2089 const char *sec_name = btf__name_by_offset(btf, sec->sec_name_off); 2086 2090 2087 2091 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && 2088 - !hashmap__find(cand_cache, type_key, (void **)&cands)) { 2092 + !hashmap__find(cand_cache, relo->type_id, &cands)) { 2089 2093 cands = btfgen_find_cands(btf, info->src_btf, relo->type_id); 2090 2094 if (!cands) { 2091 2095 err = -errno; 2092 2096 goto out; 2093 2097 } 2094 2098 2095 - err = hashmap__set(cand_cache, type_key, cands, NULL, NULL); 2099 + err = hashmap__set(cand_cache, relo->type_id, cands, 2100 + NULL, NULL); 2096 2101 if (err) 2097 2102 goto out; 2098 2103 } ··· 2115 2120 2116 2121 if (!IS_ERR_OR_NULL(cand_cache)) { 2117 2122 hashmap__for_each_entry(cand_cache, entry, i) { 2118 - bpf_core_free_cands(entry->value); 2123 + bpf_core_free_cands(entry->pvalue); 2119 2124 } 2120 2125 hashmap__free(cand_cache); 2121 2126 }
+4 -6
tools/bpf/bpftool/link.c
··· 204 204 205 205 jsonw_name(json_wtr, "pinned"); 206 206 jsonw_start_array(json_wtr); 207 - hashmap__for_each_key_entry(link_table, entry, 208 - u32_as_hash_field(info->id)) 209 - jsonw_string(json_wtr, entry->value); 207 + hashmap__for_each_key_entry(link_table, entry, info->id) 208 + jsonw_string(json_wtr, entry->pvalue); 210 209 jsonw_end_array(json_wtr); 211 210 } 212 211 ··· 308 309 if (!hashmap__empty(link_table)) { 309 310 struct hashmap_entry *entry; 310 311 311 - hashmap__for_each_key_entry(link_table, entry, 312 - u32_as_hash_field(info->id)) 313 - printf("\n\tpinned %s", (char *)entry->value); 312 + hashmap__for_each_key_entry(link_table, entry, info->id) 313 + printf("\n\tpinned %s", (char *)entry->pvalue); 314 314 } 315 315 emit_obj_refs_plain(refs_table, info->id, "\n\tpids "); 316 316
+2 -12
tools/bpf/bpftool/main.h
··· 240 240 int print_all_levels(__maybe_unused enum libbpf_print_level level, 241 241 const char *format, va_list args); 242 242 243 - size_t hash_fn_for_key_as_id(const void *key, void *ctx); 244 - bool equal_fn_for_key_as_id(const void *k1, const void *k2, void *ctx); 243 + size_t hash_fn_for_key_as_id(long key, void *ctx); 244 + bool equal_fn_for_key_as_id(long k1, long k2, void *ctx); 245 245 246 246 /* bpf_attach_type_input_str - convert the provided attach type value into a 247 247 * textual representation that we accept for input purposes. ··· 256 256 * returned for unknown bpf_attach_type values. 257 257 */ 258 258 const char *bpf_attach_type_input_str(enum bpf_attach_type t); 259 - 260 - static inline void *u32_as_hash_field(__u32 x) 261 - { 262 - return (void *)(uintptr_t)x; 263 - } 264 - 265 - static inline __u32 hash_field_as_u32(const void *x) 266 - { 267 - return (__u32)(uintptr_t)x; 268 - } 269 259 270 260 static inline bool hashmap__empty(struct hashmap *map) 271 261 {
+4 -6
tools/bpf/bpftool/map.c
··· 518 518 519 519 jsonw_name(json_wtr, "pinned"); 520 520 jsonw_start_array(json_wtr); 521 - hashmap__for_each_key_entry(map_table, entry, 522 - u32_as_hash_field(info->id)) 523 - jsonw_string(json_wtr, entry->value); 521 + hashmap__for_each_key_entry(map_table, entry, info->id) 522 + jsonw_string(json_wtr, entry->pvalue); 524 523 jsonw_end_array(json_wtr); 525 524 } 526 525 ··· 594 595 if (!hashmap__empty(map_table)) { 595 596 struct hashmap_entry *entry; 596 597 597 - hashmap__for_each_key_entry(map_table, entry, 598 - u32_as_hash_field(info->id)) 599 - printf("\n\tpinned %s", (char *)entry->value); 598 + hashmap__for_each_key_entry(map_table, entry, info->id) 599 + printf("\n\tpinned %s", (char *)entry->pvalue); 600 600 } 601 601 602 602 if (frozen_str) {
+8 -8
tools/bpf/bpftool/pids.c
··· 36 36 int err, i; 37 37 void *tmp; 38 38 39 - hashmap__for_each_key_entry(map, entry, u32_as_hash_field(e->id)) { 40 - refs = entry->value; 39 + hashmap__for_each_key_entry(map, entry, e->id) { 40 + refs = entry->pvalue; 41 41 42 42 for (i = 0; i < refs->ref_cnt; i++) { 43 43 if (refs->refs[i].pid == e->pid) ··· 81 81 refs->has_bpf_cookie = e->has_bpf_cookie; 82 82 refs->bpf_cookie = e->bpf_cookie; 83 83 84 - err = hashmap__append(map, u32_as_hash_field(e->id), refs); 84 + err = hashmap__append(map, e->id, refs); 85 85 if (err) 86 86 p_err("failed to append entry to hashmap for ID %u: %s", 87 87 e->id, strerror(errno)); ··· 183 183 return; 184 184 185 185 hashmap__for_each_entry(map, entry, bkt) { 186 - struct obj_refs *refs = entry->value; 186 + struct obj_refs *refs = entry->pvalue; 187 187 188 188 free(refs->refs); 189 189 free(refs); ··· 200 200 if (hashmap__empty(map)) 201 201 return; 202 202 203 - hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) { 204 - struct obj_refs *refs = entry->value; 203 + hashmap__for_each_key_entry(map, entry, id) { 204 + struct obj_refs *refs = entry->pvalue; 205 205 int i; 206 206 207 207 if (refs->ref_cnt == 0) ··· 232 232 if (hashmap__empty(map)) 233 233 return; 234 234 235 - hashmap__for_each_key_entry(map, entry, u32_as_hash_field(id)) { 236 - struct obj_refs *refs = entry->value; 235 + hashmap__for_each_key_entry(map, entry, id) { 236 + struct obj_refs *refs = entry->pvalue; 237 237 int i; 238 238 239 239 if (refs->ref_cnt == 0)
+4 -6
tools/bpf/bpftool/prog.c
··· 486 486 487 487 jsonw_name(json_wtr, "pinned"); 488 488 jsonw_start_array(json_wtr); 489 - hashmap__for_each_key_entry(prog_table, entry, 490 - u32_as_hash_field(info->id)) 491 - jsonw_string(json_wtr, entry->value); 489 + hashmap__for_each_key_entry(prog_table, entry, info->id) 490 + jsonw_string(json_wtr, entry->pvalue); 492 491 jsonw_end_array(json_wtr); 493 492 } 494 493 ··· 560 561 if (!hashmap__empty(prog_table)) { 561 562 struct hashmap_entry *entry; 562 563 563 - hashmap__for_each_key_entry(prog_table, entry, 564 - u32_as_hash_field(info->id)) 565 - printf("\n\tpinned %s", (char *)entry->value); 564 + hashmap__for_each_key_entry(prog_table, entry, info->id) 565 + printf("\n\tpinned %s", (char *)entry->pvalue); 566 566 } 567 567 568 568 if (info->btf_id)
+20 -21
tools/lib/bpf/btf.c
··· 1559 1559 static int btf_rewrite_str(__u32 *str_off, void *ctx) 1560 1560 { 1561 1561 struct btf_pipe *p = ctx; 1562 - void *mapped_off; 1562 + long mapped_off; 1563 1563 int off, err; 1564 1564 1565 1565 if (!*str_off) /* nothing to do for empty strings */ 1566 1566 return 0; 1567 1567 1568 1568 if (p->str_off_map && 1569 - hashmap__find(p->str_off_map, (void *)(long)*str_off, &mapped_off)) { 1570 - *str_off = (__u32)(long)mapped_off; 1569 + hashmap__find(p->str_off_map, *str_off, &mapped_off)) { 1570 + *str_off = mapped_off; 1571 1571 return 0; 1572 1572 } 1573 1573 ··· 1579 1579 * performing expensive string comparisons. 1580 1580 */ 1581 1581 if (p->str_off_map) { 1582 - err = hashmap__append(p->str_off_map, (void *)(long)*str_off, (void *)(long)off); 1582 + err = hashmap__append(p->str_off_map, *str_off, off); 1583 1583 if (err) 1584 1584 return err; 1585 1585 } ··· 1630 1630 return 0; 1631 1631 } 1632 1632 1633 - static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx); 1634 - static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx); 1633 + static size_t btf_dedup_identity_hash_fn(long key, void *ctx); 1634 + static bool btf_dedup_equal_fn(long k1, long k2, void *ctx); 1635 1635 1636 1636 int btf__add_btf(struct btf *btf, const struct btf *src_btf) 1637 1637 { ··· 3126 3126 } 3127 3127 3128 3128 #define for_each_dedup_cand(d, node, hash) \ 3129 - hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash) 3129 + hashmap__for_each_key_entry(d->dedup_table, node, hash) 3130 3130 3131 3131 static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id) 3132 3132 { 3133 - return hashmap__append(d->dedup_table, 3134 - (void *)hash, (void *)(long)type_id); 3133 + return hashmap__append(d->dedup_table, hash, type_id); 3135 3134 } 3136 3135 3137 3136 static int btf_dedup_hypot_map_add(struct btf_dedup *d, ··· 3177 3178 free(d); 3178 3179 } 3179 3180 3180 - static size_t btf_dedup_identity_hash_fn(const void *key, 
void *ctx) 3181 + static size_t btf_dedup_identity_hash_fn(long key, void *ctx) 3181 3182 { 3182 - return (size_t)key; 3183 + return key; 3183 3184 } 3184 3185 3185 - static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx) 3186 + static size_t btf_dedup_collision_hash_fn(long key, void *ctx) 3186 3187 { 3187 3188 return 0; 3188 3189 } 3189 3190 3190 - static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx) 3191 + static bool btf_dedup_equal_fn(long k1, long k2, void *ctx) 3191 3192 { 3192 3193 return k1 == k2; 3193 3194 } ··· 3749 3750 case BTF_KIND_INT: 3750 3751 h = btf_hash_int_decl_tag(t); 3751 3752 for_each_dedup_cand(d, hash_entry, h) { 3752 - cand_id = (__u32)(long)hash_entry->value; 3753 + cand_id = hash_entry->value; 3753 3754 cand = btf_type_by_id(d->btf, cand_id); 3754 3755 if (btf_equal_int_tag(t, cand)) { 3755 3756 new_id = cand_id; ··· 3762 3763 case BTF_KIND_ENUM64: 3763 3764 h = btf_hash_enum(t); 3764 3765 for_each_dedup_cand(d, hash_entry, h) { 3765 - cand_id = (__u32)(long)hash_entry->value; 3766 + cand_id = hash_entry->value; 3766 3767 cand = btf_type_by_id(d->btf, cand_id); 3767 3768 if (btf_equal_enum(t, cand)) { 3768 3769 new_id = cand_id; ··· 3784 3785 case BTF_KIND_FLOAT: 3785 3786 h = btf_hash_common(t); 3786 3787 for_each_dedup_cand(d, hash_entry, h) { 3787 - cand_id = (__u32)(long)hash_entry->value; 3788 + cand_id = hash_entry->value; 3788 3789 cand = btf_type_by_id(d->btf, cand_id); 3789 3790 if (btf_equal_common(t, cand)) { 3790 3791 new_id = cand_id; ··· 4287 4288 4288 4289 h = btf_hash_struct(t); 4289 4290 for_each_dedup_cand(d, hash_entry, h) { 4290 - __u32 cand_id = (__u32)(long)hash_entry->value; 4291 + __u32 cand_id = hash_entry->value; 4291 4292 int eq; 4292 4293 4293 4294 /* ··· 4392 4393 4393 4394 h = btf_hash_common(t); 4394 4395 for_each_dedup_cand(d, hash_entry, h) { 4395 - cand_id = (__u32)(long)hash_entry->value; 4396 + cand_id = hash_entry->value; 4396 4397 cand = btf_type_by_id(d->btf, 
cand_id); 4397 4398 if (btf_equal_common(t, cand)) { 4398 4399 new_id = cand_id; ··· 4409 4410 4410 4411 h = btf_hash_int_decl_tag(t); 4411 4412 for_each_dedup_cand(d, hash_entry, h) { 4412 - cand_id = (__u32)(long)hash_entry->value; 4413 + cand_id = hash_entry->value; 4413 4414 cand = btf_type_by_id(d->btf, cand_id); 4414 4415 if (btf_equal_int_tag(t, cand)) { 4415 4416 new_id = cand_id; ··· 4433 4434 4434 4435 h = btf_hash_array(t); 4435 4436 for_each_dedup_cand(d, hash_entry, h) { 4436 - cand_id = (__u32)(long)hash_entry->value; 4437 + cand_id = hash_entry->value; 4437 4438 cand = btf_type_by_id(d->btf, cand_id); 4438 4439 if (btf_equal_array(t, cand)) { 4439 4440 new_id = cand_id; ··· 4465 4466 4466 4467 h = btf_hash_fnproto(t); 4467 4468 for_each_dedup_cand(d, hash_entry, h) { 4468 - cand_id = (__u32)(long)hash_entry->value; 4469 + cand_id = hash_entry->value; 4469 4470 cand = btf_type_by_id(d->btf, cand_id); 4470 4471 if (btf_equal_fnproto(t, cand)) { 4471 4472 new_id = cand_id;
+7 -8
tools/lib/bpf/btf_dump.c
··· 117 117 struct btf_dump_data *typed_dump; 118 118 }; 119 119 120 - static size_t str_hash_fn(const void *key, void *ctx) 120 + static size_t str_hash_fn(long key, void *ctx) 121 121 { 122 - return str_hash(key); 122 + return str_hash((void *)key); 123 123 } 124 124 125 - static bool str_equal_fn(const void *a, const void *b, void *ctx) 125 + static bool str_equal_fn(long a, long b, void *ctx) 126 126 { 127 - return strcmp(a, b) == 0; 127 + return strcmp((void *)a, (void *)b) == 0; 128 128 } 129 129 130 130 static const char *btf_name_of(const struct btf_dump *d, __u32 name_off) ··· 225 225 struct hashmap_entry *cur; 226 226 227 227 hashmap__for_each_entry(map, cur, bkt) 228 - free((void *)cur->key); 228 + free((void *)cur->pkey); 229 229 230 230 hashmap__free(map); 231 231 } ··· 1543 1543 if (!new_name) 1544 1544 return 1; 1545 1545 1546 - hashmap__find(name_map, orig_name, (void **)&dup_cnt); 1546 + hashmap__find(name_map, orig_name, &dup_cnt); 1547 1547 dup_cnt++; 1548 1548 1549 - err = hashmap__set(name_map, new_name, (void *)dup_cnt, 1550 - (const void **)&old_name, NULL); 1549 + err = hashmap__set(name_map, new_name, dup_cnt, &old_name, NULL); 1551 1550 if (err) 1552 1551 free(new_name); 1553 1552
+9 -9
tools/lib/bpf/hashmap.c
··· 128 128 } 129 129 130 130 static bool hashmap_find_entry(const struct hashmap *map, 131 - const void *key, size_t hash, 131 + const long key, size_t hash, 132 132 struct hashmap_entry ***pprev, 133 133 struct hashmap_entry **entry) 134 134 { ··· 151 151 return false; 152 152 } 153 153 154 - int hashmap__insert(struct hashmap *map, const void *key, void *value, 155 - enum hashmap_insert_strategy strategy, 156 - const void **old_key, void **old_value) 154 + int hashmap_insert(struct hashmap *map, long key, long value, 155 + enum hashmap_insert_strategy strategy, 156 + long *old_key, long *old_value) 157 157 { 158 158 struct hashmap_entry *entry; 159 159 size_t h; 160 160 int err; 161 161 162 162 if (old_key) 163 - *old_key = NULL; 163 + *old_key = 0; 164 164 if (old_value) 165 - *old_value = NULL; 165 + *old_value = 0; 166 166 167 167 h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); 168 168 if (strategy != HASHMAP_APPEND && ··· 203 203 return 0; 204 204 } 205 205 206 - bool hashmap__find(const struct hashmap *map, const void *key, void **value) 206 + bool hashmap_find(const struct hashmap *map, long key, long *value) 207 207 { 208 208 struct hashmap_entry *entry; 209 209 size_t h; ··· 217 217 return true; 218 218 } 219 219 220 - bool hashmap__delete(struct hashmap *map, const void *key, 221 - const void **old_key, void **old_value) 220 + bool hashmap_delete(struct hashmap *map, long key, 221 + long *old_key, long *old_value) 222 222 { 223 223 struct hashmap_entry **pprev, *entry; 224 224 size_t h;
+56 -34
tools/lib/bpf/hashmap.h
··· 40 40 return h; 41 41 } 42 42 43 - typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx); 44 - typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx); 43 + typedef size_t (*hashmap_hash_fn)(long key, void *ctx); 44 + typedef bool (*hashmap_equal_fn)(long key1, long key2, void *ctx); 45 45 46 + /* 47 + * Hashmap interface is polymorphic, keys and values could be either 48 + * long-sized integers or pointers, this is achieved as follows: 49 + * - interface functions that operate on keys and values are hidden 50 + * behind auxiliary macros, e.g. hashmap_insert <-> hashmap__insert; 51 + * - these auxiliary macros cast the key and value parameters as 52 + * long or long *, so the user does not have to specify the casts explicitly; 53 + * - for pointer parameters (e.g. old_key) the size of the pointed 54 + * type is verified by hashmap_cast_ptr using _Static_assert; 55 + * - when iterating using hashmap__for_each_* forms 56 + * hasmap_entry->key should be used for integer keys and 57 + * hasmap_entry->pkey should be used for pointer keys, 58 + * same goes for values. 59 + */ 46 60 struct hashmap_entry { 47 - const void *key; 48 - void *value; 61 + union { 62 + long key; 63 + const void *pkey; 64 + }; 65 + union { 66 + long value; 67 + void *pvalue; 68 + }; 49 69 struct hashmap_entry *next; 50 70 }; 51 71 ··· 122 102 HASHMAP_APPEND, 123 103 }; 124 104 105 + #define hashmap_cast_ptr(p) ({ \ 106 + _Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long), \ 107 + #p " pointee should be a long-sized integer or a pointer"); \ 108 + (long *)(p); \ 109 + }) 110 + 125 111 /* 126 112 * hashmap__insert() adds key/value entry w/ various semantics, depending on 127 113 * provided strategy value. If a given key/value pair replaced already ··· 135 109 * through old_key and old_value to allow calling code do proper memory 136 110 * management. 
137 111 */ 138 - int hashmap__insert(struct hashmap *map, const void *key, void *value, 139 - enum hashmap_insert_strategy strategy, 140 - const void **old_key, void **old_value); 112 + int hashmap_insert(struct hashmap *map, long key, long value, 113 + enum hashmap_insert_strategy strategy, 114 + long *old_key, long *old_value); 141 115 142 - static inline int hashmap__add(struct hashmap *map, 143 - const void *key, void *value) 144 - { 145 - return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL); 146 - } 116 + #define hashmap__insert(map, key, value, strategy, old_key, old_value) \ 117 + hashmap_insert((map), (long)(key), (long)(value), (strategy), \ 118 + hashmap_cast_ptr(old_key), \ 119 + hashmap_cast_ptr(old_value)) 147 120 148 - static inline int hashmap__set(struct hashmap *map, 149 - const void *key, void *value, 150 - const void **old_key, void **old_value) 151 - { 152 - return hashmap__insert(map, key, value, HASHMAP_SET, 153 - old_key, old_value); 154 - } 121 + #define hashmap__add(map, key, value) \ 122 + hashmap__insert((map), (key), (value), HASHMAP_ADD, NULL, NULL) 155 123 156 - static inline int hashmap__update(struct hashmap *map, 157 - const void *key, void *value, 158 - const void **old_key, void **old_value) 159 - { 160 - return hashmap__insert(map, key, value, HASHMAP_UPDATE, 161 - old_key, old_value); 162 - } 124 + #define hashmap__set(map, key, value, old_key, old_value) \ 125 + hashmap__insert((map), (key), (value), HASHMAP_SET, (old_key), (old_value)) 163 126 164 - static inline int hashmap__append(struct hashmap *map, 165 - const void *key, void *value) 166 - { 167 - return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL); 168 - } 127 + #define hashmap__update(map, key, value, old_key, old_value) \ 128 + hashmap__insert((map), (key), (value), HASHMAP_UPDATE, (old_key), (old_value)) 169 129 170 - bool hashmap__delete(struct hashmap *map, const void *key, 171 - const void **old_key, void **old_value); 130 + #define 
hashmap__append(map, key, value) \ 131 + hashmap__insert((map), (key), (value), HASHMAP_APPEND, NULL, NULL) 172 132 173 - bool hashmap__find(const struct hashmap *map, const void *key, void **value); 133 + bool hashmap_delete(struct hashmap *map, long key, long *old_key, long *old_value); 134 + 135 + #define hashmap__delete(map, key, old_key, old_value) \ 136 + hashmap_delete((map), (long)(key), \ 137 + hashmap_cast_ptr(old_key), \ 138 + hashmap_cast_ptr(old_value)) 139 + 140 + bool hashmap_find(const struct hashmap *map, long key, long *value); 141 + 142 + #define hashmap__find(map, key, value) \ 143 + hashmap_find((map), (long)(key), hashmap_cast_ptr(value)) 174 144 175 145 /* 176 146 * hashmap__for_each_entry - iterate over all entries in hashmap
+6 -12
tools/lib/bpf/libbpf.c
··· 5601 5601 return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32); 5602 5602 } 5603 5603 5604 - static size_t bpf_core_hash_fn(const void *key, void *ctx) 5604 + static size_t bpf_core_hash_fn(const long key, void *ctx) 5605 5605 { 5606 - return (size_t)key; 5606 + return key; 5607 5607 } 5608 5608 5609 - static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx) 5609 + static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx) 5610 5610 { 5611 5611 return k1 == k2; 5612 - } 5613 - 5614 - static void *u32_as_hash_key(__u32 x) 5615 - { 5616 - return (void *)(uintptr_t)x; 5617 5612 } 5618 5613 5619 5614 static int record_relo_core(struct bpf_program *prog, ··· 5653 5658 struct bpf_core_relo_res *targ_res) 5654 5659 { 5655 5660 struct bpf_core_spec specs_scratch[3] = {}; 5656 - const void *type_key = u32_as_hash_key(relo->type_id); 5657 5661 struct bpf_core_cand_list *cands = NULL; 5658 5662 const char *prog_name = prog->name; 5659 5663 const struct btf_type *local_type; ··· 5669 5675 return -EINVAL; 5670 5676 5671 5677 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL && 5672 - !hashmap__find(cand_cache, type_key, (void **)&cands)) { 5678 + !hashmap__find(cand_cache, local_id, &cands)) { 5673 5679 cands = bpf_core_find_cands(prog->obj, local_btf, local_id); 5674 5680 if (IS_ERR(cands)) { 5675 5681 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n", ··· 5677 5683 local_name, PTR_ERR(cands)); 5678 5684 return PTR_ERR(cands); 5679 5685 } 5680 - err = hashmap__set(cand_cache, type_key, cands, NULL, NULL); 5686 + err = hashmap__set(cand_cache, local_id, cands, NULL, NULL); 5681 5687 if (err) { 5682 5688 bpf_core_free_cands(cands); 5683 5689 return err; ··· 5800 5806 5801 5807 if (!IS_ERR_OR_NULL(cand_cache)) { 5802 5808 hashmap__for_each_entry(cand_cache, entry, i) { 5803 - bpf_core_free_cands(entry->value); 5809 + bpf_core_free_cands(entry->pvalue); 5804 5810 } 5805 5811 
hashmap__free(cand_cache); 5806 5812 }
+9 -9
tools/lib/bpf/strset.c
··· 19 19 struct hashmap *strs_hash; 20 20 }; 21 21 22 - static size_t strset_hash_fn(const void *key, void *ctx) 22 + static size_t strset_hash_fn(long key, void *ctx) 23 23 { 24 24 const struct strset *s = ctx; 25 - const char *str = s->strs_data + (long)key; 25 + const char *str = s->strs_data + key; 26 26 27 27 return str_hash(str); 28 28 } 29 29 30 - static bool strset_equal_fn(const void *key1, const void *key2, void *ctx) 30 + static bool strset_equal_fn(long key1, long key2, void *ctx) 31 31 { 32 32 const struct strset *s = ctx; 33 - const char *str1 = s->strs_data + (long)key1; 34 - const char *str2 = s->strs_data + (long)key2; 33 + const char *str1 = s->strs_data + key1; 34 + const char *str2 = s->strs_data + key2; 35 35 36 36 return strcmp(str1, str2) == 0; 37 37 } ··· 67 67 /* hashmap__add() returns EEXIST if string with the same 68 68 * content already is in the hash map 69 69 */ 70 - err = hashmap__add(hash, (void *)off, (void *)off); 70 + err = hashmap__add(hash, off, off); 71 71 if (err == -EEXIST) 72 72 continue; /* duplicate */ 73 73 if (err) ··· 127 127 new_off = set->strs_data_len; 128 128 memcpy(p, s, len); 129 129 130 - if (hashmap__find(set->strs_hash, (void *)new_off, (void **)&old_off)) 130 + if (hashmap__find(set->strs_hash, new_off, &old_off)) 131 131 return old_off; 132 132 133 133 return -ENOENT; ··· 165 165 * contents doesn't exist already (HASHMAP_ADD strategy). If such 166 166 * string exists, we'll get its offset in old_off (that's old_key). 167 167 */ 168 - err = hashmap__insert(set->strs_hash, (void *)new_off, (void *)new_off, 169 - HASHMAP_ADD, (const void **)&old_off, NULL); 168 + err = hashmap__insert(set->strs_hash, new_off, new_off, 169 + HASHMAP_ADD, &old_off, NULL); 170 170 if (err == -EEXIST) 171 171 return old_off; /* duplicated string, return existing offset */ 172 172 if (err)
+12 -16
tools/lib/bpf/usdt.c
··· 873 873 free(usdt_link); 874 874 } 875 875 876 - static size_t specs_hash_fn(const void *key, void *ctx) 876 + static size_t specs_hash_fn(long key, void *ctx) 877 877 { 878 - const char *s = key; 879 - 880 - return str_hash(s); 878 + return str_hash((char *)key); 881 879 } 882 880 883 - static bool specs_equal_fn(const void *key1, const void *key2, void *ctx) 881 + static bool specs_equal_fn(long key1, long key2, void *ctx) 884 882 { 885 - const char *s1 = key1; 886 - const char *s2 = key2; 887 - 888 - return strcmp(s1, s2) == 0; 883 + return strcmp((char *)key1, (char *)key2) == 0; 889 884 } 890 885 891 886 static int allocate_spec_id(struct usdt_manager *man, struct hashmap *specs_hash, 892 887 struct bpf_link_usdt *link, struct usdt_target *target, 893 888 int *spec_id, bool *is_new) 894 889 { 895 - void *tmp; 890 + long tmp; 891 + void *new_ids; 896 892 int err; 897 893 898 894 /* check if we already allocated spec ID for this spec string */ 899 895 if (hashmap__find(specs_hash, target->spec_str, &tmp)) { 900 - *spec_id = (long)tmp; 896 + *spec_id = tmp; 901 897 *is_new = false; 902 898 return 0; 903 899 } ··· 901 905 /* otherwise it's a new ID that needs to be set up in specs map and 902 906 * returned back to usdt_manager when USDT link is detached 903 907 */ 904 - tmp = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids)); 905 - if (!tmp) 908 + new_ids = libbpf_reallocarray(link->spec_ids, link->spec_cnt + 1, sizeof(*link->spec_ids)); 909 + if (!new_ids) 906 910 return -ENOMEM; 907 - link->spec_ids = tmp; 911 + link->spec_ids = new_ids; 908 912 909 913 /* get next free spec ID, giving preference to free list, if not empty */ 910 914 if (man->free_spec_cnt) { 911 915 *spec_id = man->free_spec_ids[man->free_spec_cnt - 1]; 912 916 913 917 /* cache spec ID for current spec string for future lookups */ 914 - err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id); 918 + err = hashmap__add(specs_hash, 
target->spec_str, *spec_id); 915 919 if (err) 916 920 return err; 917 921 ··· 924 928 *spec_id = man->next_free_spec_id; 925 929 926 930 /* cache spec ID for current spec string for future lookups */ 927 - err = hashmap__add(specs_hash, target->spec_str, (void *)(long)*spec_id); 931 + err = hashmap__add(specs_hash, target->spec_str, *spec_id); 928 932 if (err) 929 933 return err; 930 934
+10 -18
tools/perf/tests/expr.c
··· 130 130 expr__find_ids("FOO + BAR + BAZ + BOZO", "FOO", 131 131 ctx) == 0); 132 132 TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 3); 133 - TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAR", 134 - (void **)&val_ptr)); 135 - TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAZ", 136 - (void **)&val_ptr)); 137 - TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BOZO", 138 - (void **)&val_ptr)); 133 + TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAR", &val_ptr)); 134 + TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BAZ", &val_ptr)); 135 + TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "BOZO", &val_ptr)); 139 136 140 137 expr__ctx_clear(ctx); 141 138 ctx->sctx.runtime = 3; ··· 140 143 expr__find_ids("EVENT1\\,param\\=?@ + EVENT2\\,param\\=?@", 141 144 NULL, ctx) == 0); 142 145 TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 2); 143 - TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1,param=3@", 144 - (void **)&val_ptr)); 145 - TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT2,param=3@", 146 - (void **)&val_ptr)); 146 + TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT1,param=3@", &val_ptr)); 147 + TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "EVENT2,param=3@", &val_ptr)); 147 148 148 149 expr__ctx_clear(ctx); 149 150 TEST_ASSERT_VAL("find ids", 150 151 expr__find_ids("dash\\-event1 - dash\\-event2", 151 152 NULL, ctx) == 0); 152 153 TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 2); 153 - TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event1", 154 - (void **)&val_ptr)); 155 - TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event2", 156 - (void **)&val_ptr)); 154 + TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event1", &val_ptr)); 155 + TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, "dash-event2", &val_ptr)); 157 156 158 157 /* Only EVENT1 or EVENT2 need be measured depending on the value of smt_on. 
*/ 159 158 { ··· 167 174 TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1); 168 175 TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, 169 176 smton ? "EVENT1" : "EVENT2", 170 - (void **)&val_ptr)); 177 + &val_ptr)); 171 178 172 179 expr__ctx_clear(ctx); 173 180 TEST_ASSERT_VAL("find ids", ··· 176 183 TEST_ASSERT_VAL("find ids", hashmap__size(ctx->ids) == 1); 177 184 TEST_ASSERT_VAL("find ids", hashmap__find(ctx->ids, 178 185 corewide ? "EVENT1" : "EVENT2", 179 - (void **)&val_ptr)); 186 + &val_ptr)); 180 187 181 188 } 182 189 /* The expression is a constant 1.0 without needing to evaluate EVENT1. */ ··· 213 220 expr__find_ids("source_count(EVENT1)", 214 221 NULL, ctx) == 0); 215 222 TEST_ASSERT_VAL("source count", hashmap__size(ctx->ids) == 1); 216 - TEST_ASSERT_VAL("source count", hashmap__find(ctx->ids, "EVENT1", 217 - (void **)&val_ptr)); 223 + TEST_ASSERT_VAL("source count", hashmap__find(ctx->ids, "EVENT1", &val_ptr)); 218 224 219 225 expr__ctx_free(ctx); 220 226
+3 -3
tools/perf/tests/pmu-events.c
··· 986 986 */ 987 987 i = 1; 988 988 hashmap__for_each_entry(ctx->ids, cur, bkt) 989 - expr__add_id_val(ctx, strdup(cur->key), i++); 989 + expr__add_id_val(ctx, strdup(cur->pkey), i++); 990 990 991 991 hashmap__for_each_entry(ctx->ids, cur, bkt) { 992 - if (check_parse_fake(cur->key)) { 992 + if (check_parse_fake(cur->pkey)) { 993 993 pr_err("check_parse_fake failed\n"); 994 994 goto out; 995 995 } ··· 1003 1003 */ 1004 1004 i = 1024; 1005 1005 hashmap__for_each_entry(ctx->ids, cur, bkt) 1006 - expr__add_id_val(ctx, strdup(cur->key), i--); 1006 + expr__add_id_val(ctx, strdup(cur->pkey), i--); 1007 1007 if (expr__parse(&result, ctx, str)) { 1008 1008 pr_err("expr__parse failed\n"); 1009 1009 ret = -1;
+5 -6
tools/perf/util/bpf-loader.c
··· 318 318 return; 319 319 320 320 hashmap__for_each_entry(bpf_program_hash, cur, bkt) 321 - clear_prog_priv(cur->key, cur->value); 321 + clear_prog_priv(cur->pkey, cur->pvalue); 322 322 323 323 hashmap__free(bpf_program_hash); 324 324 bpf_program_hash = NULL; ··· 339 339 bpf_map_hash_free(); 340 340 } 341 341 342 - static size_t ptr_hash(const void *__key, void *ctx __maybe_unused) 342 + static size_t ptr_hash(const long __key, void *ctx __maybe_unused) 343 343 { 344 - return (size_t) __key; 344 + return __key; 345 345 } 346 346 347 - static bool ptr_equal(const void *key1, const void *key2, 348 - void *ctx __maybe_unused) 347 + static bool ptr_equal(long key1, long key2, void *ctx __maybe_unused) 349 348 { 350 349 return key1 == key2; 351 350 } ··· 1184 1185 return; 1185 1186 1186 1187 hashmap__for_each_entry(bpf_map_hash, cur, bkt) 1187 - bpf_map_priv__clear(cur->key, cur->value); 1188 + bpf_map_priv__clear(cur->pkey, cur->pvalue); 1188 1189 1189 1190 hashmap__free(bpf_map_hash); 1190 1191 bpf_map_hash = NULL;
+1 -1
tools/perf/util/evsel.c
··· 3123 3123 3124 3124 if (evsel->per_pkg_mask) { 3125 3125 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt) 3126 - free((char *)cur->key); 3126 + free((void *)cur->pkey); 3127 3127 3128 3128 hashmap__clear(evsel->per_pkg_mask); 3129 3129 }
+15 -21
tools/perf/util/expr.c
··· 46 46 } kind; 47 47 }; 48 48 49 - static size_t key_hash(const void *key, void *ctx __maybe_unused) 49 + static size_t key_hash(long key, void *ctx __maybe_unused) 50 50 { 51 51 const char *str = (const char *)key; 52 52 size_t hash = 0; ··· 59 59 return hash; 60 60 } 61 61 62 - static bool key_equal(const void *key1, const void *key2, 63 - void *ctx __maybe_unused) 62 + static bool key_equal(long key1, long key2, void *ctx __maybe_unused) 64 63 { 65 64 return !strcmp((const char *)key1, (const char *)key2); 66 65 } ··· 83 84 return; 84 85 85 86 hashmap__for_each_entry(ids, cur, bkt) { 86 - free((char *)cur->key); 87 - free(cur->value); 87 + free((void *)cur->pkey); 88 + free((void *)cur->pvalue); 88 89 } 89 90 90 91 hashmap__free(ids); ··· 96 97 char *old_key = NULL; 97 98 int ret; 98 99 99 - ret = hashmap__set(ids, id, data_ptr, 100 - (const void **)&old_key, (void **)&old_data); 100 + ret = hashmap__set(ids, id, data_ptr, &old_key, &old_data); 101 101 if (ret) 102 102 free(data_ptr); 103 103 free(old_key); ··· 125 127 ids2 = tmp; 126 128 } 127 129 hashmap__for_each_entry(ids2, cur, bkt) { 128 - ret = hashmap__set(ids1, cur->key, cur->value, 129 - (const void **)&old_key, (void **)&old_data); 130 + ret = hashmap__set(ids1, cur->key, cur->value, &old_key, &old_data); 130 131 free(old_key); 131 132 free(old_data); 132 133 ··· 166 169 data_ptr->val.source_count = source_count; 167 170 data_ptr->kind = EXPR_ID_DATA__VALUE; 168 171 169 - ret = hashmap__set(ctx->ids, id, data_ptr, 170 - (const void **)&old_key, (void **)&old_data); 172 + ret = hashmap__set(ctx->ids, id, data_ptr, &old_key, &old_data); 171 173 if (ret) 172 174 free(data_ptr); 173 175 free(old_key); ··· 201 205 data_ptr->ref.metric_expr = ref->metric_expr; 202 206 data_ptr->kind = EXPR_ID_DATA__REF; 203 207 204 - ret = hashmap__set(ctx->ids, name, data_ptr, 205 - (const void **)&old_key, (void **)&old_data); 208 + ret = hashmap__set(ctx->ids, name, data_ptr, &old_key, &old_data); 206 209 if (ret) 207 
210 free(data_ptr); 208 211 ··· 216 221 int expr__get_id(struct expr_parse_ctx *ctx, const char *id, 217 222 struct expr_id_data **data) 218 223 { 219 - return hashmap__find(ctx->ids, id, (void **)data) ? 0 : -1; 224 + return hashmap__find(ctx->ids, id, data) ? 0 : -1; 220 225 } 221 226 222 227 bool expr__subset_of_ids(struct expr_parse_ctx *haystack, ··· 227 232 struct expr_id_data *data; 228 233 229 234 hashmap__for_each_entry(needles->ids, cur, bkt) { 230 - if (expr__get_id(haystack, cur->key, &data)) 235 + if (expr__get_id(haystack, cur->pkey, &data)) 231 236 return false; 232 237 } 233 238 return true; ··· 277 282 struct expr_id_data *old_val = NULL; 278 283 char *old_key = NULL; 279 284 280 - hashmap__delete(ctx->ids, id, 281 - (const void **)&old_key, (void **)&old_val); 285 + hashmap__delete(ctx->ids, id, &old_key, &old_val); 282 286 free(old_key); 283 287 free(old_val); 284 288 } ··· 308 314 size_t bkt; 309 315 310 316 hashmap__for_each_entry(ctx->ids, cur, bkt) { 311 - free((char *)cur->key); 312 - free(cur->value); 317 + free((void *)cur->pkey); 318 + free(cur->pvalue); 313 319 } 314 320 hashmap__clear(ctx->ids); 315 321 } ··· 324 330 325 331 free(ctx->sctx.user_requested_cpu_list); 326 332 hashmap__for_each_entry(ctx->ids, cur, bkt) { 327 - free((char *)cur->key); 328 - free(cur->value); 333 + free((void *)cur->pkey); 334 + free(cur->pvalue); 329 335 } 330 336 hashmap__free(ctx->ids); 331 337 free(ctx);
+9 -9
tools/perf/util/hashmap.c
··· 128 128 } 129 129 130 130 static bool hashmap_find_entry(const struct hashmap *map, 131 - const void *key, size_t hash, 131 + const long key, size_t hash, 132 132 struct hashmap_entry ***pprev, 133 133 struct hashmap_entry **entry) 134 134 { ··· 151 151 return false; 152 152 } 153 153 154 - int hashmap__insert(struct hashmap *map, const void *key, void *value, 155 - enum hashmap_insert_strategy strategy, 156 - const void **old_key, void **old_value) 154 + int hashmap_insert(struct hashmap *map, long key, long value, 155 + enum hashmap_insert_strategy strategy, 156 + long *old_key, long *old_value) 157 157 { 158 158 struct hashmap_entry *entry; 159 159 size_t h; 160 160 int err; 161 161 162 162 if (old_key) 163 - *old_key = NULL; 163 + *old_key = 0; 164 164 if (old_value) 165 - *old_value = NULL; 165 + *old_value = 0; 166 166 167 167 h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits); 168 168 if (strategy != HASHMAP_APPEND && ··· 203 203 return 0; 204 204 } 205 205 206 - bool hashmap__find(const struct hashmap *map, const void *key, void **value) 206 + bool hashmap_find(const struct hashmap *map, long key, long *value) 207 207 { 208 208 struct hashmap_entry *entry; 209 209 size_t h; ··· 217 217 return true; 218 218 } 219 219 220 - bool hashmap__delete(struct hashmap *map, const void *key, 221 - const void **old_key, void **old_value) 220 + bool hashmap_delete(struct hashmap *map, long key, 221 + long *old_key, long *old_value) 222 222 { 223 223 struct hashmap_entry **pprev, *entry; 224 224 size_t h;
+56 -34
tools/perf/util/hashmap.h
··· 40 40 return h; 41 41 } 42 42 43 - typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx); 44 - typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx); 43 + typedef size_t (*hashmap_hash_fn)(long key, void *ctx); 44 + typedef bool (*hashmap_equal_fn)(long key1, long key2, void *ctx); 45 45 46 + /* 47 + * Hashmap interface is polymorphic, keys and values could be either 48 + * long-sized integers or pointers, this is achieved as follows: 49 + * - interface functions that operate on keys and values are hidden 50 + * behind auxiliary macros, e.g. hashmap_insert <-> hashmap__insert; 51 + * - these auxiliary macros cast the key and value parameters as 52 + * long or long *, so the user does not have to specify the casts explicitly; 53 + * - for pointer parameters (e.g. old_key) the size of the pointed 54 + * type is verified by hashmap_cast_ptr using _Static_assert; 55 + * - when iterating using hashmap__for_each_* forms 56 + * hasmap_entry->key should be used for integer keys and 57 + * hasmap_entry->pkey should be used for pointer keys, 58 + * same goes for values. 59 + */ 46 60 struct hashmap_entry { 47 - const void *key; 48 - void *value; 61 + union { 62 + long key; 63 + const void *pkey; 64 + }; 65 + union { 66 + long value; 67 + void *pvalue; 68 + }; 49 69 struct hashmap_entry *next; 50 70 }; 51 71 ··· 122 102 HASHMAP_APPEND, 123 103 }; 124 104 105 + #define hashmap_cast_ptr(p) ({ \ 106 + _Static_assert((p) == NULL || sizeof(*(p)) == sizeof(long), \ 107 + #p " pointee should be a long-sized integer or a pointer"); \ 108 + (long *)(p); \ 109 + }) 110 + 125 111 /* 126 112 * hashmap__insert() adds key/value entry w/ various semantics, depending on 127 113 * provided strategy value. If a given key/value pair replaced already ··· 135 109 * through old_key and old_value to allow calling code do proper memory 136 110 * management. 
137 111 */ 138 - int hashmap__insert(struct hashmap *map, const void *key, void *value, 139 - enum hashmap_insert_strategy strategy, 140 - const void **old_key, void **old_value); 112 + int hashmap_insert(struct hashmap *map, long key, long value, 113 + enum hashmap_insert_strategy strategy, 114 + long *old_key, long *old_value); 141 115 142 - static inline int hashmap__add(struct hashmap *map, 143 - const void *key, void *value) 144 - { 145 - return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL); 146 - } 116 + #define hashmap__insert(map, key, value, strategy, old_key, old_value) \ 117 + hashmap_insert((map), (long)(key), (long)(value), (strategy), \ 118 + hashmap_cast_ptr(old_key), \ 119 + hashmap_cast_ptr(old_value)) 147 120 148 - static inline int hashmap__set(struct hashmap *map, 149 - const void *key, void *value, 150 - const void **old_key, void **old_value) 151 - { 152 - return hashmap__insert(map, key, value, HASHMAP_SET, 153 - old_key, old_value); 154 - } 121 + #define hashmap__add(map, key, value) \ 122 + hashmap__insert((map), (key), (value), HASHMAP_ADD, NULL, NULL) 155 123 156 - static inline int hashmap__update(struct hashmap *map, 157 - const void *key, void *value, 158 - const void **old_key, void **old_value) 159 - { 160 - return hashmap__insert(map, key, value, HASHMAP_UPDATE, 161 - old_key, old_value); 162 - } 124 + #define hashmap__set(map, key, value, old_key, old_value) \ 125 + hashmap__insert((map), (key), (value), HASHMAP_SET, (old_key), (old_value)) 163 126 164 - static inline int hashmap__append(struct hashmap *map, 165 - const void *key, void *value) 166 - { 167 - return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL); 168 - } 127 + #define hashmap__update(map, key, value, old_key, old_value) \ 128 + hashmap__insert((map), (key), (value), HASHMAP_UPDATE, (old_key), (old_value)) 169 129 170 - bool hashmap__delete(struct hashmap *map, const void *key, 171 - const void **old_key, void **old_value); 130 + #define 
hashmap__append(map, key, value) \ 131 + hashmap__insert((map), (key), (value), HASHMAP_APPEND, NULL, NULL) 172 132 173 - bool hashmap__find(const struct hashmap *map, const void *key, void **value); 133 + bool hashmap_delete(struct hashmap *map, long key, long *old_key, long *old_value); 134 + 135 + #define hashmap__delete(map, key, old_key, old_value) \ 136 + hashmap_delete((map), (long)(key), \ 137 + hashmap_cast_ptr(old_key), \ 138 + hashmap_cast_ptr(old_value)) 139 + 140 + bool hashmap_find(const struct hashmap *map, long key, long *value); 141 + 142 + #define hashmap__find(map, key, value) \ 143 + hashmap_find((map), (long)(key), hashmap_cast_ptr(value)) 174 144 175 145 /* 176 146 * hashmap__for_each_entry - iterate over all entries in hashmap
+5 -5
tools/perf/util/metricgroup.c
··· 288 288 * combined or shared groups, this metric may not care 289 289 * about this event. 290 290 */ 291 - if (hashmap__find(ids, metric_id, (void **)&val_ptr)) { 291 + if (hashmap__find(ids, metric_id, &val_ptr)) { 292 292 metric_events[matched_events++] = ev; 293 293 294 294 if (matched_events >= ids_size) ··· 764 764 #define RETURN_IF_NON_ZERO(x) do { if (x) return x; } while (0) 765 765 766 766 hashmap__for_each_entry(ctx->ids, cur, bkt) { 767 - const char *sep, *rsep, *id = cur->key; 767 + const char *sep, *rsep, *id = cur->pkey; 768 768 enum perf_tool_event ev; 769 769 770 770 pr_debug("found event %s\n", id); ··· 945 945 hashmap__for_each_entry(root_metric->pctx->ids, cur, bkt) { 946 946 struct pmu_event pe; 947 947 948 - if (metricgroup__find_metric(cur->key, table, &pe)) { 948 + if (metricgroup__find_metric(cur->pkey, table, &pe)) { 949 949 pending = realloc(pending, 950 950 (pending_cnt + 1) * sizeof(struct to_resolve)); 951 951 if (!pending) 952 952 return -ENOMEM; 953 953 954 954 memcpy(&pending[pending_cnt].pe, &pe, sizeof(pe)); 955 - pending[pending_cnt].key = cur->key; 955 + pending[pending_cnt].key = cur->pkey; 956 956 pending_cnt++; 957 957 } 958 958 } ··· 1433 1433 list_for_each_entry(m, metric_list, nd) { 1434 1434 if (m->has_constraint && !m->modifier) { 1435 1435 hashmap__for_each_entry(m->pctx->ids, cur, bkt) { 1436 - dup = strdup(cur->key); 1436 + dup = strdup(cur->pkey); 1437 1437 if (!dup) { 1438 1438 ret = -ENOMEM; 1439 1439 goto err_out;
+1 -1
tools/perf/util/stat-shadow.c
··· 398 398 399 399 i = 0; 400 400 hashmap__for_each_entry(ctx->ids, cur, bkt) { 401 - const char *metric_name = (const char *)cur->key; 401 + const char *metric_name = cur->pkey; 402 402 403 403 found = false; 404 404 if (leader) {
+4 -5
tools/perf/util/stat.c
··· 278 278 } 279 279 } 280 280 281 - static size_t pkg_id_hash(const void *__key, void *ctx __maybe_unused) 281 + static size_t pkg_id_hash(long __key, void *ctx __maybe_unused) 282 282 { 283 283 uint64_t *key = (uint64_t *) __key; 284 284 285 285 return *key & 0xffffffff; 286 286 } 287 287 288 - static bool pkg_id_equal(const void *__key1, const void *__key2, 289 - void *ctx __maybe_unused) 288 + static bool pkg_id_equal(long __key1, long __key2, void *ctx __maybe_unused) 290 289 { 291 290 uint64_t *key1 = (uint64_t *) __key1; 292 291 uint64_t *key2 = (uint64_t *) __key2; ··· 346 347 return -ENOMEM; 347 348 348 349 *key = (uint64_t)d << 32 | s; 349 - if (hashmap__find(mask, (void *)key, NULL)) { 350 + if (hashmap__find(mask, key, NULL)) { 350 351 *skip = true; 351 352 free(key); 352 353 } else 353 - ret = hashmap__add(mask, (void *)key, (void *)1); 354 + ret = hashmap__add(mask, key, 1); 354 355 355 356 return ret; 356 357 }
+136 -54
tools/testing/selftests/bpf/prog_tests/hashmap.c
··· 7 7 */ 8 8 #include "test_progs.h" 9 9 #include "bpf/hashmap.h" 10 + #include <stddef.h> 10 11 11 12 static int duration = 0; 12 13 13 - static size_t hash_fn(const void *k, void *ctx) 14 + static size_t hash_fn(long k, void *ctx) 14 15 { 15 - return (long)k; 16 + return k; 16 17 } 17 18 18 - static bool equal_fn(const void *a, const void *b, void *ctx) 19 + static bool equal_fn(long a, long b, void *ctx) 19 20 { 20 - return (long)a == (long)b; 21 + return a == b; 21 22 } 22 23 23 24 static inline size_t next_pow_2(size_t n) ··· 53 52 return; 54 53 55 54 for (i = 0; i < ELEM_CNT; i++) { 56 - const void *oldk, *k = (const void *)(long)i; 57 - void *oldv, *v = (void *)(long)(1024 + i); 55 + long oldk, k = i; 56 + long oldv, v = 1024 + i; 58 57 59 58 err = hashmap__update(map, k, v, &oldk, &oldv); 60 59 if (CHECK(err != -ENOENT, "hashmap__update", ··· 65 64 err = hashmap__add(map, k, v); 66 65 } else { 67 66 err = hashmap__set(map, k, v, &oldk, &oldv); 68 - if (CHECK(oldk != NULL || oldv != NULL, "check_kv", 69 - "unexpected k/v: %p=%p\n", oldk, oldv)) 67 + if (CHECK(oldk != 0 || oldv != 0, "check_kv", 68 + "unexpected k/v: %ld=%ld\n", oldk, oldv)) 70 69 goto cleanup; 71 70 } 72 71 73 - if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n", 74 - (long)k, (long)v, err)) 72 + if (CHECK(err, "elem_add", "failed to add k/v %ld = %ld: %d\n", k, v, err)) 75 73 goto cleanup; 76 74 77 75 if (CHECK(!hashmap__find(map, k, &oldv), "elem_find", 78 - "failed to find key %ld\n", (long)k)) 76 + "failed to find key %ld\n", k)) 79 77 goto cleanup; 80 - if (CHECK(oldv != v, "elem_val", 81 - "found value is wrong: %ld\n", (long)oldv)) 78 + if (CHECK(oldv != v, "elem_val", "found value is wrong: %ld\n", oldv)) 82 79 goto cleanup; 83 80 } 84 81 ··· 90 91 91 92 found_msk = 0; 92 93 hashmap__for_each_entry(map, entry, bkt) { 93 - long k = (long)entry->key; 94 - long v = (long)entry->value; 94 + long k = entry->key; 95 + long v = entry->value; 95 96 96 97 found_msk |= 1ULL << k; 
97 98 if (CHECK(v - k != 1024, "check_kv", ··· 103 104 goto cleanup; 104 105 105 106 for (i = 0; i < ELEM_CNT; i++) { 106 - const void *oldk, *k = (const void *)(long)i; 107 - void *oldv, *v = (void *)(long)(256 + i); 107 + long oldk, k = i; 108 + long oldv, v = 256 + i; 108 109 109 110 err = hashmap__add(map, k, v); 110 111 if (CHECK(err != -EEXIST, "hashmap__add", ··· 118 119 119 120 if (CHECK(err, "elem_upd", 120 121 "failed to update k/v %ld = %ld: %d\n", 121 - (long)k, (long)v, err)) 122 + k, v, err)) 122 123 goto cleanup; 123 124 if (CHECK(!hashmap__find(map, k, &oldv), "elem_find", 124 - "failed to find key %ld\n", (long)k)) 125 + "failed to find key %ld\n", k)) 125 126 goto cleanup; 126 127 if (CHECK(oldv != v, "elem_val", 127 - "found value is wrong: %ld\n", (long)oldv)) 128 + "found value is wrong: %ld\n", oldv)) 128 129 goto cleanup; 129 130 } 130 131 ··· 138 139 139 140 found_msk = 0; 140 141 hashmap__for_each_entry_safe(map, entry, tmp, bkt) { 141 - long k = (long)entry->key; 142 - long v = (long)entry->value; 142 + long k = entry->key; 143 + long v = entry->value; 143 144 144 145 found_msk |= 1ULL << k; 145 146 if (CHECK(v - k != 256, "elem_check", ··· 151 152 goto cleanup; 152 153 153 154 found_cnt = 0; 154 - hashmap__for_each_key_entry(map, entry, (void *)0) { 155 + hashmap__for_each_key_entry(map, entry, 0) { 155 156 found_cnt++; 156 157 } 157 158 if (CHECK(!found_cnt, "found_cnt", ··· 160 161 161 162 found_msk = 0; 162 163 found_cnt = 0; 163 - hashmap__for_each_key_entry_safe(map, entry, tmp, (void *)0) { 164 - const void *oldk, *k; 165 - void *oldv, *v; 164 + hashmap__for_each_key_entry_safe(map, entry, tmp, 0) { 165 + long oldk, k; 166 + long oldv, v; 166 167 167 168 k = entry->key; 168 169 v = entry->value; 169 170 170 171 found_cnt++; 171 - found_msk |= 1ULL << (long)k; 172 + found_msk |= 1ULL << k; 172 173 173 174 if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del", 174 - "failed to delete k/v %ld = %ld\n", 175 - (long)k, (long)v)) 
175 + "failed to delete k/v %ld = %ld\n", k, v)) 176 176 goto cleanup; 177 177 if (CHECK(oldk != k || oldv != v, "check_old", 178 178 "invalid deleted k/v: expected %ld = %ld, got %ld = %ld\n", 179 - (long)k, (long)v, (long)oldk, (long)oldv)) 179 + k, v, oldk, oldv)) 180 180 goto cleanup; 181 181 if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del", 182 - "unexpectedly deleted k/v %ld = %ld\n", 183 - (long)oldk, (long)oldv)) 182 + "unexpectedly deleted k/v %ld = %ld\n", oldk, oldv)) 184 183 goto cleanup; 185 184 } 186 185 ··· 195 198 goto cleanup; 196 199 197 200 hashmap__for_each_entry_safe(map, entry, tmp, bkt) { 198 - const void *oldk, *k; 199 - void *oldv, *v; 201 + long oldk, k; 202 + long oldv, v; 200 203 201 204 k = entry->key; 202 205 v = entry->value; 203 206 204 207 found_cnt++; 205 - found_msk |= 1ULL << (long)k; 208 + found_msk |= 1ULL << k; 206 209 207 210 if (CHECK(!hashmap__delete(map, k, &oldk, &oldv), "elem_del", 208 - "failed to delete k/v %ld = %ld\n", 209 - (long)k, (long)v)) 211 + "failed to delete k/v %ld = %ld\n", k, v)) 210 212 goto cleanup; 211 213 if (CHECK(oldk != k || oldv != v, "elem_check", 212 214 "invalid old k/v: expect %ld = %ld, got %ld = %ld\n", 213 - (long)k, (long)v, (long)oldk, (long)oldv)) 215 + k, v, oldk, oldv)) 214 216 goto cleanup; 215 217 if (CHECK(hashmap__delete(map, k, &oldk, &oldv), "elem_del", 216 - "unexpectedly deleted k/v %ld = %ld\n", 217 - (long)k, (long)v)) 218 + "unexpectedly deleted k/v %ld = %ld\n", k, v)) 218 219 goto cleanup; 219 220 } 220 221 ··· 230 235 hashmap__for_each_entry(map, entry, bkt) { 231 236 CHECK(false, "elem_exists", 232 237 "unexpected map entries left: %ld = %ld\n", 233 - (long)entry->key, (long)entry->value); 238 + entry->key, entry->value); 234 239 goto cleanup; 235 240 } 236 241 ··· 238 243 hashmap__for_each_entry(map, entry, bkt) { 239 244 CHECK(false, "elem_exists", 240 245 "unexpected map entries left: %ld = %ld\n", 241 - (long)entry->key, (long)entry->value); 246 + 
entry->key, entry->value); 242 247 goto cleanup; 243 248 } 244 249 ··· 246 251 hashmap__free(map); 247 252 } 248 253 249 - static size_t collision_hash_fn(const void *k, void *ctx) 254 + static size_t str_hash_fn(long a, void *ctx) 255 + { 256 + return str_hash((char *)a); 257 + } 258 + 259 + static bool str_equal_fn(long a, long b, void *ctx) 260 + { 261 + return strcmp((char *)a, (char *)b) == 0; 262 + } 263 + 264 + /* Verify that hashmap interface works with pointer keys and values */ 265 + static void test_hashmap_ptr_iface(void) 266 + { 267 + const char *key, *value, *old_key, *old_value; 268 + struct hashmap_entry *cur; 269 + struct hashmap *map; 270 + int err, i, bkt; 271 + 272 + map = hashmap__new(str_hash_fn, str_equal_fn, NULL); 273 + if (CHECK(!map, "hashmap__new", "can't allocate hashmap\n")) 274 + goto cleanup; 275 + 276 + #define CHECK_STR(fn, var, expected) \ 277 + CHECK(strcmp(var, (expected)), (fn), \ 278 + "wrong value of " #var ": '%s' instead of '%s'\n", var, (expected)) 279 + 280 + err = hashmap__insert(map, "a", "apricot", HASHMAP_ADD, NULL, NULL); 281 + if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err)) 282 + goto cleanup; 283 + 284 + err = hashmap__insert(map, "a", "apple", HASHMAP_SET, &old_key, &old_value); 285 + if (CHECK(err, "hashmap__insert", "unexpected error: %d\n", err)) 286 + goto cleanup; 287 + CHECK_STR("hashmap__update", old_key, "a"); 288 + CHECK_STR("hashmap__update", old_value, "apricot"); 289 + 290 + err = hashmap__add(map, "b", "banana"); 291 + if (CHECK(err, "hashmap__add", "unexpected error: %d\n", err)) 292 + goto cleanup; 293 + 294 + err = hashmap__set(map, "b", "breadfruit", &old_key, &old_value); 295 + if (CHECK(err, "hashmap__set", "unexpected error: %d\n", err)) 296 + goto cleanup; 297 + CHECK_STR("hashmap__set", old_key, "b"); 298 + CHECK_STR("hashmap__set", old_value, "banana"); 299 + 300 + err = hashmap__update(map, "b", "blueberry", &old_key, &old_value); 301 + if (CHECK(err, "hashmap__update", 
"unexpected error: %d\n", err)) 302 + goto cleanup; 303 + CHECK_STR("hashmap__update", old_key, "b"); 304 + CHECK_STR("hashmap__update", old_value, "breadfruit"); 305 + 306 + err = hashmap__append(map, "c", "cherry"); 307 + if (CHECK(err, "hashmap__append", "unexpected error: %d\n", err)) 308 + goto cleanup; 309 + 310 + if (CHECK(!hashmap__delete(map, "c", &old_key, &old_value), 311 + "hashmap__delete", "expected to have entry for 'c'\n")) 312 + goto cleanup; 313 + CHECK_STR("hashmap__delete", old_key, "c"); 314 + CHECK_STR("hashmap__delete", old_value, "cherry"); 315 + 316 + CHECK(!hashmap__find(map, "b", &value), "hashmap__find", "can't find value for 'b'\n"); 317 + CHECK_STR("hashmap__find", value, "blueberry"); 318 + 319 + if (CHECK(!hashmap__delete(map, "b", NULL, NULL), 320 + "hashmap__delete", "expected to have entry for 'b'\n")) 321 + goto cleanup; 322 + 323 + i = 0; 324 + hashmap__for_each_entry(map, cur, bkt) { 325 + if (CHECK(i != 0, "hashmap__for_each_entry", "too many entries")) 326 + goto cleanup; 327 + key = cur->pkey; 328 + value = cur->pvalue; 329 + CHECK_STR("entry", key, "a"); 330 + CHECK_STR("entry", value, "apple"); 331 + i++; 332 + } 333 + #undef CHECK_STR 334 + 335 + cleanup: 336 + hashmap__free(map); 337 + } 338 + 339 + static size_t collision_hash_fn(long k, void *ctx) 250 340 { 251 341 return 0; 252 342 } 253 343 254 344 static void test_hashmap_multimap(void) 255 345 { 256 - void *k1 = (void *)0, *k2 = (void *)1; 346 + long k1 = 0, k2 = 1; 257 347 struct hashmap_entry *entry; 258 348 struct hashmap *map; 259 349 long found_msk; ··· 353 273 * [0] -> 1, 2, 4; 354 274 * [1] -> 8, 16, 32; 355 275 */ 356 - err = hashmap__append(map, k1, (void *)1); 276 + err = hashmap__append(map, k1, 1); 357 277 if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) 358 278 goto cleanup; 359 - err = hashmap__append(map, k1, (void *)2); 279 + err = hashmap__append(map, k1, 2); 360 280 if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) 361 281 
goto cleanup; 362 - err = hashmap__append(map, k1, (void *)4); 282 + err = hashmap__append(map, k1, 4); 363 283 if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) 364 284 goto cleanup; 365 285 366 - err = hashmap__append(map, k2, (void *)8); 286 + err = hashmap__append(map, k2, 8); 367 287 if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) 368 288 goto cleanup; 369 - err = hashmap__append(map, k2, (void *)16); 289 + err = hashmap__append(map, k2, 16); 370 290 if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) 371 291 goto cleanup; 372 - err = hashmap__append(map, k2, (void *)32); 292 + err = hashmap__append(map, k2, 32); 373 293 if (CHECK(err, "elem_add", "failed to add k/v: %d\n", err)) 374 294 goto cleanup; 375 295 ··· 380 300 /* verify global iteration still works and sees all values */ 381 301 found_msk = 0; 382 302 hashmap__for_each_entry(map, entry, bkt) { 383 - found_msk |= (long)entry->value; 303 + found_msk |= entry->value; 384 304 } 385 305 if (CHECK(found_msk != (1 << 6) - 1, "found_msk", 386 306 "not all keys iterated: %lx\n", found_msk)) ··· 389 309 /* iterate values for key 1 */ 390 310 found_msk = 0; 391 311 hashmap__for_each_key_entry(map, entry, k1) { 392 - found_msk |= (long)entry->value; 312 + found_msk |= entry->value; 393 313 } 394 314 if (CHECK(found_msk != (1 | 2 | 4), "found_msk", 395 315 "invalid k1 values: %lx\n", found_msk)) ··· 398 318 /* iterate values for key 2 */ 399 319 found_msk = 0; 400 320 hashmap__for_each_key_entry(map, entry, k2) { 401 - found_msk |= (long)entry->value; 321 + found_msk |= entry->value; 402 322 } 403 323 if (CHECK(found_msk != (8 | 16 | 32), "found_msk", 404 324 "invalid k2 values: %lx\n", found_msk)) ··· 413 333 struct hashmap_entry *entry; 414 334 int bkt; 415 335 struct hashmap *map; 416 - void *k = (void *)0; 336 + long k = 0; 417 337 418 338 /* force collisions */ 419 339 map = hashmap__new(hash_fn, equal_fn, NULL); ··· 454 374 test_hashmap_multimap(); 455 375 if 
(test__start_subtest("empty")) 456 376 test_hashmap_empty(); 377 + if (test__start_subtest("ptr_iface")) 378 + test_hashmap_ptr_iface(); 457 379 }
+3 -3
tools/testing/selftests/bpf/prog_tests/kprobe_multi_test.c
··· 312 312 return (__u64) t.tv_sec * 1000000000 + t.tv_nsec; 313 313 } 314 314 315 - static size_t symbol_hash(const void *key, void *ctx __maybe_unused) 315 + static size_t symbol_hash(long key, void *ctx __maybe_unused) 316 316 { 317 317 return str_hash((const char *) key); 318 318 } 319 319 320 - static bool symbol_equal(const void *key1, const void *key2, void *ctx __maybe_unused) 320 + static bool symbol_equal(long key1, long key2, void *ctx __maybe_unused) 321 321 { 322 322 return strcmp((const char *) key1, (const char *) key2) == 0; 323 323 } ··· 372 372 sizeof("__ftrace_invalid_address__") - 1)) 373 373 continue; 374 374 375 - err = hashmap__add(map, name, NULL); 375 + err = hashmap__add(map, name, 0); 376 376 if (err == -EEXIST) 377 377 continue; 378 378 if (err)