Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rhashtable: plumb through alloc tag

This gives better memory allocation profiling results; rhashtable
allocations will be accounted to the code that initialized the rhashtable.

[surenb@google.com: undo _noprof additions in the documentation]
Link: https://lkml.kernel.org/r/20240326231453.1206227-1-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-32-surenb@google.com
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Kent Overstreet and committed by Andrew Morton (commit IDs: 9e54dd8b, 88ae5fb7).

Overall diffstat: +26 −10

include/linux/alloc_tag.h (+3)
···
 		ref->ct = NULL;
 }
 
+#define alloc_tag_record(p)	((p) = current->alloc_tag)
+
 #else /* CONFIG_MEM_ALLOC_PROFILING */
 
 #define DEFINE_ALLOC_TAG(_alloc_tag)
···
 static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag,
 				 size_t bytes) {}
 static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {}
+#define alloc_tag_record(p)	do {} while (0)
 
 #endif /* CONFIG_MEM_ALLOC_PROFILING */
 
include/linux/rhashtable-types.h (+9 −2)
···
 #ifndef _LINUX_RHASHTABLE_TYPES_H
 #define _LINUX_RHASHTABLE_TYPES_H
 
+#include <linux/alloc_tag.h>
 #include <linux/atomic.h>
 #include <linux/compiler.h>
 #include <linux/mutex.h>
···
 	struct mutex		mutex;
 	spinlock_t		lock;
 	atomic_t		nelems;
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+	struct alloc_tag	*alloc_tag;
+#endif
 };
 
 /**
···
 	bool end_of_table;
 };
 
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
 		    const struct rhashtable_params *params);
+#define rhashtable_init(...)	alloc_hooks(rhashtable_init_noprof(__VA_ARGS__))
+
-int rhltable_init(struct rhltable *hlt,
+int rhltable_init_noprof(struct rhltable *hlt,
 		  const struct rhashtable_params *params);
+#define rhltable_init(...)	alloc_hooks(rhltable_init_noprof(__VA_ARGS__))
 
 #endif /* _LINUX_RHASHTABLE_TYPES_H */
lib/rhashtable.c (+14 −8)
···
 	if (ntbl)
 		return ntbl;
 
-	ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+	ntbl = alloc_hooks_tag(ht->alloc_tag,
+			kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
 
 	if (ntbl && leaf) {
 		for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
···
 	size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
 
-	tbl = kzalloc(size, gfp);
+	tbl = alloc_hooks_tag(ht->alloc_tag,
+			kmalloc_noprof(size, gfp|__GFP_ZERO));
 	if (!tbl)
 		return NULL;
···
 	int i;
 	static struct lock_class_key __key;
 
-	tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
+	tbl = alloc_hooks_tag(ht->alloc_tag,
+			kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
+					     gfp|__GFP_ZERO, NUMA_NO_NODE));
 
 	size = nbuckets;
···
  *	.obj_hashfn = my_hash_fn,
  * };
  */
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
 		    const struct rhashtable_params *params)
 {
 	struct bucket_table *tbl;
···
 	mutex_init(&ht->mutex);
 	spin_lock_init(&ht->lock);
 	memcpy(&ht->p, params, sizeof(*params));
+
+	alloc_tag_record(ht->alloc_tag);
 
 	if (params->min_size)
 		ht->p.min_size = roundup_pow_of_two(params->min_size);
···
 
 	return 0;
 }
-EXPORT_SYMBOL_GPL(rhashtable_init);
+EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
 
 /**
  * rhltable_init - initialize a new hash list table
···
  *
  * See documentation for rhashtable_init.
  */
-int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
 {
 	int err;
 
-	err = rhashtable_init(&hlt->ht, params);
+	err = rhashtable_init_noprof(&hlt->ht, params);
 	hlt->ht.rhlist = true;
 	return err;
 }
-EXPORT_SYMBOL_GPL(rhltable_init);
+EXPORT_SYMBOL_GPL(rhltable_init_noprof);
 
 static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
 				void (*free_fn)(void *ptr, void *arg),