Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mempool: hook up to memory allocation profiling

This adds hooks to mempools so that mempool-backed allocations are
annotated at the correct source line, and therefore show up correctly in
/sys/kernel/debug/allocations.

Various inline functions are converted to wrappers so that we can invoke
alloc_hooks() in fewer places.

[surenb@google.com: undo _noprof additions in the documentation]
Link: https://lkml.kernel.org/r/20240326231453.1206227-4-surenb@google.com
[surenb@google.com: add missing mempool_create_node documentation]
Link: https://lkml.kernel.org/r/20240402180835.1661905-1-surenb@google.com
Link: https://lkml.kernel.org/r/20240321163705.3067592-27-surenb@google.com
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Tested-by: Kees Cook <keescook@chromium.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Alex Gaynor <alex.gaynor@gmail.com>
Cc: Alice Ryhl <aliceryhl@google.com>
Cc: Andreas Hindborg <a.hindborg@samsung.com>
Cc: Benno Lossin <benno.lossin@proton.me>
Cc: "Björn Roy Baron" <bjorn3_gh@protonmail.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Gary Guo <gary@garyguo.net>
Cc: Miguel Ojeda <ojeda@kernel.org>
Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wedson Almeida Filho <wedsonaf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Kent Overstreet and committed by
Andrew Morton
e26d8769 7bd230a2

+50 -59
+35 -38
include/linux/mempool.h
··· 5 5 #ifndef _LINUX_MEMPOOL_H 6 6 #define _LINUX_MEMPOOL_H 7 7 8 + #include <linux/sched.h> 9 + #include <linux/alloc_tag.h> 8 10 #include <linux/wait.h> 9 11 #include <linux/compiler.h> 10 12 ··· 41 39 int mempool_init_node(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, 42 40 mempool_free_t *free_fn, void *pool_data, 43 41 gfp_t gfp_mask, int node_id); 44 - int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, 42 + 43 + int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, 45 44 mempool_free_t *free_fn, void *pool_data); 45 + #define mempool_init(...) \ 46 + alloc_hooks(mempool_init_noprof(__VA_ARGS__)) 46 47 47 48 extern mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, 48 49 mempool_free_t *free_fn, void *pool_data); 49 - extern mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, 50 + 51 + extern mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, 50 52 mempool_free_t *free_fn, void *pool_data, 51 53 gfp_t gfp_mask, int nid); 54 + #define mempool_create_node(...) \ 55 + alloc_hooks(mempool_create_node_noprof(__VA_ARGS__)) 56 + 57 + #define mempool_create(_min_nr, _alloc_fn, _free_fn, _pool_data) \ 58 + mempool_create_node(_min_nr, _alloc_fn, _free_fn, _pool_data, \ 59 + GFP_KERNEL, NUMA_NO_NODE) 52 60 53 61 extern int mempool_resize(mempool_t *pool, int new_min_nr); 54 62 extern void mempool_destroy(mempool_t *pool); 55 - extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc; 63 + 64 + extern void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) __malloc; 65 + #define mempool_alloc(...) 
\ 66 + alloc_hooks(mempool_alloc_noprof(__VA_ARGS__)) 67 + 56 68 extern void *mempool_alloc_preallocated(mempool_t *pool) __malloc; 57 69 extern void mempool_free(void *element, mempool_t *pool); 58 70 ··· 78 62 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); 79 63 void mempool_free_slab(void *element, void *pool_data); 80 64 81 - static inline int 82 - mempool_init_slab_pool(mempool_t *pool, int min_nr, struct kmem_cache *kc) 83 - { 84 - return mempool_init(pool, min_nr, mempool_alloc_slab, 85 - mempool_free_slab, (void *) kc); 86 - } 87 - 88 - static inline mempool_t * 89 - mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) 90 - { 91 - return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, 92 - (void *) kc); 93 - } 65 + #define mempool_init_slab_pool(_pool, _min_nr, _kc) \ 66 + mempool_init(_pool, (_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc)) 67 + #define mempool_create_slab_pool(_min_nr, _kc) \ 68 + mempool_create((_min_nr), mempool_alloc_slab, mempool_free_slab, (void *)(_kc)) 94 69 95 70 /* 96 71 * a mempool_alloc_t and a mempool_free_t to kmalloc and kfree the ··· 90 83 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); 91 84 void mempool_kfree(void *element, void *pool_data); 92 85 93 - static inline int mempool_init_kmalloc_pool(mempool_t *pool, int min_nr, size_t size) 94 - { 95 - return mempool_init(pool, min_nr, mempool_kmalloc, 96 - mempool_kfree, (void *) size); 97 - } 98 - 99 - static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) 100 - { 101 - return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, 102 - (void *) size); 103 - } 86 + #define mempool_init_kmalloc_pool(_pool, _min_nr, _size) \ 87 + mempool_init(_pool, (_min_nr), mempool_kmalloc, mempool_kfree, \ 88 + (void *)(unsigned long)(_size)) 89 + #define mempool_create_kmalloc_pool(_min_nr, _size) \ 90 + mempool_create((_min_nr), mempool_kmalloc, mempool_kfree, \ 91 + (void *)(unsigned long)(_size)) 104 92 105 
93 void *mempool_kvmalloc(gfp_t gfp_mask, void *pool_data); 106 94 void mempool_kvfree(void *element, void *pool_data); ··· 117 115 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); 118 116 void mempool_free_pages(void *element, void *pool_data); 119 117 120 - static inline int mempool_init_page_pool(mempool_t *pool, int min_nr, int order) 121 - { 122 - return mempool_init(pool, min_nr, mempool_alloc_pages, 123 - mempool_free_pages, (void *)(long)order); 124 - } 125 - 126 - static inline mempool_t *mempool_create_page_pool(int min_nr, int order) 127 - { 128 - return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, 129 - (void *)(long)order); 130 - } 118 + #define mempool_init_page_pool(_pool, _min_nr, _order) \ 119 + mempool_init(_pool, (_min_nr), mempool_alloc_pages, \ 120 + mempool_free_pages, (void *)(long)(_order)) 121 + #define mempool_create_page_pool(_min_nr, _order) \ 122 + mempool_create((_min_nr), mempool_alloc_pages, \ 123 + mempool_free_pages, (void *)(long)(_order)) 131 124 132 125 #endif /* _LINUX_MEMPOOL_H */
+15 -21
mm/mempool.c
··· 240 240 * 241 241 * Return: %0 on success, negative error code otherwise. 242 242 */ 243 - int mempool_init(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, 244 - mempool_free_t *free_fn, void *pool_data) 243 + int mempool_init_noprof(mempool_t *pool, int min_nr, mempool_alloc_t *alloc_fn, 244 + mempool_free_t *free_fn, void *pool_data) 245 245 { 246 246 return mempool_init_node(pool, min_nr, alloc_fn, free_fn, 247 247 pool_data, GFP_KERNEL, NUMA_NO_NODE); 248 248 249 249 } 250 - EXPORT_SYMBOL(mempool_init); 250 + EXPORT_SYMBOL(mempool_init_noprof); 251 251 252 252 /** 253 - * mempool_create - create a memory pool 253 + * mempool_create_node - create a memory pool 254 254 * @min_nr: the minimum number of elements guaranteed to be 255 255 * allocated for this pool. 256 256 * @alloc_fn: user-defined element-allocation function. 257 257 * @free_fn: user-defined element-freeing function. 258 258 * @pool_data: optional private data available to the user-defined functions. 259 + * @gfp_mask: memory allocation flags 260 + * @node_id: numa node to allocate on 259 261 * 260 262 * this function creates and allocates a guaranteed size, preallocated 261 263 * memory pool. The pool can be used from the mempool_alloc() and mempool_free() ··· 267 265 * 268 266 * Return: pointer to the created memory pool object or %NULL on error. 
269 267 */ 270 - mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn, 271 - mempool_free_t *free_fn, void *pool_data) 272 - { 273 - return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, 274 - GFP_KERNEL, NUMA_NO_NODE); 275 - } 276 - EXPORT_SYMBOL(mempool_create); 277 - 278 - mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn, 279 - mempool_free_t *free_fn, void *pool_data, 280 - gfp_t gfp_mask, int node_id) 268 + mempool_t *mempool_create_node_noprof(int min_nr, mempool_alloc_t *alloc_fn, 269 + mempool_free_t *free_fn, void *pool_data, 270 + gfp_t gfp_mask, int node_id) 281 271 { 282 272 mempool_t *pool; 283 273 ··· 285 291 286 292 return pool; 287 293 } 288 - EXPORT_SYMBOL(mempool_create_node); 294 + EXPORT_SYMBOL(mempool_create_node_noprof); 289 295 290 296 /** 291 297 * mempool_resize - resize an existing memory pool ··· 381 387 * 382 388 * Return: pointer to the allocated element or %NULL on error. 383 389 */ 384 - void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) 390 + void *mempool_alloc_noprof(mempool_t *pool, gfp_t gfp_mask) 385 391 { 386 392 void *element; 387 393 unsigned long flags; ··· 448 454 finish_wait(&pool->wait, &wait); 449 455 goto repeat_alloc; 450 456 } 451 - EXPORT_SYMBOL(mempool_alloc); 457 + EXPORT_SYMBOL(mempool_alloc_noprof); 452 458 453 459 /** 454 460 * mempool_alloc_preallocated - allocate an element from preallocated elements ··· 556 562 { 557 563 struct kmem_cache *mem = pool_data; 558 564 VM_BUG_ON(mem->ctor); 559 - return kmem_cache_alloc(mem, gfp_mask); 565 + return kmem_cache_alloc_noprof(mem, gfp_mask); 560 566 } 561 567 EXPORT_SYMBOL(mempool_alloc_slab); 562 568 ··· 574 580 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) 575 581 { 576 582 size_t size = (size_t)pool_data; 577 - return kmalloc(size, gfp_mask); 583 + return kmalloc_noprof(size, gfp_mask); 578 584 } 579 585 EXPORT_SYMBOL(mempool_kmalloc); 580 586 ··· 604 610 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) 
605 611 { 606 612 int order = (int)(long)pool_data; 607 - return alloc_pages(gfp_mask, order); 613 + return alloc_pages_noprof(gfp_mask, order); 608 614 } 609 615 EXPORT_SYMBOL(mempool_alloc_pages); 610 616