Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: change inlined allocation helpers to account at the call site

The main goal of the memory allocation profiling patchset is to provide
accounting that is cheap enough to run in production. To achieve that we
inject counters using codetags at the allocation call sites to account
every time an allocation is made. This injection allows us to perform accounting
efficiently because injected counters are immediately available as opposed
to the alternative methods, such as using _RET_IP_, which would require
counter lookup and appropriate locking that makes accounting much more
expensive. This method requires all allocation functions to inject
separate counters at their call sites so that their callers can be
individually accounted. Counter injection is implemented by allocation
hooks which should wrap all allocation functions.

Inlined functions which perform allocations but do not use allocation
hooks are directly charged for the allocations they perform. In most
cases these functions are just specialized allocation wrappers used from
multiple places to allocate objects of a specific type. It would be more
useful to do the accounting at their call sites instead. Instrument these
helpers to do accounting at the call site. Simple inlined allocation
wrappers are converted directly into macros. More complex allocators or
allocators with documentation are converted into _noprof versions and
allocation hooks are added. This allows memory allocation profiling
mechanism to charge allocations to the callers of these functions.

Link: https://lkml.kernel.org/r/20240415020731.1152108-1-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Acked-by: Jan Kara <jack@suse.cz> [jbd2]
Cc: Anna Schumaker <anna@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Benjamin Tissoires <benjamin.tissoires@redhat.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Dennis Zhou <dennis@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Jakub Sitnicki <jakub@cloudflare.com>
Cc: Jiri Kosina <jikos@kernel.org>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Suren Baghdasaryan and committed by
Andrew Morton
2c321f3f ebdf9ad4

+142 -143
+3 -2
drivers/iommu/amd/amd_iommu.h
··· 134 134 return PCI_SEG_DEVID_TO_SBDF(seg, devid); 135 135 } 136 136 137 - static inline void *alloc_pgtable_page(int nid, gfp_t gfp) 137 + static inline void *alloc_pgtable_page_noprof(int nid, gfp_t gfp) 138 138 { 139 139 struct page *page; 140 140 141 - page = alloc_pages_node(nid, gfp | __GFP_ZERO, 0); 141 + page = alloc_pages_node_noprof(nid, gfp | __GFP_ZERO, 0); 142 142 return page ? page_address(page) : NULL; 143 143 } 144 + #define alloc_pgtable_page(...) alloc_hooks(alloc_pgtable_page_noprof(__VA_ARGS__)) 144 145 145 146 /* 146 147 * This must be called after device probe completes. During probe
+1 -4
fs/nfs/iostat.h
··· 46 46 nfs_add_server_stats(NFS_SERVER(inode), stat, addend); 47 47 } 48 48 49 - static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void) 50 - { 51 - return alloc_percpu(struct nfs_iostats); 52 - } 49 + #define nfs_alloc_iostats() alloc_percpu(struct nfs_iostats) 53 50 54 51 static inline void nfs_free_iostats(struct nfs_iostats __percpu *stats) 55 52 {
+6 -13
include/acpi/platform/aclinuxex.h
··· 47 47 * However, boot has (system_state != SYSTEM_RUNNING) 48 48 * to quiet __might_sleep() in kmalloc() and resume does not. 49 49 */ 50 - static inline void *acpi_os_allocate(acpi_size size) 51 - { 52 - return kmalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); 53 - } 50 + #define acpi_os_allocate(_size) \ 51 + kmalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL) 54 52 55 - static inline void *acpi_os_allocate_zeroed(acpi_size size) 56 - { 57 - return kzalloc(size, irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); 58 - } 53 + #define acpi_os_allocate_zeroed(_size) \ 54 + kzalloc(_size, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL) 59 55 60 56 static inline void acpi_os_free(void *memory) 61 57 { 62 58 kfree(memory); 63 59 } 64 60 65 - static inline void *acpi_os_acquire_object(acpi_cache_t * cache) 66 - { 67 - return kmem_cache_zalloc(cache, 68 - irqs_disabled()? GFP_ATOMIC : GFP_KERNEL); 69 - } 61 + #define acpi_os_acquire_object(_cache) \ 62 + kmem_cache_zalloc(_cache, irqs_disabled() ? GFP_ATOMIC : GFP_KERNEL) 70 63 71 64 static inline acpi_thread_id acpi_os_get_thread_id(void) 72 65 {
+21 -14
include/asm-generic/pgalloc.h
··· 16 16 * 17 17 * Return: pointer to the allocated memory or %NULL on error 18 18 */ 19 - static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm) 19 + static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm) 20 20 { 21 - struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL & 21 + struct ptdesc *ptdesc = pagetable_alloc_noprof(GFP_PGTABLE_KERNEL & 22 22 ~__GFP_HIGHMEM, 0); 23 23 24 24 if (!ptdesc) 25 25 return NULL; 26 26 return ptdesc_address(ptdesc); 27 27 } 28 + #define __pte_alloc_one_kernel(...) alloc_hooks(__pte_alloc_one_kernel_noprof(__VA_ARGS__)) 28 29 29 30 #ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL 30 31 /** ··· 34 33 * 35 34 * Return: pointer to the allocated memory or %NULL on error 36 35 */ 37 - static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) 36 + static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm) 38 37 { 39 - return __pte_alloc_one_kernel(mm); 38 + return __pte_alloc_one_kernel_noprof(mm); 40 39 } 40 + #define pte_alloc_one_kernel(...) alloc_hooks(pte_alloc_one_kernel_noprof(__VA_ARGS__)) 41 41 #endif 42 42 43 43 /** ··· 63 61 * 64 62 * Return: `struct page` referencing the ptdesc or %NULL on error 65 63 */ 66 - static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp) 64 + static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp) 67 65 { 68 66 struct ptdesc *ptdesc; 69 67 70 - ptdesc = pagetable_alloc(gfp, 0); 68 + ptdesc = pagetable_alloc_noprof(gfp, 0); 71 69 if (!ptdesc) 72 70 return NULL; 73 71 if (!pagetable_pte_ctor(ptdesc)) { ··· 77 75 78 76 return ptdesc_page(ptdesc); 79 77 } 78 + #define __pte_alloc_one(...) 
alloc_hooks(__pte_alloc_one_noprof(__VA_ARGS__)) 80 79 81 80 #ifndef __HAVE_ARCH_PTE_ALLOC_ONE 82 81 /** ··· 88 85 * 89 86 * Return: `struct page` referencing the ptdesc or %NULL on error 90 87 */ 91 - static inline pgtable_t pte_alloc_one(struct mm_struct *mm) 88 + static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm) 92 89 { 93 - return __pte_alloc_one(mm, GFP_PGTABLE_USER); 90 + return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER); 94 91 } 92 + #define pte_alloc_one(...) alloc_hooks(pte_alloc_one_noprof(__VA_ARGS__)) 95 93 #endif 96 94 97 95 /* ··· 128 124 * 129 125 * Return: pointer to the allocated memory or %NULL on error 130 126 */ 131 - static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) 127 + static inline pmd_t *pmd_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) 132 128 { 133 129 struct ptdesc *ptdesc; 134 130 gfp_t gfp = GFP_PGTABLE_USER; 135 131 136 132 if (mm == &init_mm) 137 133 gfp = GFP_PGTABLE_KERNEL; 138 - ptdesc = pagetable_alloc(gfp, 0); 134 + ptdesc = pagetable_alloc_noprof(gfp, 0); 139 135 if (!ptdesc) 140 136 return NULL; 141 137 if (!pagetable_pmd_ctor(ptdesc)) { ··· 144 140 } 145 141 return ptdesc_address(ptdesc); 146 142 } 143 + #define pmd_alloc_one(...) 
alloc_hooks(pmd_alloc_one_noprof(__VA_ARGS__)) 147 144 #endif 148 145 149 146 #ifndef __HAVE_ARCH_PMD_FREE ··· 162 157 163 158 #if CONFIG_PGTABLE_LEVELS > 3 164 159 165 - static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr) 160 + static inline pud_t *__pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) 166 161 { 167 162 gfp_t gfp = GFP_PGTABLE_USER; 168 163 struct ptdesc *ptdesc; ··· 171 166 gfp = GFP_PGTABLE_KERNEL; 172 167 gfp &= ~__GFP_HIGHMEM; 173 168 174 - ptdesc = pagetable_alloc(gfp, 0); 169 + ptdesc = pagetable_alloc_noprof(gfp, 0); 175 170 if (!ptdesc) 176 171 return NULL; 177 172 178 173 pagetable_pud_ctor(ptdesc); 179 174 return ptdesc_address(ptdesc); 180 175 } 176 + #define __pud_alloc_one(...) alloc_hooks(__pud_alloc_one_noprof(__VA_ARGS__)) 181 177 182 178 #ifndef __HAVE_ARCH_PUD_ALLOC_ONE 183 179 /** ··· 190 184 * 191 185 * Return: pointer to the allocated memory or %NULL on error 192 186 */ 193 - static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) 187 + static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) 194 188 { 195 - return __pud_alloc_one(mm, addr); 189 + return __pud_alloc_one_noprof(mm, addr); 196 190 } 191 + #define pud_alloc_one(...) alloc_hooks(pud_alloc_one_noprof(__VA_ARGS__)) 197 192 #endif 198 193 199 194 static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
+4 -3
include/crypto/hash.h
··· 578 578 * 579 579 * Return: allocated request handle in case of success, or NULL if out of memory 580 580 */ 581 - static inline struct ahash_request *ahash_request_alloc( 581 + static inline struct ahash_request *ahash_request_alloc_noprof( 582 582 struct crypto_ahash *tfm, gfp_t gfp) 583 583 { 584 584 struct ahash_request *req; 585 585 586 - req = kmalloc(sizeof(struct ahash_request) + 587 - crypto_ahash_reqsize(tfm), gfp); 586 + req = kmalloc_noprof(sizeof(struct ahash_request) + 587 + crypto_ahash_reqsize(tfm), gfp); 588 588 589 589 if (likely(req)) 590 590 ahash_request_set_tfm(req, tfm); 591 591 592 592 return req; 593 593 } 594 + #define ahash_request_alloc(...) alloc_hooks(ahash_request_alloc_noprof(__VA_ARGS__)) 594 595 595 596 /** 596 597 * ahash_request_free() - zeroize and free the request data structure
+3 -2
include/crypto/internal/acompress.h
··· 69 69 crypto_request_complete(&req->base, err); 70 70 } 71 71 72 - static inline struct acomp_req *__acomp_request_alloc(struct crypto_acomp *tfm) 72 + static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm) 73 73 { 74 74 struct acomp_req *req; 75 75 76 - req = kzalloc(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); 76 + req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); 77 77 if (likely(req)) 78 78 acomp_request_set_tfm(req, tfm); 79 79 return req; 80 80 } 81 + #define __acomp_request_alloc(...) alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__)) 81 82 82 83 static inline void __acomp_request_free(struct acomp_req *req) 83 84 {
+4 -3
include/crypto/skcipher.h
··· 861 861 * 862 862 * Return: allocated request handle in case of success, or NULL if out of memory 863 863 */ 864 - static inline struct skcipher_request *skcipher_request_alloc( 864 + static inline struct skcipher_request *skcipher_request_alloc_noprof( 865 865 struct crypto_skcipher *tfm, gfp_t gfp) 866 866 { 867 867 struct skcipher_request *req; 868 868 869 - req = kmalloc(sizeof(struct skcipher_request) + 870 - crypto_skcipher_reqsize(tfm), gfp); 869 + req = kmalloc_noprof(sizeof(struct skcipher_request) + 870 + crypto_skcipher_reqsize(tfm), gfp); 871 871 872 872 if (likely(req)) 873 873 skcipher_request_set_tfm(req, tfm); 874 874 875 875 return req; 876 876 } 877 + #define skcipher_request_alloc(...) alloc_hooks(skcipher_request_alloc_noprof(__VA_ARGS__)) 877 878 878 879 /** 879 880 * skcipher_request_free() - zeroize and free request data structure
+8 -25
include/linux/bpf.h
··· 2244 2244 void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, 2245 2245 size_t align, gfp_t flags); 2246 2246 #else 2247 - static inline void * 2248 - bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, 2249 - int node) 2250 - { 2251 - return kmalloc_node(size, flags, node); 2252 - } 2253 - 2254 - static inline void * 2255 - bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags) 2256 - { 2257 - return kzalloc(size, flags); 2258 - } 2259 - 2260 - static inline void * 2261 - bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, gfp_t flags) 2262 - { 2263 - return kvcalloc(n, size, flags); 2264 - } 2265 - 2266 - static inline void __percpu * 2267 - bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, size_t align, 2268 - gfp_t flags) 2269 - { 2270 - return __alloc_percpu_gfp(size, align, flags); 2271 - } 2247 + #define bpf_map_kmalloc_node(_map, _size, _flags, _node) \ 2248 + kmalloc_node(_size, _flags, _node) 2249 + #define bpf_map_kzalloc(_map, _size, _flags) \ 2250 + kzalloc(_size, _flags) 2251 + #define bpf_map_kvcalloc(_map, _n, _size, _flags) \ 2252 + kvcalloc(_n, _size, _flags) 2253 + #define bpf_map_alloc_percpu(_map, _size, _align, _flags) \ 2254 + __alloc_percpu_gfp(_size, _align, _flags) 2272 2255 #endif 2273 2256 2274 2257 static inline int
+3 -2
include/linux/bpfptr.h
··· 65 65 return copy_to_sockptr_offset((sockptr_t) dst, offset, src, size); 66 66 } 67 67 68 - static inline void *kvmemdup_bpfptr(bpfptr_t src, size_t len) 68 + static inline void *kvmemdup_bpfptr_noprof(bpfptr_t src, size_t len) 69 69 { 70 - void *p = kvmalloc(len, GFP_USER | __GFP_NOWARN); 70 + void *p = kvmalloc_noprof(len, GFP_USER | __GFP_NOWARN); 71 71 72 72 if (!p) 73 73 return ERR_PTR(-ENOMEM); ··· 77 77 } 78 78 return p; 79 79 } 80 + #define kvmemdup_bpfptr(...) alloc_hooks(kvmemdup_bpfptr_noprof(__VA_ARGS__)) 80 81 81 82 static inline long strncpy_from_bpfptr(char *dst, bpfptr_t src, size_t count) 82 83 {
+2 -4
include/linux/dma-fence-chain.h
··· 86 86 * 87 87 * Returns a new struct dma_fence_chain object or NULL on failure. 88 88 */ 89 - static inline struct dma_fence_chain *dma_fence_chain_alloc(void) 90 - { 91 - return kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL); 92 - }; 89 + #define dma_fence_chain_alloc() \ 90 + ((struct dma_fence_chain *)kmalloc(sizeof(struct dma_fence_chain), GFP_KERNEL)) 93 91 94 92 /** 95 93 * dma_fence_chain_free
+2 -4
include/linux/hid_bpf.h
··· 149 149 static inline void hid_bpf_disconnect_device(struct hid_device *hdev) {} 150 150 static inline void hid_bpf_destroy_device(struct hid_device *hid) {} 151 151 static inline void hid_bpf_device_init(struct hid_device *hid) {} 152 - static inline u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size) 153 - { 154 - return kmemdup(rdesc, *size, GFP_KERNEL); 155 - } 152 + #define call_hid_bpf_rdesc_fixup(_hdev, _rdesc, _size) \ 153 + ((u8 *)kmemdup(_rdesc, *(_size), GFP_KERNEL)) 156 154 157 155 #endif /* CONFIG_HID_BPF */ 158 156
+4 -8
include/linux/jbd2.h
··· 1586 1586 */ 1587 1587 extern struct kmem_cache *jbd2_handle_cache; 1588 1588 1589 - static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags) 1590 - { 1591 - return kmem_cache_zalloc(jbd2_handle_cache, gfp_flags); 1592 - } 1589 + #define jbd2_alloc_handle(_gfp_flags) \ 1590 + ((handle_t *)kmem_cache_zalloc(jbd2_handle_cache, _gfp_flags)) 1593 1591 1594 1592 static inline void jbd2_free_handle(handle_t *handle) 1595 1593 { ··· 1600 1602 */ 1601 1603 extern struct kmem_cache *jbd2_inode_cache; 1602 1604 1603 - static inline struct jbd2_inode *jbd2_alloc_inode(gfp_t gfp_flags) 1604 - { 1605 - return kmem_cache_alloc(jbd2_inode_cache, gfp_flags); 1606 - } 1605 + #define jbd2_alloc_inode(_gfp_flags) \ 1606 + ((struct jbd2_inode *)kmem_cache_alloc(jbd2_inode_cache, _gfp_flags)) 1607 1607 1608 1608 static inline void jbd2_free_inode(struct jbd2_inode *jinode) 1609 1609 {
+3 -2
include/linux/mm.h
··· 2860 2860 * 2861 2861 * Return: The ptdesc describing the allocated page tables. 2862 2862 */ 2863 - static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order) 2863 + static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order) 2864 2864 { 2865 - struct page *page = alloc_pages(gfp | __GFP_COMP, order); 2865 + struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order); 2866 2866 2867 2867 return page_ptdesc(page); 2868 2868 } 2869 + #define pagetable_alloc(...) alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__)) 2869 2870 2870 2871 /** 2871 2872 * pagetable_free - Free pagetables
+3 -2
include/linux/mm_types.h
··· 1170 1170 cpumask_clear(mm_cidmask(mm)); 1171 1171 } 1172 1172 1173 - static inline int mm_alloc_cid(struct mm_struct *mm) 1173 + static inline int mm_alloc_cid_noprof(struct mm_struct *mm) 1174 1174 { 1175 - mm->pcpu_cid = alloc_percpu(struct mm_cid); 1175 + mm->pcpu_cid = alloc_percpu_noprof(struct mm_cid); 1176 1176 if (!mm->pcpu_cid) 1177 1177 return -ENOMEM; 1178 1178 mm_init_cid(mm); 1179 1179 return 0; 1180 1180 } 1181 + #define mm_alloc_cid(...) alloc_hooks(mm_alloc_cid_noprof(__VA_ARGS__)) 1181 1182 1182 1183 static inline void mm_destroy_cid(struct mm_struct *mm) 1183 1184 {
+3
include/linux/percpu.h
··· 151 151 #define alloc_percpu(type) \ 152 152 (typeof(type) __percpu *)__alloc_percpu(sizeof(type), \ 153 153 __alignof__(type)) 154 + #define alloc_percpu_noprof(type) \ 155 + ((typeof(type) __percpu *)pcpu_alloc_noprof(sizeof(type), \ 156 + __alignof__(type), false, GFP_KERNEL)) 154 157 155 158 extern void free_percpu(void __percpu *__pdata); 156 159
+16 -12
include/linux/ptr_ring.h
··· 464 464 /* Not all gfp_t flags (besides GFP_KERNEL) are allowed. See 465 465 * documentation for vmalloc for which of them are legal. 466 466 */ 467 - static inline void **__ptr_ring_init_queue_alloc(unsigned int size, gfp_t gfp) 467 + static inline void **__ptr_ring_init_queue_alloc_noprof(unsigned int size, gfp_t gfp) 468 468 { 469 469 if (size > KMALLOC_MAX_SIZE / sizeof(void *)) 470 470 return NULL; 471 - return kvmalloc_array(size, sizeof(void *), gfp | __GFP_ZERO); 471 + return kvmalloc_array_noprof(size, sizeof(void *), gfp | __GFP_ZERO); 472 472 } 473 473 474 474 static inline void __ptr_ring_set_size(struct ptr_ring *r, int size) ··· 484 484 r->batch = 1; 485 485 } 486 486 487 - static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp) 487 + static inline int ptr_ring_init_noprof(struct ptr_ring *r, int size, gfp_t gfp) 488 488 { 489 - r->queue = __ptr_ring_init_queue_alloc(size, gfp); 489 + r->queue = __ptr_ring_init_queue_alloc_noprof(size, gfp); 490 490 if (!r->queue) 491 491 return -ENOMEM; 492 492 ··· 497 497 498 498 return 0; 499 499 } 500 + #define ptr_ring_init(...) alloc_hooks(ptr_ring_init_noprof(__VA_ARGS__)) 500 501 501 502 /* 502 503 * Return entries into ring. Destroy entries that don't fit. ··· 588 587 * In particular if you consume ring in interrupt or BH context, you must 589 588 * disable interrupts/BH when doing so. 590 589 */ 591 - static inline int ptr_ring_resize(struct ptr_ring *r, int size, gfp_t gfp, 590 + static inline int ptr_ring_resize_noprof(struct ptr_ring *r, int size, gfp_t gfp, 592 591 void (*destroy)(void *)) 593 592 { 594 593 unsigned long flags; 595 - void **queue = __ptr_ring_init_queue_alloc(size, gfp); 594 + void **queue = __ptr_ring_init_queue_alloc_noprof(size, gfp); 596 595 void **old; 597 596 598 597 if (!queue) ··· 610 609 611 610 return 0; 612 611 } 612 + #define ptr_ring_resize(...) 
alloc_hooks(ptr_ring_resize_noprof(__VA_ARGS__)) 613 613 614 614 /* 615 615 * Note: producer lock is nested within consumer lock, so if you ··· 618 616 * In particular if you consume ring in interrupt or BH context, you must 619 617 * disable interrupts/BH when doing so. 620 618 */ 621 - static inline int ptr_ring_resize_multiple(struct ptr_ring **rings, 622 - unsigned int nrings, 623 - int size, 624 - gfp_t gfp, void (*destroy)(void *)) 619 + static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings, 620 + unsigned int nrings, 621 + int size, 622 + gfp_t gfp, void (*destroy)(void *)) 625 623 { 626 624 unsigned long flags; 627 625 void ***queues; 628 626 int i; 629 627 630 - queues = kmalloc_array(nrings, sizeof(*queues), gfp); 628 + queues = kmalloc_array_noprof(nrings, sizeof(*queues), gfp); 631 629 if (!queues) 632 630 goto noqueues; 633 631 634 632 for (i = 0; i < nrings; ++i) { 635 - queues[i] = __ptr_ring_init_queue_alloc(size, gfp); 633 + queues[i] = __ptr_ring_init_queue_alloc_noprof(size, gfp); 636 634 if (!queues[i]) 637 635 goto nomem; 638 636 } ··· 662 660 noqueues: 663 661 return -ENOMEM; 664 662 } 663 + #define ptr_ring_resize_multiple(...) \ 664 + alloc_hooks(ptr_ring_resize_multiple_noprof(__VA_ARGS__)) 665 665 666 666 static inline void ptr_ring_cleanup(struct ptr_ring *r, void (*destroy)(void *)) 667 667 {
+11 -8
include/linux/skb_array.h
··· 177 177 return PTR_RING_PEEK_CALL_ANY(&a->ring, __skb_array_len_with_tag); 178 178 } 179 179 180 - static inline int skb_array_init(struct skb_array *a, int size, gfp_t gfp) 180 + static inline int skb_array_init_noprof(struct skb_array *a, int size, gfp_t gfp) 181 181 { 182 - return ptr_ring_init(&a->ring, size, gfp); 182 + return ptr_ring_init_noprof(&a->ring, size, gfp); 183 183 } 184 + #define skb_array_init(...) alloc_hooks(skb_array_init_noprof(__VA_ARGS__)) 184 185 185 186 static void __skb_array_destroy_skb(void *ptr) 186 187 { ··· 199 198 return ptr_ring_resize(&a->ring, size, gfp, __skb_array_destroy_skb); 200 199 } 201 200 202 - static inline int skb_array_resize_multiple(struct skb_array **rings, 203 - int nrings, unsigned int size, 204 - gfp_t gfp) 201 + static inline int skb_array_resize_multiple_noprof(struct skb_array **rings, 202 + int nrings, unsigned int size, 203 + gfp_t gfp) 205 204 { 206 205 BUILD_BUG_ON(offsetof(struct skb_array, ring)); 207 - return ptr_ring_resize_multiple((struct ptr_ring **)rings, 208 - nrings, size, gfp, 209 - __skb_array_destroy_skb); 206 + return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings, 207 + nrings, size, gfp, 208 + __skb_array_destroy_skb); 210 209 } 210 + #define skb_array_resize_multiple(...) \ 211 + alloc_hooks(skb_array_resize_multiple_noprof(__VA_ARGS__)) 211 212 212 213 static inline void skb_array_cleanup(struct skb_array *a) 213 214 {
+8 -12
include/linux/skbuff.h
··· 3371 3371 * 3372 3372 * %NULL is returned if there is no free memory. 3373 3373 */ 3374 - static inline struct page *__dev_alloc_pages(gfp_t gfp_mask, 3374 + static inline struct page *__dev_alloc_pages_noprof(gfp_t gfp_mask, 3375 3375 unsigned int order) 3376 3376 { 3377 3377 /* This piece of code contains several assumptions. ··· 3384 3384 */ 3385 3385 gfp_mask |= __GFP_COMP | __GFP_MEMALLOC; 3386 3386 3387 - return alloc_pages_node(NUMA_NO_NODE, gfp_mask, order); 3387 + return alloc_pages_node_noprof(NUMA_NO_NODE, gfp_mask, order); 3388 3388 } 3389 + #define __dev_alloc_pages(...) alloc_hooks(__dev_alloc_pages_noprof(__VA_ARGS__)) 3389 3390 3390 - static inline struct page *dev_alloc_pages(unsigned int order) 3391 - { 3392 - return __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, order); 3393 - } 3391 + #define dev_alloc_pages(_order) __dev_alloc_pages(GFP_ATOMIC | __GFP_NOWARN, _order) 3394 3392 3395 3393 /** 3396 3394 * __dev_alloc_page - allocate a page for network Rx ··· 3398 3400 * 3399 3401 * %NULL is returned if there is no free memory. 3400 3402 */ 3401 - static inline struct page *__dev_alloc_page(gfp_t gfp_mask) 3403 + static inline struct page *__dev_alloc_page_noprof(gfp_t gfp_mask) 3402 3404 { 3403 - return __dev_alloc_pages(gfp_mask, 0); 3405 + return __dev_alloc_pages_noprof(gfp_mask, 0); 3404 3406 } 3407 + #define __dev_alloc_page(...) alloc_hooks(__dev_alloc_page_noprof(__VA_ARGS__)) 3405 3408 3406 - static inline struct page *dev_alloc_page(void) 3407 - { 3408 - return dev_alloc_pages(0); 3409 - } 3409 + #define dev_alloc_page() dev_alloc_pages(0) 3410 3410 3411 3411 /** 3412 3412 * dev_page_is_reusable - check whether a page can be reused for network Rx
+3 -5
include/linux/skmsg.h
··· 410 410 int sk_psock_msg_verdict(struct sock *sk, struct sk_psock *psock, 411 411 struct sk_msg *msg); 412 412 413 - static inline struct sk_psock_link *sk_psock_init_link(void) 414 - { 415 - return kzalloc(sizeof(struct sk_psock_link), 416 - GFP_ATOMIC | __GFP_NOWARN); 417 - } 413 + #define sk_psock_init_link() \ 414 + ((struct sk_psock_link *)kzalloc(sizeof(struct sk_psock_link), \ 415 + GFP_ATOMIC | __GFP_NOWARN)) 418 416 419 417 static inline void sk_psock_free_link(struct sk_psock_link *link) 420 418 {
+5
include/linux/slab.h
··· 744 744 */ 745 745 #define kmalloc_track_caller(...) kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE) 746 746 747 + #define kmalloc_track_caller_noprof(...) \ 748 + kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_) 749 + 747 750 static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, 748 751 int node) 749 752 { ··· 784 781 #define kvmalloc_node(...) alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__)) 785 782 786 783 #define kvmalloc(_size, _flags) kvmalloc_node(_size, _flags, NUMA_NO_NODE) 784 + #define kvmalloc_noprof(_size, _flags) kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE) 787 785 #define kvzalloc(_size, _flags) kvmalloc(_size, _flags|__GFP_ZERO) 788 786 789 787 #define kvzalloc_node(_size, _flags, _node) kvmalloc_node(_size, _flags|__GFP_ZERO, _node) ··· 801 797 802 798 #define kvmalloc_array(...) alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__)) 803 799 #define kvcalloc(_n, _size, _flags) kvmalloc_array(_n, _size, _flags|__GFP_ZERO) 800 + #define kvcalloc_noprof(_n, _size, _flags) kvmalloc_array_noprof(_n, _size, _flags|__GFP_ZERO) 804 801 805 802 extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags) 806 803 __realloc_size(3);
+6 -4
include/linux/sockptr.h
··· 117 117 return copy_to_sockptr_offset(dst, 0, src, size); 118 118 } 119 119 120 - static inline void *memdup_sockptr(sockptr_t src, size_t len) 120 + static inline void *memdup_sockptr_noprof(sockptr_t src, size_t len) 121 121 { 122 - void *p = kmalloc_track_caller(len, GFP_USER | __GFP_NOWARN); 122 + void *p = kmalloc_track_caller_noprof(len, GFP_USER | __GFP_NOWARN); 123 123 124 124 if (!p) 125 125 return ERR_PTR(-ENOMEM); ··· 129 129 } 130 130 return p; 131 131 } 132 + #define memdup_sockptr(...) alloc_hooks(memdup_sockptr_noprof(__VA_ARGS__)) 132 133 133 - static inline void *memdup_sockptr_nul(sockptr_t src, size_t len) 134 + static inline void *memdup_sockptr_nul_noprof(sockptr_t src, size_t len) 134 135 { 135 - char *p = kmalloc_track_caller(len + 1, GFP_KERNEL); 136 + char *p = kmalloc_track_caller_noprof(len + 1, GFP_KERNEL); 136 137 137 138 if (!p) 138 139 return ERR_PTR(-ENOMEM); ··· 144 143 p[len] = '\0'; 145 144 return p; 146 145 } 146 + #define memdup_sockptr_nul(...) alloc_hooks(memdup_sockptr_nul_noprof(__VA_ARGS__)) 147 147 148 148 static inline long strncpy_from_sockptr(char *dst, sockptr_t src, size_t count) 149 149 {
+10 -6
include/net/netlabel.h
··· 274 274 * on success, NULL on failure. 275 275 * 276 276 */ 277 - static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags) 277 + static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc_noprof(gfp_t flags) 278 278 { 279 279 struct netlbl_lsm_cache *cache; 280 280 281 - cache = kzalloc(sizeof(*cache), flags); 281 + cache = kzalloc_noprof(sizeof(*cache), flags); 282 282 if (cache) 283 283 refcount_set(&cache->refcount, 1); 284 284 return cache; 285 285 } 286 + #define netlbl_secattr_cache_alloc(...) \ 287 + alloc_hooks(netlbl_secattr_cache_alloc_noprof(__VA_ARGS__)) 286 288 287 289 /** 288 290 * netlbl_secattr_cache_free - Frees a netlbl_lsm_cache struct ··· 313 311 * on failure. 314 312 * 315 313 */ 316 - static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc(gfp_t flags) 314 + static inline struct netlbl_lsm_catmap *netlbl_catmap_alloc_noprof(gfp_t flags) 317 315 { 318 - return kzalloc(sizeof(struct netlbl_lsm_catmap), flags); 316 + return kzalloc_noprof(sizeof(struct netlbl_lsm_catmap), flags); 319 317 } 318 + #define netlbl_catmap_alloc(...) alloc_hooks(netlbl_catmap_alloc_noprof(__VA_ARGS__)) 320 319 321 320 /** 322 321 * netlbl_catmap_free - Free a LSM secattr catmap ··· 379 376 * pointer on success, or NULL on failure. 380 377 * 381 378 */ 382 - static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc(gfp_t flags) 379 + static inline struct netlbl_lsm_secattr *netlbl_secattr_alloc_noprof(gfp_t flags) 383 380 { 384 - return kzalloc(sizeof(struct netlbl_lsm_secattr), flags); 381 + return kzalloc_noprof(sizeof(struct netlbl_lsm_secattr), flags); 385 382 } 383 + #define netlbl_secattr_alloc(...) alloc_hooks(netlbl_secattr_alloc_noprof(__VA_ARGS__)) 386 384 387 385 /** 388 386 * netlbl_secattr_free - Frees a netlbl_lsm_secattr struct
+3 -2
include/net/netlink.h
··· 1891 1891 * @src: netlink attribute to duplicate from 1892 1892 * @gfp: GFP mask 1893 1893 */ 1894 - static inline void *nla_memdup(const struct nlattr *src, gfp_t gfp) 1894 + static inline void *nla_memdup_noprof(const struct nlattr *src, gfp_t gfp) 1895 1895 { 1896 - return kmemdup(nla_data(src), nla_len(src), gfp); 1896 + return kmemdup_noprof(nla_data(src), nla_len(src), gfp); 1897 1897 } 1898 + #define nla_memdup(...) alloc_hooks(nla_memdup_noprof(__VA_ARGS__)) 1898 1899 1899 1900 /** 1900 1901 * nla_nest_start_noflag - Start a new level of nested attributes
+3 -2
include/net/request_sock.h
··· 127 127 } 128 128 129 129 static inline struct request_sock * 130 - reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener, 130 + reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener, 131 131 bool attach_listener) 132 132 { 133 133 struct request_sock *req; 134 134 135 - req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN); 135 + req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN); 136 136 if (!req) 137 137 return NULL; 138 138 req->rsk_listener = NULL; ··· 157 157 158 158 return req; 159 159 } 160 + #define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__)) 160 161 161 162 static inline void __reqsk_free(struct request_sock *req) 162 163 {
+3 -2
include/net/tcx.h
··· 75 75 return rcu_dereference_rtnl(dev->tcx_egress); 76 76 } 77 77 78 - static inline struct bpf_mprog_entry *tcx_entry_create(void) 78 + static inline struct bpf_mprog_entry *tcx_entry_create_noprof(void) 79 79 { 80 - struct tcx_entry *tcx = kzalloc(sizeof(*tcx), GFP_KERNEL); 80 + struct tcx_entry *tcx = kzalloc_noprof(sizeof(*tcx), GFP_KERNEL); 81 81 82 82 if (tcx) { 83 83 bpf_mprog_bundle_init(&tcx->bundle); ··· 85 85 } 86 86 return NULL; 87 87 } 88 + #define tcx_entry_create(...) alloc_hooks(tcx_entry_create_noprof(__VA_ARGS__)) 88 89 89 90 static inline void tcx_entry_free(struct bpf_mprog_entry *entry) 90 91 {
+4 -2
net/sunrpc/auth_gss/auth_gss_internal.h
··· 23 23 } 24 24 25 25 static inline const void * 26 - simple_get_netobj(const void *p, const void *end, struct xdr_netobj *dest) 26 + simple_get_netobj_noprof(const void *p, const void *end, struct xdr_netobj *dest) 27 27 { 28 28 const void *q; 29 29 unsigned int len; ··· 35 35 if (unlikely(q > end || q < p)) 36 36 return ERR_PTR(-EFAULT); 37 37 if (len) { 38 - dest->data = kmemdup(p, len, GFP_KERNEL); 38 + dest->data = kmemdup_noprof(p, len, GFP_KERNEL); 39 39 if (unlikely(dest->data == NULL)) 40 40 return ERR_PTR(-ENOMEM); 41 41 } else ··· 43 43 dest->len = len; 44 44 return q; 45 45 } 46 + 47 + #define simple_get_netobj(...) alloc_hooks(simple_get_netobj_noprof(__VA_ARGS__))