Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

radix tree test suite: add support for slab bulk APIs

Add support for kmem_cache_free_bulk() and kmem_cache_alloc_bulk() to the
radix tree test suite.

Link: https://lkml.kernel.org/r/20220906194824.2110408-6-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Liam R. Howlett and committed by Andrew Morton.
cc86e0c2 000a4493

+120 -2
+4
tools/include/linux/slab.h
··· 41 41 unsigned int align, unsigned int flags, 42 42 void (*ctor)(void *)); 43 43 44 + void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list); 45 + int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size, 46 + void **list); 47 + 44 48 #endif /* _TOOLS_SLAB_H */
+116 -2
tools/testing/radix-tree/linux.c
··· 93 93 return p; 94 94 } 95 95 96 - void kmem_cache_free(struct kmem_cache *cachep, void *objp) 96 + void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp) 97 97 { 98 98 assert(objp); 99 99 uatomic_dec(&nr_allocated); 100 100 uatomic_dec(&cachep->nr_allocated); 101 101 if (kmalloc_verbose) 102 102 printf("Freeing %p to slab\n", objp); 103 - pthread_mutex_lock(&cachep->lock); 104 103 if (cachep->nr_objs > 10 || cachep->align) { 105 104 memset(objp, POISON_FREE, cachep->size); 106 105 free(objp); ··· 109 110 node->parent = cachep->objs; 110 111 cachep->objs = node; 111 112 } 113 + } 114 + 115 + void kmem_cache_free(struct kmem_cache *cachep, void *objp) 116 + { 117 + pthread_mutex_lock(&cachep->lock); 118 + kmem_cache_free_locked(cachep, objp); 112 119 pthread_mutex_unlock(&cachep->lock); 120 + } 121 + 122 + void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list) 123 + { 124 + if (kmalloc_verbose) 125 + pr_debug("Bulk free %p[0-%lu]\n", list, size - 1); 126 + 127 + pthread_mutex_lock(&cachep->lock); 128 + for (int i = 0; i < size; i++) 129 + kmem_cache_free_locked(cachep, list[i]); 130 + pthread_mutex_unlock(&cachep->lock); 131 + } 132 + 133 + int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size, 134 + void **p) 135 + { 136 + size_t i; 137 + 138 + if (kmalloc_verbose) 139 + pr_debug("Bulk alloc %lu\n", size); 140 + 141 + if (!(gfp & __GFP_DIRECT_RECLAIM)) { 142 + if (cachep->non_kernel < size) 143 + return 0; 144 + 145 + cachep->non_kernel -= size; 146 + } 147 + 148 + pthread_mutex_lock(&cachep->lock); 149 + if (cachep->nr_objs >= size) { 150 + struct radix_tree_node *node; 151 + 152 + for (i = 0; i < size; i++) { 153 + node = cachep->objs; 154 + cachep->nr_objs--; 155 + cachep->objs = node->parent; 156 + p[i] = node; 157 + node->parent = NULL; 158 + } 159 + pthread_mutex_unlock(&cachep->lock); 160 + } else { 161 + pthread_mutex_unlock(&cachep->lock); 162 + for (i = 0; i < size; i++) { 163 + if 
(cachep->align) { 164 + posix_memalign(&p[i], cachep->align, 165 + cachep->size * size); 166 + } else { 167 + p[i] = malloc(cachep->size * size); 168 + } 169 + if (cachep->ctor) 170 + cachep->ctor(p[i]); 171 + else if (gfp & __GFP_ZERO) 172 + memset(p[i], 0, cachep->size); 173 + } 174 + } 175 + 176 + for (i = 0; i < size; i++) { 177 + uatomic_inc(&nr_allocated); 178 + uatomic_inc(&cachep->nr_allocated); 179 + uatomic_inc(&cachep->nr_tallocated); 180 + if (kmalloc_verbose) 181 + printf("Allocating %p from slab\n", p[i]); 182 + } 183 + 184 + return size; 113 185 } 114 186 115 187 struct kmem_cache * ··· 199 129 ret->ctor = ctor; 200 130 ret->non_kernel = 0; 201 131 return ret; 132 + } 133 + 134 + /* 135 + * Test the test infrastructure for kem_cache_alloc/free and bulk counterparts. 136 + */ 137 + void test_kmem_cache_bulk(void) 138 + { 139 + int i; 140 + void *list[12]; 141 + static struct kmem_cache *test_cache, *test_cache2; 142 + 143 + /* 144 + * Testing the bulk allocators without aligned kmem_cache to force the 145 + * bulk alloc/free to reuse 146 + */ 147 + test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL); 148 + 149 + for (i = 0; i < 5; i++) 150 + list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM); 151 + 152 + for (i = 0; i < 5; i++) 153 + kmem_cache_free(test_cache, list[i]); 154 + assert(test_cache->nr_objs == 5); 155 + 156 + kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list); 157 + kmem_cache_free_bulk(test_cache, 5, list); 158 + 159 + for (i = 0; i < 12 ; i++) 160 + list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM); 161 + 162 + for (i = 0; i < 12; i++) 163 + kmem_cache_free(test_cache, list[i]); 164 + 165 + /* The last free will not be kept around */ 166 + assert(test_cache->nr_objs == 11); 167 + 168 + /* Aligned caches will immediately free */ 169 + test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL); 170 + 171 + kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, 
list); 172 + kmem_cache_free_bulk(test_cache2, 10, list); 173 + assert(!test_cache2->nr_objs); 174 + 175 + 202 176 }