Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

radix tree test suite: align kmem_cache_alloc_bulk() with kernel behavior.

When kmem_cache_alloc_bulk() fails to allocate, leave the freed pointers
in the array. This more accurately simulates the kernel's behavior and
makes it possible to test for double-free scenarios.
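
For illustration, a failure-path check in the test suite might look like
the sketch below. It leans on the suite's existing kmem_cache_create(),
kmem_cache_set_non_kernel() and kmem_cache_alloc_bulk() stubs; the
function name, cache parameters and counts are illustrative assumptions,
not part of this patch.

#include <assert.h>
#include <linux/slab.h>	/* the test suite's stub header, an assumption */

static void bulk_alloc_fail_check(void)	/* hypothetical test */
{
	struct kmem_cache *cachep;
	void *objs[8] = { NULL };

	cachep = kmem_cache_create("test", sizeof(struct radix_tree_node),
				   0, 0, NULL);
	/* permit only 4 allocations when __GFP_DIRECT_RECLAIM is not set */
	kmem_cache_set_non_kernel(cachep, 4);

	/* the 5th object cannot be allocated, so the call returns 0 ... */
	assert(kmem_cache_alloc_bulk(cachep,
				     GFP_KERNEL & ~__GFP_DIRECT_RECLAIM,
				     8, objs) == 0);

	/* ... but the four already-freed pointers are left behind in the
	 * array, exactly as the kernel's error path would leave them */
	assert(objs[0] != NULL);
}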

Link: https://lkml.kernel.org/r/20231027033845.90608-5-zhangpeng.00@bytedance.com
Signed-off-by: Peng Zhang <zhangpeng.00@bytedance.com>
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mateusz Guzik <mjguzik@gmail.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Mike Christie <michael.christie@oracle.com>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

Authored by Peng Zhang, committed by Andrew Morton
46c99e26 fd32e4e9

tools/testing/radix-tree/linux.c: +33 -12
···
 	return p;
 }
 
-void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
+void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
-	uatomic_dec(&nr_allocated);
-	uatomic_dec(&cachep->nr_allocated);
-	if (kmalloc_verbose)
-		printf("Freeing %p to slab\n", objp);
 	if (cachep->nr_objs > 10 || cachep->align) {
 		memset(objp, POISON_FREE, cachep->size);
 		free(objp);
···
 		node->parent = cachep->objs;
 		cachep->objs = node;
 	}
+}
+
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
+{
+	uatomic_dec(&nr_allocated);
+	uatomic_dec(&cachep->nr_allocated);
+	if (kmalloc_verbose)
+		printf("Freeing %p to slab\n", objp);
+	__kmem_cache_free_locked(cachep, objp);
 }
 
 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
···
 	if (kmalloc_verbose)
 		pr_debug("Bulk alloc %lu\n", size);
 
-	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
-		if (cachep->non_kernel < size)
-			return 0;
-
-		cachep->non_kernel -= size;
-	}
-
 	pthread_mutex_lock(&cachep->lock);
 	if (cachep->nr_objs >= size) {
 		struct radix_tree_node *node;
 
 		for (i = 0; i < size; i++) {
+			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+				if (!cachep->non_kernel)
+					break;
+				cachep->non_kernel--;
+			}
+
 			node = cachep->objs;
 			cachep->nr_objs--;
 			cachep->objs = node->parent;
···
 	} else {
 		pthread_mutex_unlock(&cachep->lock);
 		for (i = 0; i < size; i++) {
+			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+				if (!cachep->non_kernel)
+					break;
+				cachep->non_kernel--;
+			}
+
 			if (cachep->align) {
 				posix_memalign(&p[i], cachep->align,
 					       cachep->size);
 			} else {
 				p[i] = malloc(cachep->size);
+				if (!p[i])
+					break;
 			}
 			if (cachep->ctor)
 				cachep->ctor(p[i]);
 			else if (gfp & __GFP_ZERO)
 				memset(p[i], 0, cachep->size);
 		}
+	}
+
+	if (i < size) {
+		size = i;
+		pthread_mutex_lock(&cachep->lock);
+		for (i = 0; i < size; i++)
+			__kmem_cache_free_locked(cachep, p[i]);
+		pthread_mutex_unlock(&cachep->lock);
+		return 0;
 	}
 
 	for (i = 0; i < size; i++) {
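
Factoring out __kmem_cache_free_locked() lets the rollback free the
partially allocated objects while cachep->lock is already held, and
without touching nr_allocated, which is only incremented once the whole
bulk request has succeeded. The caller bug this makes reproducible is
sketched below; fill_nodes() and its parameters are hypothetical, not
from this commit.

#include <errno.h>

/* hypothetical caller: cachep comes from the surrounding test code */
static int fill_nodes(struct kmem_cache *cachep, void **nodes, size_t n)
{
	if (kmem_cache_alloc_bulk(cachep, GFP_KERNEL, n, nodes) == 0) {
		/* WRONG: the failure path already freed every pointer it
		 * left in nodes[], so this second free is a double free,
		 * the scenario the test suite can now catch. */
		kmem_cache_free_bulk(cachep, n, nodes);
		return -ENOMEM;
	}
	return 0;
}

Because the failure path now mirrors the kernel's contract (return 0
after freeing any partial allocations, leaving the array contents as-is),
the test suite sees the same stale pointers a buggy kernel caller would.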