 mm/slab.c | 9 +++++----
 1 file changed, 5 insertions(+), 4 deletions(-)

--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3604,13 +3604,14 @@
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released. Called with disabled ints.
  */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp,
+    void *caller)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
-	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+	objp = cache_free_debugcheck(cachep, objp, caller);
 
 	kmemcheck_slab_free(cachep, objp, obj_size(cachep));
 
@@ -3801,7 +3802,7 @@
 	debug_check_no_locks_freed(objp, obj_size(cachep));
 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
 		debug_check_no_obj_freed(objp, obj_size(cachep));
-	__cache_free(cachep, objp);
+	__cache_free(cachep, objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 
 	trace_kmem_cache_free(_RET_IP_, objp);
@@ -3831,7 +3832,7 @@
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
 	debug_check_no_obj_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp);
+	__cache_free(c, (void *)objp, __builtin_return_address(0));
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
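
Note on the pattern: the caller is now captured with __builtin_return_address(0)
in the exported entry points (kmem_cache_free() and kfree()) and threaded down
into __cache_free(), so cache_free_debugcheck() records the external call site
that freed the object rather than an address inside the allocator itself. The
sketch below shows the same pattern in a minimal, self-contained userspace
program; all names in it (obj_free, __obj_free, last_freer) are hypothetical
illustrations, not kernel code. It builds with GCC or Clang (cc -O2 demo.c).

	#include <stdio.h>
	#include <stdlib.h>

	/* Debug bookkeeping: remember who last freed an object. */
	static void *last_freer;

	/*
	 * Internal helper, analogous to __cache_free(): it takes the caller
	 * explicitly instead of evaluating __builtin_return_address(0)
	 * itself, which could record an address inside the allocator.
	 */
	static void __obj_free(void *obj, void *caller)
	{
		last_freer = caller;	/* analogous to cache_free_debugcheck() */
		free(obj);
	}

	/*
	 * Exported entry point, analogous to kmem_cache_free()/kfree().
	 * noinline keeps the level-0 return address pointing at the
	 * external call site for this demo.
	 */
	__attribute__((noinline)) void obj_free(void *obj)
	{
		__obj_free(obj, __builtin_return_address(0));
	}

	int main(void)
	{
		void *p = malloc(32);

		obj_free(p);	/* last_freer now points into main() */
		printf("freed by caller at %p\n", last_freer);
		return 0;
	}

Taking the caller as an explicit parameter keeps the recorded address correct
regardless of the compiler's inlining decisions: with the old code, a
non-inlined __cache_free() would record its own return address inside
kmem_cache_free()/kfree() instead of the real caller of those functions.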