// SPDX-License-Identifier: GPL-2.0
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* mm_account_reclaimed_pages() */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/swab.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/vmalloc.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmsan.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/kmemleak.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>
#include <linux/random.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/sort.h>

#include <linux/debugfs.h>
#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock (Spinlock)
 *   3. kmem_cache->cpu_slab->lock (Local lock)
 *   4. slab_lock(slab) (Only on some arches)
 *   5. object_map_lock (Only for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *   Also synchronizes memory hotplug callbacks.
 *
 *   slab_lock
 *
 *   The slab_lock is a wrapper around the page lock, thus it is a bit
 *   spinlock.
 *
 *   The slab_lock is only used on arches that do not have the ability
 *   to do a cmpxchg_double. It only protects:
 *
 *	A. slab->freelist	-> List of free objects in a slab
 *	B. slab->inuse		-> Number of objects in use
 *	C. slab->objects	-> Number of objects in slab
 *	D. slab->frozen		-> frozen state
 *
 *   Frozen slabs
 *
 *   If a slab is frozen then it is exempt from list management. It is
 *   the cpu slab which is actively allocated from by the processor that
 *   froze it and it is not on any list. The processor that froze the
 *   slab is the one who can perform list operations on the slab. Other
 *   processors may put objects onto the freelist but the processor that
 *   froze the slab is the only one that can retrieve the objects from the
 *   slab's freelist.
 *
 *   CPU partial slabs
 *
 *   Partially empty slabs are cached on the CPU partial list to speed up
 *   the allocation process.
 *   These slabs are not frozen, but are also exempt from list management,
 *   by clearing the PG_workingset flag when moving out of the node
 *   partial list. Please see __slab_free() for more details.
 *
 *   To sum up, the current scheme is:
 *   - node partial slab: PG_Workingset && !frozen
 *   - cpu partial slab:  !PG_Workingset && !frozen
 *   - cpu slab:          !PG_Workingset && frozen
 *   - full slab:         !PG_Workingset && !frozen
 *
 *   list_lock
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists, nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *
 *   For debug caches, all allocations are forced to go through a list_lock
 *   protected region to serialize against concurrent validation.
 *
 *   cpu_slab->lock local lock
 *
 *   This lock protects slowpath manipulation of all kmem_cache_cpu fields
 *   except the stat counters. This is a percpu structure manipulated only by
 *   the local cpu, so the lock protects against being preempted or interrupted
 *   by an irq. Fast path operations rely on lockless operations instead.
 *
 *   On PREEMPT_RT, the local lock neither disables interrupts nor preemption,
 *   which means the lockless fastpath cannot be used as it might interfere with
 *   in-progress slow path operations. In this case the local lock is always
 *   taken but it still utilizes the freelist for the common operations.
 *
 *   lockless fastpaths
 *
 *   The fast path allocation (slab_alloc_node()) and freeing (do_slab_free())
 *   are fully lockless when satisfied from the percpu slab (and when
 *   cmpxchg_double is possible to use, otherwise slab_lock is taken).
 *   They also don't disable preemption or migration or irqs. They rely on
 *   the transaction id (tid) field to detect being preempted or moved to
 *   another cpu.
 *
 *   irq, preemption, migration considerations
 *
 *   Interrupts are disabled as part of list_lock or local_lock operations, or
 *   around the slab_lock operation, in order to make the slab allocator safe
 *   to use in the context of an irq.
 *
 *   In addition, preemption (or migration on PREEMPT_RT) is disabled in the
 *   allocation slowpath, bulk allocation, and put_cpu_partial(), so that the
 *   local cpu doesn't change in the process and e.g. the kmem_cache_cpu pointer
 *   doesn't have to be revalidated in each section protected by the local lock.
 *
 *   SLUB assigns one slab for allocation to each processor.
 *   Allocations only occur from these slabs called cpu slabs.
 *
 *   Slabs with free elements are kept on a partial list and during regular
 *   operations no list for full slabs is used. If an object in a full slab is
 *   freed then the slab will show up again on the partial lists.
 *   We track full slabs for debugging purposes though because otherwise we
 *   cannot scan all objects.
 *
 *   Slabs are freed when they become empty. Teardown and setup is
 *   minimal so we rely on the page allocators per cpu caches for
 *   fast frees and allocs.
 *
 *   slab->frozen		The slab is frozen and exempt from list processing.
 *				This means that the slab is dedicated to a purpose
 *				such as satisfying allocations for a specific
 *				processor. Objects may be freed in the slab while
 *				it is frozen but slab_free will then skip the usual
 *				list operations. It is up to the processor holding
 *				the slab to integrate the slab into the slab lists
 *				when the slab is no longer needed.
 *
 *				One use of this flag is to mark slabs that are
 *				used for allocations. Then such a slab becomes a cpu
 *				slab. The cpu slab may be equipped with an additional
 *				freelist that allows lockless access to
 *				free objects in addition to the regular freelist
 *				that requires the slab lock.
 *
 *   SLAB_DEBUG_FLAGS	Slab requires special handling due to debug
 *				options set. This moves slab handling out of
 *				the fast path and disables lockless freelists.
 */
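
/*
 * Illustrative sketch only (not kernel code): the tid-based lockless
 * fastpath described above can be modeled in userspace as one atomic
 * compare-exchange over a (freelist, tid) pair. All names below are
 * hypothetical; the real fastpath is slab_alloc_node() with
 * this_cpu_cmpxchg_double().
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	struct cpu_freelist {
 *		void *head;	// next free object
 *		uintptr_t tid;	// bumped whenever the slowpath intervenes
 *	};
 *
 *	// Pop one object, or return NULL if the (head, tid) pair changed
 *	// under us - which is how preemption/migration is detected.
 *	static void *try_pop(_Atomic(struct cpu_freelist) *fl,
 *			     void *(*next)(void *))
 *	{
 *		struct cpu_freelist old = atomic_load(fl), new;
 *
 *		if (!old.head)
 *			return NULL;		// take the slowpath
 *		new.head = next(old.head);
 *		new.tid = old.tid + 1;
 *		if (atomic_compare_exchange_strong(fl, &old, new))
 *			return old.head;
 *		return NULL;			// lost the race, retry
 *	}
 */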

/*
 * We could simply use migrate_disable()/enable(), but as long as that is a
 * function call even on !PREEMPT_RT, use the inline preempt_disable() there.
 */
#ifndef CONFIG_PREEMPT_RT
#define slub_get_cpu_ptr(var)	get_cpu_ptr(var)
#define slub_put_cpu_ptr(var)	put_cpu_ptr(var)
#define USE_LOCKLESS_FAST_PATH()	(true)
#else
#define slub_get_cpu_ptr(var)		\
({					\
	migrate_disable();		\
	this_cpu_ptr(var);		\
})
#define slub_put_cpu_ptr(var)		\
do {					\
	(void)(var);			\
	migrate_enable();		\
} while (0)
#define USE_LOCKLESS_FAST_PATH()	(false)
#endif

#ifndef CONFIG_SLUB_TINY
#define __fastpath_inline __always_inline
#else
#define __fastpath_inline
#endif

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DEFINE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
#endif	/* CONFIG_SLUB_DEBUG */

#ifdef CONFIG_NUMA
static DEFINE_STATIC_KEY_FALSE(strict_numa);
#endif

/* Structure holding parameters for get_partial() call chain */
struct partial_context {
	gfp_t flags;
	unsigned int orig_size;
	void *object;
};

static inline bool kmem_cache_debug(struct kmem_cache *s)
{
	return kmem_cache_debug_flags(s, SLAB_DEBUG_FLAGS);
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

#ifndef CONFIG_SLUB_TINY
/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10
#else
#define MIN_PARTIAL 0
#define MAX_PARTIAL 0
#endif

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slab_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since slab.objects is u15 */

/* Internal SLUB flags */
/* Poison object */
#define __OBJECT_POISON		__SLAB_FLAG_BIT(_SLAB_OBJECT_POISON)
/* Use cmpxchg_double */

#ifdef system_has_freelist_aba
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_BIT(_SLAB_CMPXCHG_DOUBLE)
#else
#define __CMPXCHG_DOUBLE	__SLAB_FLAG_UNUSED
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef SLAB_SUPPORTS_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
#endif

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
static void debugfs_slab_add(struct kmem_cache *);
#else
static inline void debugfs_slab_add(struct kmem_cache *s) { }
#endif

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failures of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Failures of slab freelist update */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS
};

#ifndef CONFIG_SLUB_TINY
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	union {
		struct {
			void **freelist;	/* Pointer to next available object */
			unsigned long tid;	/* Globally unique transaction id */
		};
		freelist_aba_t freelist_tid;
	};
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned int stat[NR_SLUB_STAT_ITEMS];
#endif
};
#endif /* CONFIG_SLUB_TINY */

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

static inline
void stat_add(const struct kmem_cache *s, enum stat_item si, int v)
{
#ifdef CONFIG_SLUB_STATS
	raw_cpu_add(s->cpu_slab->stat[si], v);
#endif
}

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
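
/*
 * Example usage of the iterator above (a hypothetical helper, not part
 * of this file): summing the partial-slab counts across all nodes.
 *
 *	static unsigned long count_partial_slabs(struct kmem_cache *s)
 *	{
 *		struct kmem_cache_node *n;
 *		unsigned long total = 0;
 *		int node;
 *
 *		for_each_kmem_cache_node(s, node, n)
 *			total += n->nr_partial;
 *		return total;
 *	}
 */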

/*
 * Tracks for which NUMA nodes we have kmem_cache_nodes allocated.
 * Corresponds to node_state[N_NORMAL_MEMORY], but can temporarily
 * differ during memory hotplug/hotremove operations.
 * Protected by slab_mutex.
 */
static nodemask_t slab_nodes;

#ifndef CONFIG_SLUB_TINY
/*
 * Workqueue used for flush_cpu_slab().
 */
static struct workqueue_struct *flushwq;
#endif

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

/*
 * Returns freelist pointer (ptr). With hardening, this is obfuscated
 * with an XOR of the address where the pointer is held and a per-cache
 * random number.
 */
static inline freeptr_t freelist_ptr_encode(const struct kmem_cache *s,
					    void *ptr, unsigned long ptr_addr)
{
	unsigned long encoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	encoded = (unsigned long)ptr ^ s->random ^ swab(ptr_addr);
#else
	encoded = (unsigned long)ptr;
#endif
	return (freeptr_t){.v = encoded};
}

static inline void *freelist_ptr_decode(const struct kmem_cache *s,
					freeptr_t ptr, unsigned long ptr_addr)
{
	void *decoded;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	decoded = (void *)(ptr.v ^ s->random ^ swab(ptr_addr));
#else
	decoded = (void *)ptr.v;
#endif
	return decoded;
}
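
/*
 * Standalone illustration (userspace sketch, not kernel code) of the
 * CONFIG_SLAB_FREELIST_HARDENED scheme above: the stored value is
 * ptr ^ random ^ swab(slot address), so decoding with the same key and
 * slot address round-trips, while a leaked stored value reveals neither
 * the raw pointer nor the per-cache key. All values below are made up.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		uint64_t random = 0xdeadbeefcafef00dULL; // per-cache key
 *		uint64_t ptr  = 0x0000700000001000ULL;	 // next free object
 *		uint64_t slot = 0x0000700000002040ULL;	 // where it's stored
 *
 *		uint64_t stored  = ptr ^ random ^ __builtin_bswap64(slot);
 *		uint64_t decoded = stored ^ random ^ __builtin_bswap64(slot);
 *
 *		printf("%d\n", decoded == ptr);		 // prints 1
 *		return 0;
 *	}
 */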

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	unsigned long ptr_addr;
	freeptr_t p;

	object = kasan_reset_tag(object);
	ptr_addr = (unsigned long)object + s->offset;
	p = *(freeptr_t *)(ptr_addr);
	return freelist_ptr_decode(s, p, ptr_addr);
}

#ifndef CONFIG_SLUB_TINY
static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetchw(object + s->offset);
}
#endif

/*
 * When running under KMSAN, get_freepointer_safe() may return an uninitialized
 * pointer value in the case the current thread loses the race for the next
 * memory chunk in the freelist. In that case this_cpu_cmpxchg_double() in
 * slab_alloc_node() will fail, so the uninitialized value won't be used, but
 * KMSAN will still check all arguments of cmpxchg because of imperfect
 * handling of inline assembly.
 * To work around this problem, we apply __no_kmsan_checks to ensure that
 * get_freepointer_safe() returns initialized memory.
 */
__no_kmsan_checks
static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	unsigned long freepointer_addr;
	freeptr_t p;

	if (!debug_pagealloc_enabled_static())
		return get_freepointer(s, object);

	object = kasan_reset_tag(object);
	freepointer_addr = (unsigned long)object + s->offset;
	copy_from_kernel_nofault(&p, (freeptr_t *)freepointer_addr, sizeof(p));
	return freelist_ptr_decode(s, p, freepointer_addr);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	unsigned long freeptr_addr = (unsigned long)object + s->offset;

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	BUG_ON(object == fp); /* naive detection of double free or corruption */
#endif

	freeptr_addr = (unsigned long)kasan_reset_tag((void *)freeptr_addr);
	*(freeptr_t *)freeptr_addr = freelist_ptr_encode(s, fp, freeptr_addr);
}

/*
 * See comment in calculate_sizes().
 */
static inline bool freeptr_outside_object(struct kmem_cache *s)
{
	return s->offset >= s->inuse;
}

/*
 * Return offset of the end of info block which is inuse + free pointer if
 * not overlapping with object.
 */
static inline unsigned int get_info_end(struct kmem_cache *s)
{
	if (freeptr_outside_object(s))
		return s->inuse + sizeof(void *);
	else
		return s->inuse;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)
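
/*
 * Example usage (hypothetical; do_something() is made up): visiting
 * every object in a slab, as the debug code below does when it builds
 * the object map.
 *
 *	void *addr = slab_address(slab);
 *	void *p;
 *
 *	for_each_object(p, s, addr, slab->objects)
 *		do_something(s, p);	// p advances by s->size each pass
 */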

static inline unsigned int order_objects(unsigned int order, unsigned int size)
{
	return ((unsigned int)PAGE_SIZE << order) / size;
}

static inline struct kmem_cache_order_objects oo_make(unsigned int order,
		unsigned int size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size)
	};

	return x;
}

static inline unsigned int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline unsigned int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
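
/*
 * Worked example of the packing above, assuming PAGE_SIZE == 4096:
 * for size == 256 and order == 1, order_objects() is
 * (4096 << 1) / 256 == 32, so oo_make(1, 256) stores
 * (1 << 16) + 32 == 0x10020; oo_order() then recovers 1 and
 * oo_objects() recovers 32.
 */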

#ifdef CONFIG_SLUB_CPU_PARTIAL
static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
	unsigned int nr_slabs;

	s->cpu_partial = nr_objects;

	/*
	 * We take the number of objects but actually limit the number of
	 * slabs on the per cpu partial list, in order to limit excessive
	 * growth of the list. For simplicity we assume that the slabs will
	 * be half-full.
	 */
	nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
	s->cpu_partial_slabs = nr_slabs;
}
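
/*
 * Worked example with assumed numbers: for nr_objects == 30 and
 * oo_objects(s->oo) == 16, the half-full assumption gives
 * nr_slabs = DIV_ROUND_UP(30 * 2, 16) == 4 slabs allowed on the
 * per cpu partial list.
 */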

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return s->cpu_partial_slabs;
}
#else
static inline void
slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
{
}

static inline unsigned int slub_get_cpu_partial(struct kmem_cache *s)
{
	return 0;
}
#endif /* CONFIG_SLUB_CPU_PARTIAL */

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct slab *slab)
{
	bit_spin_lock(PG_locked, &slab->__page_flags);
}

static __always_inline void slab_unlock(struct slab *slab)
{
	bit_spin_unlock(PG_locked, &slab->__page_flags);
}

static inline bool
__update_freelist_fast(struct slab *slab,
		      void *freelist_old, unsigned long counters_old,
		      void *freelist_new, unsigned long counters_new)
{
#ifdef system_has_freelist_aba
	freelist_aba_t old = { .freelist = freelist_old, .counter = counters_old };
	freelist_aba_t new = { .freelist = freelist_new, .counter = counters_new };

	return try_cmpxchg_freelist(&slab->freelist_counter.full, &old.full, new.full);
#else
	return false;
#endif
}

static inline bool
__update_freelist_slow(struct slab *slab,
		      void *freelist_old, unsigned long counters_old,
		      void *freelist_new, unsigned long counters_new)
{
	bool ret = false;

	slab_lock(slab);
	if (slab->freelist == freelist_old &&
	    slab->counters == counters_old) {
		slab->freelist = freelist_new;
		slab->counters = counters_new;
		ret = true;
	}
	slab_unlock(slab);

	return ret;
}
/*
 * Interrupts must be disabled (for the fallback code to work right), typically
 * by an _irqsave() lock variant. On PREEMPT_RT the preempt_disable(), which is
 * part of bit_spin_lock(), is sufficient because the policy is not to allow any
 * allocation/free operation in hardirq context. Therefore nothing can
 * interrupt the operation.
 */
static inline bool __slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	bool ret;

	if (USE_LOCKLESS_FAST_PATH())
		lockdep_assert_irqs_disabled();

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					    freelist_new, counters_new);
	} else {
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					    freelist_new, counters_new);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool slab_update_freelist(struct kmem_cache *s, struct slab *slab,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	bool ret;

	if (s->flags & __CMPXCHG_DOUBLE) {
		ret = __update_freelist_fast(slab, freelist_old, counters_old,
					    freelist_new, counters_new);
	} else {
		unsigned long flags;

		local_irq_save(flags);
		ret = __update_freelist_slow(slab, freelist_old, counters_old,
					    freelist_new, counters_new);
		local_irq_restore(flags);
	}
	if (likely(ret))
		return true;

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}
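
/*
 * Both paths above provide the same atomic contract. A userspace sketch
 * of the slow (locked) variant, with hypothetical names:
 *
 *	#include <pthread.h>
 *	#include <stdbool.h>
 *
 *	struct fake_slab {
 *		pthread_mutex_t lock;
 *		void *freelist;
 *		unsigned long counters;
 *	};
 *
 *	// Replace (freelist, counters) only if both still hold the
 *	// expected old values - the semantics the fast path gets from a
 *	// single double-width cmpxchg.
 *	static bool update_freelist_locked(struct fake_slab *s,
 *			void *old_f, unsigned long old_c,
 *			void *new_f, unsigned long new_c)
 *	{
 *		bool ret = false;
 *
 *		pthread_mutex_lock(&s->lock);
 *		if (s->freelist == old_f && s->counters == old_c) {
 *			s->freelist = new_f;
 *			s->counters = new_c;
 *			ret = true;
 *		}
 *		pthread_mutex_unlock(&s->lock);
 *		return ret;
 *	}
 */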

/*
 * kmalloc caches have fixed sizes (mostly powers of 2), and the kmalloc()
 * API family rounds the real request size up to one of them, so there can
 * be extra space beyond what was requested. Save the original request size
 * in the metadata area, for better debugging and sanity checks.
 */
static inline void set_orig_size(struct kmem_cache *s,
				void *object, unsigned int orig_size)
{
	void *p = kasan_reset_tag(object);

	if (!slub_debug_orig_size(s))
		return;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	*(unsigned int *)p = orig_size;
}

static inline unsigned int get_orig_size(struct kmem_cache *s, void *object)
{
	void *p = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return kfence_ksize(object);

	if (!slub_debug_orig_size(s))
		return s->object_size;

	p += get_info_end(s);
	p += sizeof(struct track) * 2;

	return *(unsigned int *)p;
}
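
/*
 * Where the original size lives, when slub_debug_orig_size(s) (this
 * just restates the pointer arithmetic above):
 *
 *	object + get_info_end(s)			-> 2 x struct track
 *	object + get_info_end(s) + 2 * sizeof(struct track)
 *							-> unsigned int orig_size
 *
 * For example (illustrative numbers), a kmalloc(100, ...) served from a
 * 128-byte cache records 100 here, letting the redzone checks cover the
 * 28 bytes that were never requested.
 */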

#ifdef CONFIG_SLUB_DEBUG
static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
static DEFINE_SPINLOCK(object_map_lock);

static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
		       struct slab *slab)
{
	void *addr = slab_address(slab);
	void *p;

	bitmap_zero(obj_map, slab->objects);

	for (p = slab->freelist; p; p = get_freepointer(s, p))
		set_bit(__obj_to_index(s, addr, p), obj_map);
}

#if IS_ENABLED(CONFIG_KUNIT)
static bool slab_add_kunit_errors(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
	return true;
}

bool slab_in_kunit_test(void)
{
	struct kunit_resource *resource;

	if (!kunit_get_current_test())
		return false;

	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
	if (!resource)
		return false;

	kunit_put_resource(resource);
	return true;
}
#else
static inline bool slab_add_kunit_errors(void) { return false; }
#endif

static inline unsigned int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static slab_flags_t slub_debug;
#endif

static char *slub_debug_string;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata. This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error. metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
	kmsan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kmsan_enable_current();
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct slab *slab, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = slab_address(slab);
	object = kasan_reset_tag(object);
	object = restore_red_left(s, object);
	if (object < base || object >= base + slab->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS,
			16, 1, kasan_reset_tag((void *)addr), length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	p = object + get_info_end(s);

	return kasan_reset_tag(p + alloc);
}

#ifdef CONFIG_STACKDEPOT
static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t handle;
	unsigned long entries[TRACK_ADDRS_COUNT];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return handle;
}
#else
static inline depot_stack_handle_t set_track_prepare(void)
{
	return 0;
}
#endif

static void set_track_update(struct kmem_cache *s, void *object,
			     enum track_item alloc, unsigned long addr,
			     depot_stack_handle_t handle)
{
	struct track *p = get_track(s, object, alloc);

#ifdef CONFIG_STACKDEPOT
	p->handle = handle;
#endif
	p->addr = addr;
	p->cpu = smp_processor_id();
	p->pid = current->pid;
	p->when = jiffies;
}

static __always_inline void set_track(struct kmem_cache *s, void *object,
				      enum track_item alloc, unsigned long addr)
{
	depot_stack_handle_t handle = set_track_prepare();

	set_track_update(s, object, alloc, addr, handle);
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	struct track *p;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	p = get_track(s, object, TRACK_ALLOC);
	memset(p, 0, 2*sizeof(struct track));
}

static void print_track(const char *s, struct track *t, unsigned long pr_time)
{
	depot_stack_handle_t handle __maybe_unused;

	if (!t->addr)
		return;

	pr_err("%s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, pr_time - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKDEPOT
	handle = READ_ONCE(t->handle);
	if (handle)
		stack_depot_print(handle);
	else
		pr_err("object allocation/free stack trace missing\n");
#endif
}

void print_tracking(struct kmem_cache *s, void *object)
{
	unsigned long pr_time = jiffies;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC), pr_time);
	print_track("Freed", get_track(s, object, TRACK_FREE), pr_time);
}

static void print_slab_info(const struct slab *slab)
{
	pr_err("Slab 0x%p objects=%u used=%u fp=0x%p flags=%pGp\n",
	       slab, slab->objects, slab->inuse, slab->freelist,
	       &slab->__page_flags);
}

void skip_orig_size_check(struct kmem_cache *s, const void *object)
{
	set_orig_size(s, (void *)object, s->object_size);
}

static void __slab_bug(struct kmem_cache *s, const char *fmt, va_list argsp)
{
	struct va_format vaf;
	va_list args;

	va_copy(args, argsp);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s ? s->name : "<unknown>", print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");
	va_end(args);
}

static void slab_bug(struct kmem_cache *s, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__slab_bug(s, fmt, args);
	va_end(args);
}

__printf(2, 3)
static void slab_fix(struct kmem_cache *s, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = slab_address(slab);

	print_tracking(s, p);

	print_slab_info(slab);

	pr_err("Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR,         "Object   ", p,
		      min_t(unsigned int, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone  ", p + s->object_size,
			s->inuse - s->object_size);

	off = get_info_end(s);

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (slub_debug_orig_size(s))
		off += sizeof(unsigned int);

	off += kasan_metadata_size(s, false);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding  ", p + off,
			      size_from_object(s) - off);
}

static void object_err(struct kmem_cache *s, struct slab *slab,
			u8 *object, const char *reason)
{
	if (slab_add_kunit_errors())
		return;

	slab_bug(s, reason);
	print_trailer(s, slab, object);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

	WARN_ON(1);
}

static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
			       void **freelist, void *nextfree)
{
	if ((s->flags & SLAB_CONSISTENCY_CHECKS) &&
	    !check_valid_pointer(s, slab, nextfree) && freelist) {
		object_err(s, slab, *freelist, "Freechain corrupt");
		*freelist = NULL;
		slab_fix(s, "Isolate corrupted freechain");
		return true;
	}

	return false;
}

static void __slab_err(struct slab *slab)
{
	if (slab_in_kunit_test())
		return;

	print_slab_info(slab);
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);

	WARN_ON(1);
}

static __printf(3, 4) void slab_err(struct kmem_cache *s, struct slab *slab,
				    const char *fmt, ...)
{
	va_list args;

	if (slab_add_kunit_errors())
		return;

	va_start(args, fmt);
	__slab_bug(s, fmt, args);
	va_end(args);

	__slab_err(slab);
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = kasan_reset_tag(object);
	unsigned int poison_size = s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		/*
		 * Here and below, avoid overwriting the KMSAN shadow. Keeping
		 * the shadow makes it possible to distinguish uninit-value
		 * from use-after-free.
		 */
		memset_no_sanitize_memory(p - s->red_left_pad, val,
					  s->red_left_pad);

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			/*
			 * Redzone the extra allocated space by kmalloc than
			 * requested, and the poison size will be limited to
			 * the original request size accordingly.
			 */
			poison_size = get_orig_size(s, object);
		}
	}

	if (s->flags & __OBJECT_POISON) {
		memset_no_sanitize_memory(p, POISON_FREE, poison_size - 1);
		memset_no_sanitize_memory(p + poison_size - 1, POISON_END, 1);
	}

	if (s->flags & SLAB_RED_ZONE)
		memset_no_sanitize_memory(p + poison_size, val,
					  s->inuse - poison_size);
}

static void restore_bytes(struct kmem_cache *s, const char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring %s 0x%p-0x%p=0x%x", message, from, to - 1, data);
	memset(from, data, to - from);
}

#ifdef CONFIG_KMSAN
#define pad_check_attributes noinline __no_kmsan_checks
#else
#define pad_check_attributes
#endif

static pad_check_attributes int
check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
		       u8 *object, const char *what, u8 *start, unsigned int value,
		       unsigned int bytes, bool slab_obj_print)
{
	u8 *fault;
	u8 *end;
	u8 *addr = slab_address(slab);

	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(start), value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	if (slab_add_kunit_errors())
		goto skip_bug_print;

	pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
	       what, fault, end - 1, fault - addr, fault[0], value);

	if (slab_obj_print)
		object_err(s, slab, object, "Object corrupt");

skip_bug_print:
	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 *	Bytes of the object to be managed.
 *	If the freepointer may overlay the object then the free
 *	pointer is at the middle of the object.
 *
 *	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *	0xa5 (POISON_END)
 *
 * object + s->object_size
 *	Padding to reach word boundary. This is also used for Redzoning.
 *	Padding is extended by another word if Redzoning is enabled and
 *	object_size == inuse.
 *
 *	We fill with 0xbb (SLUB_RED_INACTIVE) for inactive objects and with
 *	0xcc (SLUB_RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *	Meta data starts here.
 *
 *	A. Free pointer (if we cannot overwrite object on free)
 *	B. Tracking data for SLAB_STORE_USER
 *	C. Original request size for kmalloc object (SLAB_STORE_USER enabled)
 *	D. Padding to reach required alignment boundary or at minimum
 *		one word if debugging is on to be able to detect writes
 *		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
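
/*
 * A concrete (purely illustrative) instance of the layout above for a
 * small object with SLAB_RED_ZONE and SLAB_STORE_USER on a 64-bit
 * kernel:
 *
 *	left redzone (red_left_pad)
 *	object (object_size bytes; 0x6b...0xa5 when poisoned and free)
 *	right redzone / padding to word boundary (inuse - object_size)
 *	free pointer (outside the object because of redzoning)
 *	2 x struct track (SLAB_STORE_USER)
 *	0x5a padding up to s->size
 *
 * The exact offsets are computed by calculate_sizes(); nothing here is
 * normative.
 */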

static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
{
	unsigned long off = get_info_end(s);	/* The end of info */

	if (s->flags & SLAB_STORE_USER) {
		/* We also have user information there */
		off += 2 * sizeof(struct track);

		if (s->flags & SLAB_KMALLOC)
			off += sizeof(unsigned int);
	}

	off += kasan_metadata_size(s, false);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, slab, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off, true);
}

/* Check the pad bytes at the end of a slab page */
static pad_check_attributes void
slab_pad_check(struct kmem_cache *s, struct slab *slab)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	u8 *pad;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return;

	start = slab_address(slab);
	length = slab_size(slab);
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return;

	pad = end - remainder;
	metadata_access_enable();
	fault = memchr_inv(kasan_reset_tag(pad), POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_bug(s, "Padding overwritten. 0x%p-0x%p @offset=%tu",
		 fault, end - 1, fault - start);
	print_section(KERN_ERR, "Padding ", pad, remainder);
	__slab_err(slab);

	restore_bytes(s, "slab padding", POISON_INUSE, fault, end);
}

static int check_object(struct kmem_cache *s, struct slab *slab,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;
	unsigned int orig_size, kasan_meta_size;
	int ret = 1;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
			object - s->red_left_pad, val, s->red_left_pad, ret))
			ret = 0;

		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
			endobject, val, s->inuse - s->object_size, ret))
			ret = 0;

		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
			orig_size = get_orig_size(s, object);

			if (s->object_size > orig_size &&
				!check_bytes_and_report(s, slab, object,
					"kmalloc Redzone", p + orig_size,
					val, s->object_size - orig_size, ret)) {
				ret = 0;
			}
		}
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size, ret))
				ret = 0;
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON)) {
			/*
			 * KASAN can save its free meta data inside of the
			 * object at offset 0. Thus, skip checking the part of
			 * the redzone that overlaps with the meta data.
			 */
			kasan_meta_size = kasan_metadata_size(s, true);
			if (kasan_meta_size < s->object_size - 1 &&
			    !check_bytes_and_report(s, slab, p, "Poison",
					p + kasan_meta_size, POISON_FREE,
					s->object_size - kasan_meta_size - 1, ret))
				ret = 0;
			if (kasan_meta_size < s->object_size &&
			    !check_bytes_and_report(s, slab, p, "End Poison",
					p + s->object_size - 1, POISON_END, 1, ret))
				ret = 0;
		}
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		if (!check_pad_bytes(s, slab, p))
			ret = 0;
	}

	/*
	 * Cannot check freepointer while object is allocated if
	 * object and freepointer overlap.
	 */
	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
		object_err(s, slab, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		ret = 0;
	}

	return ret;
}

static int check_slab(struct kmem_cache *s, struct slab *slab)
{
	int maxobj;

	if (!folio_test_slab(slab_folio(slab))) {
		slab_err(s, slab, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(slab_order(slab), s->size);
	if (slab->objects > maxobj) {
		slab_err(s, slab, "objects %u > max %u",
			 slab->objects, maxobj);
		return 0;
	}
	if (slab->inuse > slab->objects) {
		slab_err(s, slab, "inuse %u > max %u",
			 slab->inuse, slab->objects);
		return 0;
	}
	if (slab->frozen) {
		slab_err(s, slab, "Slab disabled since SLUB metadata consistency check failed");
		return 0;
	}

	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, slab);
	return 1;
}

/*
 * Determine if a certain object in a slab is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static bool on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = slab->freelist;
	while (fp && nr <= slab->objects) {
		if (fp == search)
			return true;
		if (!check_valid_pointer(s, slab, fp)) {
			if (object) {
				object_err(s, slab, object,
					   "Freechain corrupt");
				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, slab, "Freepointer corrupt");
				slab->freelist = NULL;
				slab->inuse = slab->objects;
				slab_fix(s, "Freelist cleared");
				return false;
			}
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	if (nr > slab->objects) {
		slab_err(s, slab, "Freelist cycle detected");
		slab->freelist = NULL;
		slab->inuse = slab->objects;
		slab_fix(s, "Freelist cleared");
		return false;
	}

	max_objects = order_objects(slab_order(slab), s->size);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (slab->objects != max_objects) {
		slab_err(s, slab, "Wrong number of objects. Found %d but should be %d",
			 slab->objects, max_objects);
		slab->objects = max_objects;
		slab_fix(s, "Number of objects adjusted");
	}
	if (slab->inuse != slab->objects - nr) {
		slab_err(s, slab, "Wrong object count. Counter is %d but counted were %d",
			 slab->inuse, slab->objects - nr);
		slab->inuse = slab->objects - nr;
		slab_fix(s, "Object count adjusted");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct slab *slab, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, slab->inuse,
			slab->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&slab->slab_list, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct slab *slab)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&slab->slab_list);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_inc(&n->nr_slabs);
	atomic_long_add(objects, &n->total_objects);
}

static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, void *object)
{
	if (!kmem_cache_debug_flags(s, SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static
void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr)
{
	if (!kmem_cache_debug_flags(s, SLAB_POISON))
		return;

	metadata_access_enable();
	memset(kasan_reset_tag(addr), POISON_INUSE, slab_size(slab));
	metadata_access_disable();
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct slab *slab, void *object)
{
	if (!check_slab(s, slab))
		return 0;

	if (!check_valid_pointer(s, slab, object)) {
		object_err(s, slab, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline bool alloc_debug_processing(struct kmem_cache *s,
			struct slab *slab, void *object, int orig_size)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, slab, object))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	trace(s, slab, object, 1);
	set_orig_size(s, object, orig_size);
	init_object(s, object, SLUB_RED_ACTIVE);
	return true;

bad:
	if (folio_test_slab(slab_folio(slab))) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		slab->inuse = slab->objects;
		slab->freelist = NULL;
		slab->frozen = 1; /* mark consistency-failed slab as frozen */
	}
	return false;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct slab *slab, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, slab, object)) {
		slab_err(s, slab, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, slab, object)) {
		object_err(s, slab, object, "Object already free");
		return 0;
	}

	if (!check_object(s, slab, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != slab->slab_cache)) {
		if (!folio_test_slab(slab_folio(slab))) {
			slab_err(s, slab, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!slab->slab_cache) {
			slab_err(NULL, slab, "No slab cache for object 0x%p",
				 object);
		} else {
			object_err(s, slab, object,
					"page slab pointer corrupt.");
		}
		return 0;
	}
	return 1;
}

/*
 * Parse a block of slab_debug options. Blocks are delimited by ';'
 *
 * @str: start of block
 * @flags: returns parsed flags, or DEBUG_DEFAULT_FLAGS if none specified
 * @slabs: return start of list of slabs, or NULL when there's no list
 * @init: assume this is initial parsing and not per-kmem-create parsing
 *
 * returns the start of next block if there's any, or NULL
 */
static char *
parse_slub_debug_flags(char *str, slab_flags_t *flags, char **slabs, bool init)
{
	bool higher_order_disable = false;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (*str == ',') {
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		*flags = DEBUG_DEFAULT_FLAGS;
		goto check_slabs;
	}
	*flags = 0;

	/* Determine which debug features should be switched on */
	for (; *str && *str != ',' && *str != ';'; str++) {
		switch (tolower(*str)) {
		case '-':
			*flags = 0;
			break;
		case 'f':
			*flags |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			*flags |= SLAB_RED_ZONE;
			break;
		case 'p':
			*flags |= SLAB_POISON;
			break;
		case 'u':
			*flags |= SLAB_STORE_USER;
			break;
		case 't':
			*flags |= SLAB_TRACE;
			break;
		case 'a':
			*flags |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			higher_order_disable = true;
			break;
		default:
			if (init)
				pr_err("slab_debug option '%c' unknown. skipped\n", *str);
		}
	}
check_slabs:
	if (*str == ',')
		*slabs = ++str;
	else
		*slabs = NULL;

	/* Skip over the slab list */
	while (*str && *str != ';')
		str++;

	/* Skip any completely empty blocks */
	while (*str && *str == ';')
		str++;

	if (init && higher_order_disable)
		disable_higher_order_debug = 1;

	if (*str)
		return str;
	else
		return NULL;
}
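
/*
 * Examples of blocks this parser accepts, built only from the flag
 * characters handled above (boot parameter syntax):
 *
 *	slab_debug=FZ			sanity checks + red zoning, globally
 *	slab_debug=,dentry		full debugging, only for dentry
 *	slab_debug=U,kmalloc-*;F,dentry	user tracking for kmalloc caches;
 *					sanity checks for dentry
 *	slab_debug=O			disable debug for caches whose min
 *					order would otherwise grow
 */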

static int __init setup_slub_debug(char *str)
{
	slab_flags_t flags;
	slab_flags_t global_flags;
	char *saved_str;
	char *slab_list;
	bool global_slub_debug_changed = false;
	bool slab_list_specified = false;

	global_flags = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	saved_str = str;
	while (str) {
		str = parse_slub_debug_flags(str, &flags, &slab_list, true);

		if (!slab_list) {
			global_flags = flags;
			global_slub_debug_changed = true;
		} else {
			slab_list_specified = true;
			if (flags & SLAB_STORE_USER)
				stack_depot_request_early_init();
		}
	}

	/*
	 * For backwards compatibility, a single list of flags with list of
	 * slabs means debugging is only changed for those slabs, so the global
	 * slab_debug should be unchanged (0 or DEBUG_DEFAULT_FLAGS, depending
	 * on CONFIG_SLUB_DEBUG_ON). We can extend that to multiple lists as
	 * long as there is no option specifying flags without a slab list.
	 */
	if (slab_list_specified) {
		if (!global_slub_debug_changed)
			global_flags = slub_debug;
		slub_debug_string = saved_str;
	}
out:
	slub_debug = global_flags;
	if (slub_debug & SLAB_STORE_USER)
		stack_depot_request_early_init();
	if (slub_debug != 0 || slub_debug_string)
		static_branch_enable(&slub_debug_enabled);
	else
		static_branch_disable(&slub_debug_enabled);
	if ((static_branch_unlikely(&init_on_alloc) ||
	     static_branch_unlikely(&init_on_free)) &&
	    (slub_debug & SLAB_POISON))
		pr_info("mem auto-init: SLAB_POISON will take precedence over init_on_alloc/init_on_free\n");
	return 1;
}

__setup("slab_debug", setup_slub_debug);
__setup_param("slub_debug", slub_debug, setup_slub_debug, 0);

/*
 * kmem_cache_flags - apply debugging options to the cache
 * @flags:	flags to set
 * @name:	name of the cache
 *
 * Debug option(s) are applied to @flags. In addition to the debug
 * option(s), if a slab name (or multiple) is specified i.e.
 * slab_debug=<Debug-Options>,<slab name1>,<slab name2> ...
 * then only the selected slabs will receive the debug option(s).
 */
slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
{
	char *iter;
	size_t len;
	char *next_block;
	slab_flags_t block_flags;
	slab_flags_t slub_debug_local = slub_debug;

	if (flags & SLAB_NO_USER_FLAGS)
		return flags;

	/*
	 * If the slab cache is for debugging (e.g. kmemleak) then
	 * don't store user (stack trace) information by default,
	 * but let the user enable it via the command line below.
	 */
	if (flags & SLAB_NOLEAKTRACE)
		slub_debug_local &= ~SLAB_STORE_USER;

	len = strlen(name);
	next_block = slub_debug_string;
	/* Go through all blocks of debug options, see if any matches our slab's name */
	while (next_block) {
		next_block = parse_slub_debug_flags(next_block, &block_flags, &iter, false);
		if (!iter)
			continue;
		/* Found a block that has a slab list, search it */
		while (*iter) {
			char *end, *glob;
			size_t cmplen;

			end = strchrnul(iter, ',');
			if (next_block && next_block < end)
				end = next_block - 1;

			glob = strnchr(iter, end - iter, '*');
			if (glob)
				cmplen = glob - iter;
			else
				cmplen = max_t(size_t, len, (end - iter));

			if (!strncmp(name, iter, cmplen)) {
				flags |= block_flags;
				return flags;
			}

			if (!*end || *end == ';')
				break;
			iter = end + 1;
		}
	}

	return flags | slub_debug_local;
}
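
/*
 * Example of the matching above: with slab_debug=F,kmalloc-*, the
 * block's slab list is "kmalloc-*". The '*' makes cmplen the length of
 * the prefix before the glob ("kmalloc-"), so a cache named
 * "kmalloc-128" matches and inherits SLAB_CONSISTENCY_CHECKS, while
 * "dentry" does not.
 */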
1873#else /* !CONFIG_SLUB_DEBUG */
1874static inline void setup_object_debug(struct kmem_cache *s, void *object) {}
1875static inline
1876void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
1877
1878static inline bool alloc_debug_processing(struct kmem_cache *s,
1879 struct slab *slab, void *object, int orig_size) { return true; }
1880
1881static inline bool free_debug_processing(struct kmem_cache *s,
1882 struct slab *slab, void *head, void *tail, int *bulk_cnt,
1883 unsigned long addr, depot_stack_handle_t handle) { return true; }
1884
1885static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
1886static inline int check_object(struct kmem_cache *s, struct slab *slab,
1887 void *object, u8 val) { return 1; }
1888static inline depot_stack_handle_t set_track_prepare(void) { return 0; }
1889static inline void set_track(struct kmem_cache *s, void *object,
1890 enum track_item alloc, unsigned long addr) {}
1891static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
1892 struct slab *slab) {}
1893static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
1894 struct slab *slab) {}
1895slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name)
1896{
1897 return flags;
1898}
1899#define slub_debug 0
1900
1901#define disable_higher_order_debug 0
1902
1903static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
1904 { return 0; }
1905static inline void inc_slabs_node(struct kmem_cache *s, int node,
1906 int objects) {}
1907static inline void dec_slabs_node(struct kmem_cache *s, int node,
1908 int objects) {}
1909#ifndef CONFIG_SLUB_TINY
1910static bool freelist_corrupted(struct kmem_cache *s, struct slab *slab,
1911 void **freelist, void *nextfree)
1912{
1913 return false;
1914}
1915#endif
1916#endif /* CONFIG_SLUB_DEBUG */
1917
1918#ifdef CONFIG_SLAB_OBJ_EXT
1919
1920#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
1921
1922static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
1923{
1924 struct slabobj_ext *slab_exts;
1925 struct slab *obj_exts_slab;
1926
1927 obj_exts_slab = virt_to_slab(obj_exts);
1928 slab_exts = slab_obj_exts(obj_exts_slab);
1929 if (slab_exts) {
1930 unsigned int offs = obj_to_index(obj_exts_slab->slab_cache,
1931 obj_exts_slab, obj_exts);
1932 /* codetag should be NULL */
1933 WARN_ON(slab_exts[offs].ref.ct);
1934 set_codetag_empty(&slab_exts[offs].ref);
1935 }
1936}
1937
1938static inline void mark_failed_objexts_alloc(struct slab *slab)
1939{
1940 slab->obj_exts = OBJEXTS_ALLOC_FAIL;
1941}
1942
1943static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
1944 struct slabobj_ext *vec, unsigned int objects)
1945{
1946 /*
1947	 * If the vector previously failed to allocate then we have live
1948 * objects with no tag reference. Mark all references in this
1949 * vector as empty to avoid warnings later on.
1950 */
1951 if (obj_exts & OBJEXTS_ALLOC_FAIL) {
1952 unsigned int i;
1953
1954 for (i = 0; i < objects; i++)
1955 set_codetag_empty(&vec[i].ref);
1956 }
1957}
1958
1959#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1960
1961static inline void mark_objexts_empty(struct slabobj_ext *obj_exts) {}
1962static inline void mark_failed_objexts_alloc(struct slab *slab) {}
1963static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
1964 struct slabobj_ext *vec, unsigned int objects) {}
1965
1966#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1967
1968/*
1969 * The allocated objcg pointers array is not accounted directly.
1970 * Moreover, it should not come from a DMA buffer and is not readily
1971 * reclaimable. So those GFP bits should be masked off.
1972 */
1973#define OBJCGS_CLEAR_MASK (__GFP_DMA | __GFP_RECLAIMABLE | \
1974 __GFP_ACCOUNT | __GFP_NOFAIL)
1975
1976int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
1977 gfp_t gfp, bool new_slab)
1978{
1979 unsigned int objects = objs_per_slab(s, slab);
1980 unsigned long new_exts;
1981 unsigned long old_exts;
1982 struct slabobj_ext *vec;
1983
1984 gfp &= ~OBJCGS_CLEAR_MASK;
1985 /* Prevent recursive extension vector allocation */
1986 gfp |= __GFP_NO_OBJ_EXT;
1987 vec = kcalloc_node(objects, sizeof(struct slabobj_ext), gfp,
1988 slab_nid(slab));
1989 if (!vec) {
1990 /* Mark vectors which failed to allocate */
1991 if (new_slab)
1992 mark_failed_objexts_alloc(slab);
1993
1994 return -ENOMEM;
1995 }
1996
1997 new_exts = (unsigned long)vec;
1998#ifdef CONFIG_MEMCG
1999 new_exts |= MEMCG_DATA_OBJEXTS;
2000#endif
2001 old_exts = READ_ONCE(slab->obj_exts);
2002 handle_failed_objexts_alloc(old_exts, vec, objects);
2003 if (new_slab) {
2004 /*
2005 * If the slab is brand new and nobody can yet access its
2006 * obj_exts, no synchronization is required and obj_exts can
2007 * be simply assigned.
2008 */
2009 slab->obj_exts = new_exts;
2010 } else if ((old_exts & ~OBJEXTS_FLAGS_MASK) ||
2011 cmpxchg(&slab->obj_exts, old_exts, new_exts) != old_exts) {
2012 /*
2013 * If the slab is already in use, somebody can allocate and
2014 * assign slabobj_exts in parallel. In this case the existing
2015 * objcg vector should be reused.
2016 */
2017 mark_objexts_empty(vec);
2018 kfree(vec);
2019 return 0;
2020 }
2021
2022 kmemleak_not_leak(vec);
2023 return 0;
2024}
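
/*
 * Illustrative sketch of the lost-race path above (timeline assumed,
 * not taken from the source): slab->obj_exts packs the vector pointer
 * together with low flag bits, so concurrent allocators are arbitrated
 * by the single cmpxchg():
 *
 *	CPU0					CPU1
 *	old_exts = READ_ONCE(...)		old_exts = READ_ONCE(...)
 *	cmpxchg() succeeds, installs vec	cmpxchg() fails,
 *						mark_objexts_empty(vec),
 *						kfree(vec), return 0
 *
 * Exactly one vector survives and both callers see a valid obj_exts.
 */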
2025
2026/* Should be called only if mem_alloc_profiling_enabled() */
2027static noinline void free_slab_obj_exts(struct slab *slab)
2028{
2029 struct slabobj_ext *obj_exts;
2030
2031 obj_exts = slab_obj_exts(slab);
2032 if (!obj_exts)
2033 return;
2034
2035 /*
2036 * obj_exts was created with __GFP_NO_OBJ_EXT flag, therefore its
2037 * corresponding extension will be NULL. alloc_tag_sub() will throw a
2038	 * warning if the slab has extensions but the extension of an object is
2039 * NULL, therefore replace NULL with CODETAG_EMPTY to indicate that
2040 * the extension for obj_exts is expected to be NULL.
2041 */
2042 mark_objexts_empty(obj_exts);
2043 kfree(obj_exts);
2044 slab->obj_exts = 0;
2045}
2046
2047static inline bool need_slab_obj_ext(void)
2048{
2049 if (mem_alloc_profiling_enabled())
2050 return true;
2051
2052 /*
2053	 * CONFIG_MEMCG creates a vector of obj_cgroup objects conditionally
2054 * inside memcg_slab_post_alloc_hook. No other users for now.
2055 */
2056 return false;
2057}
2058
2059#else /* CONFIG_SLAB_OBJ_EXT */
2060
2061static inline int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
2062 gfp_t gfp, bool new_slab)
2063{
2064 return 0;
2065}
2066
2067static inline void free_slab_obj_exts(struct slab *slab)
2068{
2069}
2070
2071static inline bool need_slab_obj_ext(void)
2072{
2073 return false;
2074}
2075
2076#endif /* CONFIG_SLAB_OBJ_EXT */
2077
2078#ifdef CONFIG_MEM_ALLOC_PROFILING
2079
2080static inline struct slabobj_ext *
2081prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
2082{
2083 struct slab *slab;
2084
2085 if (!p)
2086 return NULL;
2087
2088 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2089 return NULL;
2090
2091 if (flags & __GFP_NO_OBJ_EXT)
2092 return NULL;
2093
2094 slab = virt_to_slab(p);
2095 if (!slab_obj_exts(slab) &&
2096 WARN(alloc_slab_obj_exts(slab, s, flags, false),
2097 "%s, %s: Failed to create slab extension vector!\n",
2098 __func__, s->name))
2099 return NULL;
2100
2101 return slab_obj_exts(slab) + obj_to_index(s, slab, p);
2102}
2103
2104/* Should be called only if mem_alloc_profiling_enabled() */
2105static noinline void
2106__alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2107{
2108 struct slabobj_ext *obj_exts;
2109
2110 obj_exts = prepare_slab_obj_exts_hook(s, flags, object);
2111 /*
2112 * Currently obj_exts is used only for allocation profiling.
2113 * If other users appear then mem_alloc_profiling_enabled()
2114 * check should be added before alloc_tag_add().
2115 */
2116 if (likely(obj_exts))
2117 alloc_tag_add(&obj_exts->ref, current->alloc_tag, s->size);
2118}
2119
2120static inline void
2121alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2122{
2123 if (need_slab_obj_ext())
2124 __alloc_tagging_slab_alloc_hook(s, object, flags);
2125}
2126
2127/* Should be called only if mem_alloc_profiling_enabled() */
2128static noinline void
2129__alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2130 int objects)
2131{
2132 struct slabobj_ext *obj_exts;
2133 int i;
2134
2135 /* slab->obj_exts might not be NULL if it was created for MEMCG accounting. */
2136 if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
2137 return;
2138
2139 obj_exts = slab_obj_exts(slab);
2140 if (!obj_exts)
2141 return;
2142
2143 for (i = 0; i < objects; i++) {
2144 unsigned int off = obj_to_index(s, slab, p[i]);
2145
2146 alloc_tag_sub(&obj_exts[off].ref, s->size);
2147 }
2148}
2149
2150static inline void
2151alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2152 int objects)
2153{
2154 if (mem_alloc_profiling_enabled())
2155 __alloc_tagging_slab_free_hook(s, slab, p, objects);
2156}
2157
2158#else /* CONFIG_MEM_ALLOC_PROFILING */
2159
2160static inline void
2161alloc_tagging_slab_alloc_hook(struct kmem_cache *s, void *object, gfp_t flags)
2162{
2163}
2164
2165static inline void
2166alloc_tagging_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2167 int objects)
2168{
2169}
2170
2171#endif /* CONFIG_MEM_ALLOC_PROFILING */
2172
2174#ifdef CONFIG_MEMCG
2175
2176static void memcg_alloc_abort_single(struct kmem_cache *s, void *object);
2177
2178static __fastpath_inline
2179bool memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
2180 gfp_t flags, size_t size, void **p)
2181{
2182 if (likely(!memcg_kmem_online()))
2183 return true;
2184
2185 if (likely(!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT)))
2186 return true;
2187
2188 if (likely(__memcg_slab_post_alloc_hook(s, lru, flags, size, p)))
2189 return true;
2190
2191 if (likely(size == 1)) {
2192 memcg_alloc_abort_single(s, *p);
2193 *p = NULL;
2194 } else {
2195 kmem_cache_free_bulk(s, size, p);
2196 }
2197
2198 return false;
2199}
2200
2201static __fastpath_inline
2202void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab, void **p,
2203 int objects)
2204{
2205 struct slabobj_ext *obj_exts;
2206
2207 if (!memcg_kmem_online())
2208 return;
2209
2210 obj_exts = slab_obj_exts(slab);
2211 if (likely(!obj_exts))
2212 return;
2213
2214 __memcg_slab_free_hook(s, slab, p, objects, obj_exts);
2215}
2216
2217static __fastpath_inline
2218bool memcg_slab_post_charge(void *p, gfp_t flags)
2219{
2220 struct slabobj_ext *slab_exts;
2221 struct kmem_cache *s;
2222 struct folio *folio;
2223 struct slab *slab;
2224 unsigned long off;
2225
2226 folio = virt_to_folio(p);
2227 if (!folio_test_slab(folio)) {
2228 int size;
2229
2230 if (folio_memcg_kmem(folio))
2231 return true;
2232
2233 if (__memcg_kmem_charge_page(folio_page(folio, 0), flags,
2234 folio_order(folio)))
2235 return false;
2236
2237 /*
2238 * This folio has already been accounted in the global stats but
2239 * not in the memcg stats. So, subtract from the global and use
2240 * the interface which adds to both global and memcg stats.
2241 */
2242 size = folio_size(folio);
2243 node_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, -size);
2244 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, size);
2245 return true;
2246 }
2247
2248 slab = folio_slab(folio);
2249 s = slab->slab_cache;
2250
2251 /*
2252 * Ignore KMALLOC_NORMAL cache to avoid possible circular dependency
2253 * of slab_obj_exts being allocated from the same slab and thus the slab
2254 * becoming effectively unfreeable.
2255 */
2256 if (is_kmalloc_normal(s))
2257 return true;
2258
2259 /* Ignore already charged objects. */
2260 slab_exts = slab_obj_exts(slab);
2261 if (slab_exts) {
2262 off = obj_to_index(s, slab, p);
2263 if (unlikely(slab_exts[off].objcg))
2264 return true;
2265 }
2266
2267 return __memcg_slab_post_alloc_hook(s, NULL, flags, 1, &p);
2268}
2269
2270#else /* CONFIG_MEMCG */
2271static inline bool memcg_slab_post_alloc_hook(struct kmem_cache *s,
2272 struct list_lru *lru,
2273 gfp_t flags, size_t size,
2274 void **p)
2275{
2276 return true;
2277}
2278
2279static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
2280 void **p, int objects)
2281{
2282}
2283
2284static inline bool memcg_slab_post_charge(void *p, gfp_t flags)
2285{
2286 return true;
2287}
2288#endif /* CONFIG_MEMCG */
2289
2290#ifdef CONFIG_SLUB_RCU_DEBUG
2291static void slab_free_after_rcu_debug(struct rcu_head *rcu_head);
2292
2293struct rcu_delayed_free {
2294 struct rcu_head head;
2295 void *object;
2296};
2297#endif
2298
2299/*
2300 * Hooks for other subsystems that check memory allocations. In a typical
2301 * production configuration these hooks should all produce no code at all.
2302 *
2303 * Returns true if freeing of the object can proceed, false if its reuse
2304 * was delayed by CONFIG_SLUB_RCU_DEBUG or KASAN quarantine, or it was returned
2305 * to KFENCE.
2306 */
2307static __always_inline
2308bool slab_free_hook(struct kmem_cache *s, void *x, bool init,
2309 bool after_rcu_delay)
2310{
2311 /* Are the object contents still accessible? */
2312 bool still_accessible = (s->flags & SLAB_TYPESAFE_BY_RCU) && !after_rcu_delay;
2313
2314 kmemleak_free_recursive(x, s->flags);
2315 kmsan_slab_free(s, x);
2316
2317 debug_check_no_locks_freed(x, s->object_size);
2318
2319 if (!(s->flags & SLAB_DEBUG_OBJECTS))
2320 debug_check_no_obj_freed(x, s->object_size);
2321
2322 /* Use KCSAN to help debug racy use-after-free. */
2323 if (!still_accessible)
2324 __kcsan_check_access(x, s->object_size,
2325 KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT);
2326
2327 if (kfence_free(x))
2328 return false;
2329
2330 /*
2331 * Give KASAN a chance to notice an invalid free operation before we
2332 * modify the object.
2333 */
2334 if (kasan_slab_pre_free(s, x))
2335 return false;
2336
2337#ifdef CONFIG_SLUB_RCU_DEBUG
2338 if (still_accessible) {
2339 struct rcu_delayed_free *delayed_free;
2340
2341 delayed_free = kmalloc(sizeof(*delayed_free), GFP_NOWAIT);
2342 if (delayed_free) {
2343 /*
2344 * Let KASAN track our call stack as a "related work
2345 * creation", just like if the object had been freed
2346 * normally via kfree_rcu().
2347 * We have to do this manually because the rcu_head is
2348 * not located inside the object.
2349 */
2350 kasan_record_aux_stack(x);
2351
2352 delayed_free->object = x;
2353 call_rcu(&delayed_free->head, slab_free_after_rcu_debug);
2354 return false;
2355 }
2356 }
2357#endif /* CONFIG_SLUB_RCU_DEBUG */
2358
2359 /*
2360 * As memory initialization might be integrated into KASAN,
2361	 * kasan_slab_free and initialization memsets must be
2362	 * kept together to avoid discrepancies in behavior.
2363	 *
2364	 * The initialization memsets clear the object and the metadata,
2365 * but don't touch the SLAB redzone.
2366 *
2367 * The object's freepointer is also avoided if stored outside the
2368 * object.
2369 */
2370 if (unlikely(init)) {
2371 int rsize;
2372 unsigned int inuse, orig_size;
2373
2374 inuse = get_info_end(s);
2375 orig_size = get_orig_size(s, x);
2376 if (!kasan_has_integrated_init())
2377 memset(kasan_reset_tag(x), 0, orig_size);
2378 rsize = (s->flags & SLAB_RED_ZONE) ? s->red_left_pad : 0;
2379 memset((char *)kasan_reset_tag(x) + inuse, 0,
2380 s->size - inuse - rsize);
2381 /*
2382		 * Restore orig_size; otherwise a kmalloc redzone overwrite
2383		 * would be falsely reported.
2384 */
2385 set_orig_size(s, x, orig_size);
2387 }
2388 /* KASAN might put x into memory quarantine, delaying its reuse. */
2389 return !kasan_slab_free(s, x, init, still_accessible);
2390}
2391
2392static __fastpath_inline
2393bool slab_free_freelist_hook(struct kmem_cache *s, void **head, void **tail,
2394 int *cnt)
2395{
2397 void *object;
2398 void *next = *head;
2399 void *old_tail = *tail;
2400 bool init;
2401
2402 if (is_kfence_address(next)) {
2403 slab_free_hook(s, next, false, false);
2404 return false;
2405 }
2406
2407 /* Head and tail of the reconstructed freelist */
2408 *head = NULL;
2409 *tail = NULL;
2410
2411 init = slab_want_init_on_free(s);
2412
2413 do {
2414 object = next;
2415 next = get_freepointer(s, object);
2416
2417 /* If object's reuse doesn't have to be delayed */
2418 if (likely(slab_free_hook(s, object, init, false))) {
2419 /* Move object to the new freelist */
2420 set_freepointer(s, object, *head);
2421 *head = object;
2422 if (!*tail)
2423 *tail = object;
2424 } else {
2425 /*
2426 * Adjust the reconstructed freelist depth
2427 * accordingly if object's reuse is delayed.
2428 */
2429 --(*cnt);
2430 }
2431 } while (object != old_tail);
2432
2433 return *head != NULL;
2434}
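
/*
 * Illustrative sketch with hypothetical objects: bulk-freeing the list
 * A->B->C->D (*cnt == 4) when KASAN quarantines C. slab_free_hook()
 * returns false for C, so the loop above rebuilds the freelist in
 * reverse as D->B->A, sets *tail to A, and drops *cnt to 3, keeping the
 * caller's count in sync with what actually reaches the slab freelist.
 */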
2435
2436static void *setup_object(struct kmem_cache *s, void *object)
2437{
2438 setup_object_debug(s, object);
2439 object = kasan_init_slab_obj(s, object);
2440 if (unlikely(s->ctor)) {
2441 kasan_unpoison_new_object(s, object);
2442 s->ctor(object);
2443 kasan_poison_new_object(s, object);
2444 }
2445 return object;
2446}
2447
2448/*
2449 * Slab allocation and freeing
2450 */
2451static inline struct slab *alloc_slab_page(gfp_t flags, int node,
2452 struct kmem_cache_order_objects oo)
2453{
2454 struct folio *folio;
2455 struct slab *slab;
2456 unsigned int order = oo_order(oo);
2457
2458 if (node == NUMA_NO_NODE)
2459 folio = (struct folio *)alloc_frozen_pages(flags, order);
2460 else
2461 folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL);
2462
2463 if (!folio)
2464 return NULL;
2465
2466 slab = folio_slab(folio);
2467 __folio_set_slab(folio);
2468 if (folio_is_pfmemalloc(folio))
2469 slab_set_pfmemalloc(slab);
2470
2471 return slab;
2472}
2473
2474#ifdef CONFIG_SLAB_FREELIST_RANDOM
2475/* Pre-initialize the random sequence cache */
2476static int init_cache_random_seq(struct kmem_cache *s)
2477{
2478 unsigned int count = oo_objects(s->oo);
2479 int err;
2480
2481	/* Bail out if already initialized */
2482 if (s->random_seq)
2483 return 0;
2484
2485 err = cache_random_seq_create(s, count, GFP_KERNEL);
2486 if (err) {
2487 pr_err("SLUB: Unable to initialize free list for %s\n",
2488 s->name);
2489 return err;
2490 }
2491
2492 /* Transform to an offset on the set of pages */
2493 if (s->random_seq) {
2494 unsigned int i;
2495
2496 for (i = 0; i < count; i++)
2497 s->random_seq[i] *= s->size;
2498 }
2499 return 0;
2500}
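
/*
 * Worked example with hypothetical numbers: for a cache with
 * s->size == 64 and four objects per slab, cache_random_seq_create()
 * might produce the index permutation {2, 0, 3, 1}. The loop above
 * pre-scales it into the byte offsets {128, 0, 192, 64}, so
 * shuffle_freelist() can later compute each object as start + offset
 * without a multiplication per object.
 */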
2501
2502/* Initialize each random sequence freelist per cache */
2503static void __init init_freelist_randomization(void)
2504{
2505 struct kmem_cache *s;
2506
2507 mutex_lock(&slab_mutex);
2508
2509 list_for_each_entry(s, &slab_caches, list)
2510 init_cache_random_seq(s);
2511
2512 mutex_unlock(&slab_mutex);
2513}
2514
2515/* Get the next entry from the pre-computed randomized freelist */
2516static void *next_freelist_entry(struct kmem_cache *s,
2517 unsigned long *pos, void *start,
2518 unsigned long page_limit,
2519 unsigned long freelist_count)
2520{
2521 unsigned int idx;
2522
2523 /*
2524 * If the target page allocation failed, the number of objects on the
2525 * page might be smaller than the usual size defined by the cache.
2526 */
2527 do {
2528 idx = s->random_seq[*pos];
2529 *pos += 1;
2530 if (*pos >= freelist_count)
2531 *pos = 0;
2532 } while (unlikely(idx >= page_limit));
2533
2534 return (char *)start + idx;
2535}
2536
2537/* Shuffle the singly linked freelist based on a pre-computed random sequence */
2538static bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2539{
2540 void *start;
2541 void *cur;
2542 void *next;
2543 unsigned long idx, pos, page_limit, freelist_count;
2544
2545 if (slab->objects < 2 || !s->random_seq)
2546 return false;
2547
2548 freelist_count = oo_objects(s->oo);
2549 pos = get_random_u32_below(freelist_count);
2550
2551 page_limit = slab->objects * s->size;
2552 start = fixup_red_left(s, slab_address(slab));
2553
2554 /* First entry is used as the base of the freelist */
2555 cur = next_freelist_entry(s, &pos, start, page_limit, freelist_count);
2556 cur = setup_object(s, cur);
2557 slab->freelist = cur;
2558
2559 for (idx = 1; idx < slab->objects; idx++) {
2560 next = next_freelist_entry(s, &pos, start, page_limit,
2561 freelist_count);
2562 next = setup_object(s, next);
2563 set_freepointer(s, cur, next);
2564 cur = next;
2565 }
2566 set_freepointer(s, cur, NULL);
2567
2568 return true;
2569}
2570#else
2571static inline int init_cache_random_seq(struct kmem_cache *s)
2572{
2573 return 0;
2574}
2575static inline void init_freelist_randomization(void) { }
2576static inline bool shuffle_freelist(struct kmem_cache *s, struct slab *slab)
2577{
2578 return false;
2579}
2580#endif /* CONFIG_SLAB_FREELIST_RANDOM */
2581
2582static __always_inline void account_slab(struct slab *slab, int order,
2583 struct kmem_cache *s, gfp_t gfp)
2584{
2585 if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
2586 alloc_slab_obj_exts(slab, s, gfp, true);
2587
2588 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2589 PAGE_SIZE << order);
2590}
2591
2592static __always_inline void unaccount_slab(struct slab *slab, int order,
2593 struct kmem_cache *s)
2594{
2595 if (memcg_kmem_online() || need_slab_obj_ext())
2596 free_slab_obj_exts(slab);
2597
2598 mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
2599 -(PAGE_SIZE << order));
2600}
2601
2602static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
2603{
2604 struct slab *slab;
2605 struct kmem_cache_order_objects oo = s->oo;
2606 gfp_t alloc_gfp;
2607 void *start, *p, *next;
2608 int idx;
2609 bool shuffle;
2610
2611 flags &= gfp_allowed_mask;
2612
2613 flags |= s->allocflags;
2614
2615 /*
2616 * Let the initial higher-order allocation fail under memory pressure
2617	 * so we fall back to the minimum order allocation.
2618 */
2619 alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
2620 if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
2621 alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~__GFP_RECLAIM;
2622
2623 slab = alloc_slab_page(alloc_gfp, node, oo);
2624 if (unlikely(!slab)) {
2625 oo = s->min;
2626 alloc_gfp = flags;
2627 /*
2628 * Allocation may have failed due to fragmentation.
2629 * Try a lower order alloc if possible
2630 */
2631 slab = alloc_slab_page(alloc_gfp, node, oo);
2632 if (unlikely(!slab))
2633 return NULL;
2634 stat(s, ORDER_FALLBACK);
2635 }
2636
2637 slab->objects = oo_objects(oo);
2638 slab->inuse = 0;
2639 slab->frozen = 0;
2640
2641 account_slab(slab, oo_order(oo), s, flags);
2642
2643 slab->slab_cache = s;
2644
2645 kasan_poison_slab(slab);
2646
2647 start = slab_address(slab);
2648
2649 setup_slab_debug(s, slab, start);
2650
2651 shuffle = shuffle_freelist(s, slab);
2652
2653 if (!shuffle) {
2654 start = fixup_red_left(s, start);
2655 start = setup_object(s, start);
2656 slab->freelist = start;
2657 for (idx = 0, p = start; idx < slab->objects - 1; idx++) {
2658 next = p + s->size;
2659 next = setup_object(s, next);
2660 set_freepointer(s, p, next);
2661 p = next;
2662 }
2663 set_freepointer(s, p, NULL);
2664 }
2665
2666 return slab;
2667}
2668
2669static struct slab *new_slab(struct kmem_cache *s, gfp_t flags, int node)
2670{
2671 if (unlikely(flags & GFP_SLAB_BUG_MASK))
2672 flags = kmalloc_fix_flags(flags);
2673
2674 WARN_ON_ONCE(s->ctor && (flags & __GFP_ZERO));
2675
2676 return allocate_slab(s,
2677 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
2678}
2679
2680static void __free_slab(struct kmem_cache *s, struct slab *slab)
2681{
2682 struct folio *folio = slab_folio(slab);
2683 int order = folio_order(folio);
2684 int pages = 1 << order;
2685
2686 __slab_clear_pfmemalloc(slab);
2687 folio->mapping = NULL;
2688 __folio_clear_slab(folio);
2689 mm_account_reclaimed_pages(pages);
2690 unaccount_slab(slab, order, s);
2691 free_frozen_pages(&folio->page, order);
2692}
2693
2694static void rcu_free_slab(struct rcu_head *h)
2695{
2696 struct slab *slab = container_of(h, struct slab, rcu_head);
2697
2698 __free_slab(slab->slab_cache, slab);
2699}
2700
2701static void free_slab(struct kmem_cache *s, struct slab *slab)
2702{
2703 if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS)) {
2704 void *p;
2705
2706 slab_pad_check(s, slab);
2707 for_each_object(p, s, slab_address(slab), slab->objects)
2708 check_object(s, slab, p, SLUB_RED_INACTIVE);
2709 }
2710
2711 if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU))
2712 call_rcu(&slab->rcu_head, rcu_free_slab);
2713 else
2714 __free_slab(s, slab);
2715}
2716
2717static void discard_slab(struct kmem_cache *s, struct slab *slab)
2718{
2719 dec_slabs_node(s, slab_nid(slab), slab->objects);
2720 free_slab(s, slab);
2721}
2722
2723/*
2724 * SLUB reuses the PG_workingset bit to keep track of whether a slab
2725 * is on the per-node partial list.
2726 */
2727static inline bool slab_test_node_partial(const struct slab *slab)
2728{
2729 return folio_test_workingset(slab_folio(slab));
2730}
2731
2732static inline void slab_set_node_partial(struct slab *slab)
2733{
2734 set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2735}
2736
2737static inline void slab_clear_node_partial(struct slab *slab)
2738{
2739 clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
2740}
2741
2742/*
2743 * Management of partially allocated slabs.
2744 */
2745static inline void
2746__add_partial(struct kmem_cache_node *n, struct slab *slab, int tail)
2747{
2748 n->nr_partial++;
2749 if (tail == DEACTIVATE_TO_TAIL)
2750 list_add_tail(&slab->slab_list, &n->partial);
2751 else
2752 list_add(&slab->slab_list, &n->partial);
2753 slab_set_node_partial(slab);
2754}
2755
2756static inline void add_partial(struct kmem_cache_node *n,
2757 struct slab *slab, int tail)
2758{
2759 lockdep_assert_held(&n->list_lock);
2760 __add_partial(n, slab, tail);
2761}
2762
2763static inline void remove_partial(struct kmem_cache_node *n,
2764 struct slab *slab)
2765{
2766 lockdep_assert_held(&n->list_lock);
2767 list_del(&slab->slab_list);
2768 slab_clear_node_partial(slab);
2769 n->nr_partial--;
2770}
2771
2772/*
2773 * Called only for kmem_cache_debug() caches instead of remove_partial(), with a
2774 * slab from the n->partial list. Remove only a single object from the slab, do
2775 * the alloc_debug_processing() checks and leave the slab on the list, or move
2776 * it to the full list if it was the last free object.
2777 */
2778static void *alloc_single_from_partial(struct kmem_cache *s,
2779 struct kmem_cache_node *n, struct slab *slab, int orig_size)
2780{
2781 void *object;
2782
2783 lockdep_assert_held(&n->list_lock);
2784
2785 object = slab->freelist;
2786 slab->freelist = get_freepointer(s, object);
2787 slab->inuse++;
2788
2789 if (!alloc_debug_processing(s, slab, object, orig_size)) {
2790 if (folio_test_slab(slab_folio(slab)))
2791 remove_partial(n, slab);
2792 return NULL;
2793 }
2794
2795 if (slab->inuse == slab->objects) {
2796 remove_partial(n, slab);
2797 add_full(s, n, slab);
2798 }
2799
2800 return object;
2801}
2802
2803/*
2804 * Called only for kmem_cache_debug() caches to allocate from a freshly
2805 * allocated slab. Allocate a single object instead of the whole freelist
2806 * and put the slab on the partial (or full) list.
2807 */
2808static void *alloc_single_from_new_slab(struct kmem_cache *s,
2809 struct slab *slab, int orig_size)
2810{
2811 int nid = slab_nid(slab);
2812 struct kmem_cache_node *n = get_node(s, nid);
2813 unsigned long flags;
2814 void *object;
2815
2817 object = slab->freelist;
2818 slab->freelist = get_freepointer(s, object);
2819 slab->inuse = 1;
2820
2821 if (!alloc_debug_processing(s, slab, object, orig_size))
2822 /*
2823 * It's not really expected that this would fail on a
2824 * freshly allocated slab, but a concurrent memory
2825 * corruption in theory could cause that.
2826 */
2827 return NULL;
2828
2829 spin_lock_irqsave(&n->list_lock, flags);
2830
2831 if (slab->inuse == slab->objects)
2832 add_full(s, n, slab);
2833 else
2834 add_partial(n, slab, DEACTIVATE_TO_HEAD);
2835
2836 inc_slabs_node(s, nid, slab->objects);
2837 spin_unlock_irqrestore(&n->list_lock, flags);
2838
2839 return object;
2840}
2841
2842#ifdef CONFIG_SLUB_CPU_PARTIAL
2843static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain);
2844#else
2845static inline void put_cpu_partial(struct kmem_cache *s, struct slab *slab,
2846 int drain) { }
2847#endif
2848static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags);
2849
2850/*
2851 * Try to allocate a partial slab from a specific node.
2852 */
2853static struct slab *get_partial_node(struct kmem_cache *s,
2854 struct kmem_cache_node *n,
2855 struct partial_context *pc)
2856{
2857 struct slab *slab, *slab2, *partial = NULL;
2858 unsigned long flags;
2859 unsigned int partial_slabs = 0;
2860
2861 /*
2862 * Racy check. If we mistakenly see no partial slabs then we
2863 * just allocate an empty slab. If we mistakenly try to get a
2864 * partial slab and there is none available then get_partial()
2865 * will return NULL.
2866 */
2867 if (!n || !n->nr_partial)
2868 return NULL;
2869
2870 spin_lock_irqsave(&n->list_lock, flags);
2871 list_for_each_entry_safe(slab, slab2, &n->partial, slab_list) {
2872 if (!pfmemalloc_match(slab, pc->flags))
2873 continue;
2874
2875 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
2876 void *object = alloc_single_from_partial(s, n, slab,
2877 pc->orig_size);
2878 if (object) {
2879 partial = slab;
2880 pc->object = object;
2881 break;
2882 }
2883 continue;
2884 }
2885
2886 remove_partial(n, slab);
2887
2888 if (!partial) {
2889 partial = slab;
2890 stat(s, ALLOC_FROM_PARTIAL);
2891
2892			if (!slub_get_cpu_partial(s))
2893				break;
2895 } else {
2896 put_cpu_partial(s, slab, 0);
2897 stat(s, CPU_PARTIAL_NODE);
2898
2899			if (++partial_slabs > slub_get_cpu_partial(s) / 2)
2900				break;
2902 }
2903 }
2904 spin_unlock_irqrestore(&n->list_lock, flags);
2905 return partial;
2906}
2907
2908/*
2909 * Get a slab from somewhere. Search in increasing NUMA distances.
2910 */
2911static struct slab *get_any_partial(struct kmem_cache *s,
2912 struct partial_context *pc)
2913{
2914#ifdef CONFIG_NUMA
2915 struct zonelist *zonelist;
2916 struct zoneref *z;
2917 struct zone *zone;
2918 enum zone_type highest_zoneidx = gfp_zone(pc->flags);
2919 struct slab *slab;
2920 unsigned int cpuset_mems_cookie;
2921
2922 /*
2923 * The defrag ratio allows a configuration of the tradeoffs between
2924 * inter node defragmentation and node local allocations. A lower
2925 * defrag_ratio increases the tendency to do local allocations
2926 * instead of attempting to obtain partial slabs from other nodes.
2927 *
2928 * If the defrag_ratio is set to 0 then kmalloc() always
2929 * returns node local objects. If the ratio is higher then kmalloc()
2930 * may return off node objects because partial slabs are obtained
2931 * from other nodes and filled up.
2932 *
2933 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
2934 * (which makes defrag_ratio = 1000) then every (well almost)
2935 * allocation will first attempt to defrag slab caches on other nodes.
2936 * This means scanning over all nodes to look for partial slabs which
2937 * may be expensive if we do it every time we are trying to find a slab
2938 * with available objects.
2939 */
2940 if (!s->remote_node_defrag_ratio ||
2941 get_cycles() % 1024 > s->remote_node_defrag_ratio)
2942 return NULL;
2943
2944 do {
2945 cpuset_mems_cookie = read_mems_allowed_begin();
2946 zonelist = node_zonelist(mempolicy_slab_node(), pc->flags);
2947 for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
2948 struct kmem_cache_node *n;
2949
2950 n = get_node(s, zone_to_nid(zone));
2951
2952 if (n && cpuset_zone_allowed(zone, pc->flags) &&
2953 n->nr_partial > s->min_partial) {
2954 slab = get_partial_node(s, n, pc);
2955 if (slab) {
2956 /*
2957 * Don't check read_mems_allowed_retry()
2958 * here - if mems_allowed was updated in
2959 * parallel, that was a harmless race
2960 * between allocation and the cpuset
2961 * update
2962 */
2963 return slab;
2964 }
2965 }
2966 }
2967 } while (read_mems_allowed_retry(cpuset_mems_cookie));
2968#endif /* CONFIG_NUMA */
2969 return NULL;
2970}
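
/*
 * Worked example for the ratio check above, assuming a hypothetical
 * setting: writing 30 to /sys/kernel/slab/<cache>/remote_node_defrag_ratio
 * stores remote_node_defrag_ratio = 300, so the remote scan proceeds
 * only when get_cycles() % 1024 <= 300, i.e. for roughly 30% of
 * slowpath allocations; the rest skip the zonelist walk and allocate a
 * new local slab instead.
 */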
2971
2972/*
2973 * Get a partial slab, lock it and return it.
2974 */
2975static struct slab *get_partial(struct kmem_cache *s, int node,
2976 struct partial_context *pc)
2977{
2978 struct slab *slab;
2979 int searchnode = node;
2980
2981 if (node == NUMA_NO_NODE)
2982 searchnode = numa_mem_id();
2983
2984 slab = get_partial_node(s, get_node(s, searchnode), pc);
2985 if (slab || (node != NUMA_NO_NODE && (pc->flags & __GFP_THISNODE)))
2986 return slab;
2987
2988 return get_any_partial(s, pc);
2989}
2990
2991#ifndef CONFIG_SLUB_TINY
2992
2993#ifdef CONFIG_PREEMPTION
2994/*
2995 * Calculate the next globally unique transaction for disambiguation
2996 * during cmpxchg. The transactions start with the cpu number and are then
2997 * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
2998 */
2999#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
3000#else
3001/*
3002 * No preemption is supported, therefore there is also no need to check
3003 * for different cpus.
3004 */
3005#define TID_STEP 1
3006#endif /* CONFIG_PREEMPTION */
3007
3008static inline unsigned long next_tid(unsigned long tid)
3009{
3010 return tid + TID_STEP;
3011}
3012
3013#ifdef SLUB_DEBUG_CMPXCHG
3014static inline unsigned int tid_to_cpu(unsigned long tid)
3015{
3016 return tid % TID_STEP;
3017}
3018
3019static inline unsigned long tid_to_event(unsigned long tid)
3020{
3021 return tid / TID_STEP;
3022}
3023#endif
3024
3025static inline unsigned int init_tid(int cpu)
3026{
3027 return cpu;
3028}
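
/*
 * Worked example, assuming CONFIG_NR_CPUS == 64 (so TID_STEP == 64):
 * CPU 5 starts with tid == 5 and advances through 69, 133, ... as it
 * performs operations. tid % TID_STEP recovers the cpu (5) and
 * tid / TID_STEP the event count, so a cmpxchg against a stale tid
 * fails both when the task migrated to another cpu and when the same
 * cpu did an operation in between.
 */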
3029
3030static inline void note_cmpxchg_failure(const char *n,
3031 const struct kmem_cache *s, unsigned long tid)
3032{
3033#ifdef SLUB_DEBUG_CMPXCHG
3034 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
3035
3036 pr_info("%s %s: cmpxchg redo ", n, s->name);
3037
3038#ifdef CONFIG_PREEMPTION
3039 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
3040 pr_warn("due to cpu change %d -> %d\n",
3041 tid_to_cpu(tid), tid_to_cpu(actual_tid));
3042 else
3043#endif
3044 if (tid_to_event(tid) != tid_to_event(actual_tid))
3045 pr_warn("due to cpu running other code. Event %ld->%ld\n",
3046 tid_to_event(tid), tid_to_event(actual_tid));
3047 else
3048 pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
3049 actual_tid, tid, next_tid(tid));
3050#endif
3051 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
3052}
3053
3054static void init_kmem_cache_cpus(struct kmem_cache *s)
3055{
3056 int cpu;
3057 struct kmem_cache_cpu *c;
3058
3059 for_each_possible_cpu(cpu) {
3060 c = per_cpu_ptr(s->cpu_slab, cpu);
3061 local_lock_init(&c->lock);
3062 c->tid = init_tid(cpu);
3063 }
3064}
3065
3066/*
3067 * Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
3068 * unfreezes the slab and puts it on the proper list.
3069 * Assumes the slab has already been safely taken away from kmem_cache_cpu
3070 * by the caller.
3071 */
3072static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
3073 void *freelist)
3074{
3075 struct kmem_cache_node *n = get_node(s, slab_nid(slab));
3076 int free_delta = 0;
3077 void *nextfree, *freelist_iter, *freelist_tail;
3078 int tail = DEACTIVATE_TO_HEAD;
3079 unsigned long flags = 0;
3080 struct slab new;
3081 struct slab old;
3082
3083 if (READ_ONCE(slab->freelist)) {
3084 stat(s, DEACTIVATE_REMOTE_FREES);
3085 tail = DEACTIVATE_TO_TAIL;
3086 }
3087
3088 /*
3089 * Stage one: Count the objects on cpu's freelist as free_delta and
3090 * remember the last object in freelist_tail for later splicing.
3091 */
3092 freelist_tail = NULL;
3093 freelist_iter = freelist;
3094 while (freelist_iter) {
3095 nextfree = get_freepointer(s, freelist_iter);
3096
3097 /*
3098 * If 'nextfree' is invalid, it is possible that the object at
3099 * 'freelist_iter' is already corrupted. So isolate all objects
3100 * starting at 'freelist_iter' by skipping them.
3101 */
3102 if (freelist_corrupted(s, slab, &freelist_iter, nextfree))
3103 break;
3104
3105 freelist_tail = freelist_iter;
3106 free_delta++;
3107
3108 freelist_iter = nextfree;
3109 }
3110
3111 /*
3112 * Stage two: Unfreeze the slab while splicing the per-cpu
3113 * freelist to the head of slab's freelist.
3114 */
3115 do {
3116 old.freelist = READ_ONCE(slab->freelist);
3117 old.counters = READ_ONCE(slab->counters);
3118 VM_BUG_ON(!old.frozen);
3119
3120 /* Determine target state of the slab */
3121 new.counters = old.counters;
3122 new.frozen = 0;
3123 if (freelist_tail) {
3124 new.inuse -= free_delta;
3125 set_freepointer(s, freelist_tail, old.freelist);
3126 new.freelist = freelist;
3127 } else {
3128 new.freelist = old.freelist;
3129 }
3130 } while (!slab_update_freelist(s, slab,
3131 old.freelist, old.counters,
3132 new.freelist, new.counters,
3133 "unfreezing slab"));
3134
3135 /*
3136 * Stage three: Manipulate the slab list based on the updated state.
3137 */
3138 if (!new.inuse && n->nr_partial >= s->min_partial) {
3139 stat(s, DEACTIVATE_EMPTY);
3140 discard_slab(s, slab);
3141 stat(s, FREE_SLAB);
3142 } else if (new.freelist) {
3143 spin_lock_irqsave(&n->list_lock, flags);
3144 add_partial(n, slab, tail);
3145 spin_unlock_irqrestore(&n->list_lock, flags);
3146 stat(s, tail);
3147 } else {
3148 stat(s, DEACTIVATE_FULL);
3149 }
3150}
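
/*
 * Illustrative sketch of stage two above, with a hypothetical state:
 * given a cpu freelist A->B (free_delta == 2) and a remotely-freed slab
 * freelist X->Y, the cmpxchg splices them to A->B->X->Y as the new
 * slab->freelist, clears frozen and drops new.inuse by 2, so both
 * cpu-local objects are counted free again before stage three decides
 * between discarding the slab and putting it on the partial list.
 */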
3151
3152#ifdef CONFIG_SLUB_CPU_PARTIAL
3153static void __put_partials(struct kmem_cache *s, struct slab *partial_slab)
3154{
3155 struct kmem_cache_node *n = NULL, *n2 = NULL;
3156 struct slab *slab, *slab_to_discard = NULL;
3157 unsigned long flags = 0;
3158
3159 while (partial_slab) {
3160 slab = partial_slab;
3161 partial_slab = slab->next;
3162
3163 n2 = get_node(s, slab_nid(slab));
3164 if (n != n2) {
3165 if (n)
3166 spin_unlock_irqrestore(&n->list_lock, flags);
3167
3168 n = n2;
3169 spin_lock_irqsave(&n->list_lock, flags);
3170 }
3171
3172 if (unlikely(!slab->inuse && n->nr_partial >= s->min_partial)) {
3173 slab->next = slab_to_discard;
3174 slab_to_discard = slab;
3175 } else {
3176 add_partial(n, slab, DEACTIVATE_TO_TAIL);
3177 stat(s, FREE_ADD_PARTIAL);
3178 }
3179 }
3180
3181 if (n)
3182 spin_unlock_irqrestore(&n->list_lock, flags);
3183
3184 while (slab_to_discard) {
3185 slab = slab_to_discard;
3186 slab_to_discard = slab_to_discard->next;
3187
3188 stat(s, DEACTIVATE_EMPTY);
3189 discard_slab(s, slab);
3190 stat(s, FREE_SLAB);
3191 }
3192}
3193
3194/*
3195 * Put all the cpu partial slabs to the node partial list.
3196 */
3197static void put_partials(struct kmem_cache *s)
3198{
3199 struct slab *partial_slab;
3200 unsigned long flags;
3201
3202 local_lock_irqsave(&s->cpu_slab->lock, flags);
3203 partial_slab = this_cpu_read(s->cpu_slab->partial);
3204 this_cpu_write(s->cpu_slab->partial, NULL);
3205 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3206
3207 if (partial_slab)
3208 __put_partials(s, partial_slab);
3209}
3210
3211static void put_partials_cpu(struct kmem_cache *s,
3212 struct kmem_cache_cpu *c)
3213{
3214 struct slab *partial_slab;
3215
3216 partial_slab = slub_percpu_partial(c);
3217 c->partial = NULL;
3218
3219 if (partial_slab)
3220 __put_partials(s, partial_slab);
3221}
3222
3223/*
3224 * Put a slab into a partial slab slot if available.
3225 *
3226 * If we did not find a slot then simply move all the partials to the
3227 * per node partial list.
3228 */
3229static void put_cpu_partial(struct kmem_cache *s, struct slab *slab, int drain)
3230{
3231 struct slab *oldslab;
3232 struct slab *slab_to_put = NULL;
3233 unsigned long flags;
3234 int slabs = 0;
3235
3236 local_lock_irqsave(&s->cpu_slab->lock, flags);
3237
3238 oldslab = this_cpu_read(s->cpu_slab->partial);
3239
3240 if (oldslab) {
3241 if (drain && oldslab->slabs >= s->cpu_partial_slabs) {
3242 /*
3243 * Partial array is full. Move the existing set to the
3244 * per node partial list. Postpone the actual unfreezing
3245 * outside of the critical section.
3246 */
3247 slab_to_put = oldslab;
3248 oldslab = NULL;
3249 } else {
3250 slabs = oldslab->slabs;
3251 }
3252 }
3253
3254 slabs++;
3255
3256 slab->slabs = slabs;
3257 slab->next = oldslab;
3258
3259 this_cpu_write(s->cpu_slab->partial, slab);
3260
3261 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3262
3263 if (slab_to_put) {
3264 __put_partials(s, slab_to_put);
3265 stat(s, CPU_PARTIAL_DRAIN);
3266 }
3267}
3268
3269#else /* CONFIG_SLUB_CPU_PARTIAL */
3270
3271static inline void put_partials(struct kmem_cache *s) { }
3272static inline void put_partials_cpu(struct kmem_cache *s,
3273 struct kmem_cache_cpu *c) { }
3274
3275#endif /* CONFIG_SLUB_CPU_PARTIAL */
3276
3277static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
3278{
3279 unsigned long flags;
3280 struct slab *slab;
3281 void *freelist;
3282
3283 local_lock_irqsave(&s->cpu_slab->lock, flags);
3284
3285 slab = c->slab;
3286 freelist = c->freelist;
3287
3288 c->slab = NULL;
3289 c->freelist = NULL;
3290 c->tid = next_tid(c->tid);
3291
3292 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3293
3294 if (slab) {
3295 deactivate_slab(s, slab, freelist);
3296 stat(s, CPUSLAB_FLUSH);
3297 }
3298}
3299
3300static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
3301{
3302 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3303 void *freelist = c->freelist;
3304 struct slab *slab = c->slab;
3305
3306 c->slab = NULL;
3307 c->freelist = NULL;
3308 c->tid = next_tid(c->tid);
3309
3310 if (slab) {
3311 deactivate_slab(s, slab, freelist);
3312 stat(s, CPUSLAB_FLUSH);
3313 }
3314
3315 put_partials_cpu(s, c);
3316}
3317
3318struct slub_flush_work {
3319 struct work_struct work;
3320 struct kmem_cache *s;
3321 bool skip;
3322};
3323
3324/*
3325 * Flush cpu slab.
3326 *
3327 * Called from CPU work handler with migration disabled.
3328 */
3329static void flush_cpu_slab(struct work_struct *w)
3330{
3331 struct kmem_cache *s;
3332 struct kmem_cache_cpu *c;
3333 struct slub_flush_work *sfw;
3334
3335 sfw = container_of(w, struct slub_flush_work, work);
3336
3337 s = sfw->s;
3338 c = this_cpu_ptr(s->cpu_slab);
3339
3340 if (c->slab)
3341 flush_slab(s, c);
3342
3343 put_partials(s);
3344}
3345
3346static bool has_cpu_slab(int cpu, struct kmem_cache *s)
3347{
3348 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
3349
3350 return c->slab || slub_percpu_partial(c);
3351}
3352
3353static DEFINE_MUTEX(flush_lock);
3354static DEFINE_PER_CPU(struct slub_flush_work, slub_flush);
3355
3356static void flush_all_cpus_locked(struct kmem_cache *s)
3357{
3358 struct slub_flush_work *sfw;
3359 unsigned int cpu;
3360
3361 lockdep_assert_cpus_held();
3362 mutex_lock(&flush_lock);
3363
3364 for_each_online_cpu(cpu) {
3365 sfw = &per_cpu(slub_flush, cpu);
3366 if (!has_cpu_slab(cpu, s)) {
3367 sfw->skip = true;
3368 continue;
3369 }
3370 INIT_WORK(&sfw->work, flush_cpu_slab);
3371 sfw->skip = false;
3372 sfw->s = s;
3373 queue_work_on(cpu, flushwq, &sfw->work);
3374 }
3375
3376 for_each_online_cpu(cpu) {
3377 sfw = &per_cpu(slub_flush, cpu);
3378 if (sfw->skip)
3379 continue;
3380 flush_work(&sfw->work);
3381 }
3382
3383 mutex_unlock(&flush_lock);
3384}
3385
3386static void flush_all(struct kmem_cache *s)
3387{
3388 cpus_read_lock();
3389 flush_all_cpus_locked(s);
3390 cpus_read_unlock();
3391}
3392
3393/*
3394 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3395 * necessary.
3396 */
3397static int slub_cpu_dead(unsigned int cpu)
3398{
3399 struct kmem_cache *s;
3400
3401 mutex_lock(&slab_mutex);
3402 list_for_each_entry(s, &slab_caches, list)
3403 __flush_cpu_slab(s, cpu);
3404 mutex_unlock(&slab_mutex);
3405 return 0;
3406}
3407
3408#else /* CONFIG_SLUB_TINY */
3409static inline void flush_all_cpus_locked(struct kmem_cache *s) { }
3410static inline void flush_all(struct kmem_cache *s) { }
3411static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu) { }
3412static inline int slub_cpu_dead(unsigned int cpu) { return 0; }
3413#endif /* CONFIG_SLUB_TINY */
3414
3415/*
3416 * Check if the objects in a per cpu structure fit numa
3417 * locality expectations.
3418 */
3419static inline int node_match(struct slab *slab, int node)
3420{
3421#ifdef CONFIG_NUMA
3422 if (node != NUMA_NO_NODE && slab_nid(slab) != node)
3423 return 0;
3424#endif
3425 return 1;
3426}
3427
3428#ifdef CONFIG_SLUB_DEBUG
3429static int count_free(struct slab *slab)
3430{
3431 return slab->objects - slab->inuse;
3432}
3433
3434static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
3435{
3436 return atomic_long_read(&n->total_objects);
3437}
3438
3439/* Supports checking bulk free of a constructed freelist */
3440static inline bool free_debug_processing(struct kmem_cache *s,
3441 struct slab *slab, void *head, void *tail, int *bulk_cnt,
3442 unsigned long addr, depot_stack_handle_t handle)
3443{
3444 bool checks_ok = false;
3445 void *object = head;
3446 int cnt = 0;
3447
3448 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3449 if (!check_slab(s, slab))
3450 goto out;
3451 }
3452
3453 if (slab->inuse < *bulk_cnt) {
3454 slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
3455 slab->inuse, *bulk_cnt);
3456 goto out;
3457 }
3458
3459next_object:
3460
3461 if (++cnt > *bulk_cnt)
3462 goto out_cnt;
3463
3464 if (s->flags & SLAB_CONSISTENCY_CHECKS) {
3465 if (!free_consistency_checks(s, slab, object, addr))
3466 goto out;
3467 }
3468
3469 if (s->flags & SLAB_STORE_USER)
3470 set_track_update(s, object, TRACK_FREE, addr, handle);
3471 trace(s, slab, object, 0);
3472 /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
3473 init_object(s, object, SLUB_RED_INACTIVE);
3474
3475 /* Reached end of constructed freelist yet? */
3476 if (object != tail) {
3477 object = get_freepointer(s, object);
3478 goto next_object;
3479 }
3480 checks_ok = true;
3481
3482out_cnt:
3483 if (cnt != *bulk_cnt) {
3484 slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
3485 *bulk_cnt, cnt);
3486 *bulk_cnt = cnt;
3487 }
3488
3489out:
3490
3491 if (!checks_ok)
3492 slab_fix(s, "Object at 0x%p not freed", object);
3493
3494 return checks_ok;
3495}
3496#endif /* CONFIG_SLUB_DEBUG */
3497
3498#if defined(CONFIG_SLUB_DEBUG) || defined(SLAB_SUPPORTS_SYSFS)
3499static unsigned long count_partial(struct kmem_cache_node *n,
3500 int (*get_count)(struct slab *))
3501{
3502 unsigned long flags;
3503 unsigned long x = 0;
3504 struct slab *slab;
3505
3506 spin_lock_irqsave(&n->list_lock, flags);
3507 list_for_each_entry(slab, &n->partial, slab_list)
3508 x += get_count(slab);
3509 spin_unlock_irqrestore(&n->list_lock, flags);
3510 return x;
3511}
3512#endif /* CONFIG_SLUB_DEBUG || SLAB_SUPPORTS_SYSFS */
3513
3514#ifdef CONFIG_SLUB_DEBUG
3515#define MAX_PARTIAL_TO_SCAN 10000
3516
3517static unsigned long count_partial_free_approx(struct kmem_cache_node *n)
3518{
3519 unsigned long flags;
3520 unsigned long x = 0;
3521 struct slab *slab;
3522
3523 spin_lock_irqsave(&n->list_lock, flags);
3524 if (n->nr_partial <= MAX_PARTIAL_TO_SCAN) {
3525 list_for_each_entry(slab, &n->partial, slab_list)
3526 x += slab->objects - slab->inuse;
3527 } else {
3528 /*
3529 * For a long list, approximate the total count of objects in
3530 * it to meet the limit on the number of slabs to scan.
3531 * Scan from both the list's head and tail for better accuracy.
3532 */
3533 unsigned long scanned = 0;
3534
3535 list_for_each_entry(slab, &n->partial, slab_list) {
3536 x += slab->objects - slab->inuse;
3537 if (++scanned == MAX_PARTIAL_TO_SCAN / 2)
3538 break;
3539 }
3540 list_for_each_entry_reverse(slab, &n->partial, slab_list) {
3541 x += slab->objects - slab->inuse;
3542 if (++scanned == MAX_PARTIAL_TO_SCAN)
3543 break;
3544 }
3545 x = mult_frac(x, n->nr_partial, scanned);
3546 x = min(x, node_nr_objs(n));
3547 }
3548 spin_unlock_irqrestore(&n->list_lock, flags);
3549 return x;
3550}
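
/*
 * Worked example with hypothetical numbers: for n->nr_partial == 50000,
 * the two loops above sample 5000 slabs from the head and 5000 from the
 * tail. If those 10000 slabs hold x == 40000 free objects, the estimate
 * is mult_frac(40000, 50000, 10000) == 200000, and the min() with
 * node_nr_objs(n) guarantees a skewed sample never reports more free
 * objects than the node actually has.
 */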
3551
3552static noinline void
3553slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
3554{
3555 static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
3556 DEFAULT_RATELIMIT_BURST);
3557 int cpu = raw_smp_processor_id();
3558 int node;
3559 struct kmem_cache_node *n;
3560
3561 if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
3562 return;
3563
3564 pr_warn("SLUB: Unable to allocate memory on CPU %u (of node %d) on node %d, gfp=%#x(%pGg)\n",
3565 cpu, cpu_to_node(cpu), nid, gfpflags, &gfpflags);
3566 pr_warn(" cache: %s, object size: %u, buffer size: %u, default order: %u, min order: %u\n",
3567 s->name, s->object_size, s->size, oo_order(s->oo),
3568 oo_order(s->min));
3569
3570 if (oo_order(s->min) > get_order(s->object_size))
3571 pr_warn(" %s debugging increased min order, use slab_debug=O to disable.\n",
3572 s->name);
3573
3574 for_each_kmem_cache_node(s, node, n) {
3575 unsigned long nr_slabs;
3576 unsigned long nr_objs;
3577 unsigned long nr_free;
3578
3579 nr_free = count_partial_free_approx(n);
3580 nr_slabs = node_nr_slabs(n);
3581 nr_objs = node_nr_objs(n);
3582
3583 pr_warn(" node %d: slabs: %ld, objs: %ld, free: %ld\n",
3584 node, nr_slabs, nr_objs, nr_free);
3585 }
3586}
3587#else /* CONFIG_SLUB_DEBUG */
3588static inline void
3589slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid) { }
3590#endif
3591
3592static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
3593{
3594 if (unlikely(slab_test_pfmemalloc(slab)))
3595 return gfp_pfmemalloc_allowed(gfpflags);
3596
3597 return true;
3598}
3599
3600#ifndef CONFIG_SLUB_TINY
3601static inline bool
3602__update_cpu_freelist_fast(struct kmem_cache *s,
3603 void *freelist_old, void *freelist_new,
3604 unsigned long tid)
3605{
3606 freelist_aba_t old = { .freelist = freelist_old, .counter = tid };
3607 freelist_aba_t new = { .freelist = freelist_new, .counter = next_tid(tid) };
3608
3609 return this_cpu_try_cmpxchg_freelist(s->cpu_slab->freelist_tid.full,
3610 &old.full, new.full);
3611}
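
/*
 * Illustrative note on the double-word cmpxchg above: freelist_aba_t
 * pairs the freelist pointer with the tid, so the exchange only
 * succeeds if both are unchanged. Even if an interleaved free puts the
 * identical pointer back (the classic ABA pattern), the operation that
 * did so advanced the tid, and the stale transaction still fails.
 */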
3612
3613/*
3614 * Check the slab->freelist and either transfer the freelist to the
3615 * per cpu freelist or deactivate the slab.
3616 *
3617 * The slab is still frozen if the return value is not NULL.
3618 *
3619 * If this function returns NULL then the slab has been unfrozen.
3620 */
3621static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
3622{
3623 struct slab new;
3624 unsigned long counters;
3625 void *freelist;
3626
3627 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3628
3629 do {
3630 freelist = slab->freelist;
3631 counters = slab->counters;
3632
3633 new.counters = counters;
3634
3635 new.inuse = slab->objects;
3636 new.frozen = freelist != NULL;
3637
3638 } while (!__slab_update_freelist(s, slab,
3639 freelist, counters,
3640 NULL, new.counters,
3641 "get_freelist"));
3642
3643 return freelist;
3644}
3645
3646/*
3647 * Freeze the partial slab and return the pointer to the freelist.
3648 */
3649static inline void *freeze_slab(struct kmem_cache *s, struct slab *slab)
3650{
3651 struct slab new;
3652 unsigned long counters;
3653 void *freelist;
3654
3655 do {
3656 freelist = slab->freelist;
3657 counters = slab->counters;
3658
3659 new.counters = counters;
3660 VM_BUG_ON(new.frozen);
3661
3662 new.inuse = slab->objects;
3663 new.frozen = 1;
3664
3665 } while (!slab_update_freelist(s, slab,
3666 freelist, counters,
3667 NULL, new.counters,
3668 "freeze_slab"));
3669
3670 return freelist;
3671}
3672
3673/*
3674 * Slow path. The lockless freelist is empty or we need to perform
3675 * debugging duties.
3676 *
3677 * Processing is still very fast if new objects have been freed to the
3678 * regular freelist. In that case we simply take over the regular freelist
3679 * as the lockless freelist and zap the regular freelist.
3680 *
3681 * If that is not working then we fall back to the partial lists. We take the
3682 * first element of the freelist as the object to allocate now and move the
3683 * rest of the freelist to the lockless freelist.
3684 *
3685 * And if we were unable to get a new slab from the partial slab lists then
3686 * we need to allocate a new slab. This is the slowest path since it involves
3687 * a call to the page allocator and the setup of a new slab.
3688 *
3689 * Version of __slab_alloc to use when we know that preemption is
3690 * already disabled (which is the case for bulk allocation).
3691 */
3692static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3693 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3694{
3695 void *freelist;
3696 struct slab *slab;
3697 unsigned long flags;
3698 struct partial_context pc;
3699 bool try_thisnode = true;
3700
3701 stat(s, ALLOC_SLOWPATH);
3702
3703reread_slab:
3704
3705 slab = READ_ONCE(c->slab);
3706 if (!slab) {
3707 /*
3708 * if the node is not online or has no normal memory, just
3709 * ignore the node constraint
3710 */
3711 if (unlikely(node != NUMA_NO_NODE &&
3712 !node_isset(node, slab_nodes)))
3713 node = NUMA_NO_NODE;
3714 goto new_slab;
3715 }
3716
3717 if (unlikely(!node_match(slab, node))) {
3718 /*
3719 * same as above but node_match() being false already
3720 * implies node != NUMA_NO_NODE
3721 */
3722 if (!node_isset(node, slab_nodes)) {
3723 node = NUMA_NO_NODE;
3724 } else {
3725 stat(s, ALLOC_NODE_MISMATCH);
3726 goto deactivate_slab;
3727 }
3728 }
3729
3730 /*
3731 * By rights, we should be searching for a slab page that was
3732 * PFMEMALLOC but right now, we are losing the pfmemalloc
3733 * information when the page leaves the per-cpu allocator
3734 */
3735 if (unlikely(!pfmemalloc_match(slab, gfpflags)))
3736 goto deactivate_slab;
3737
3738 /* must check again c->slab in case we got preempted and it changed */
3739 local_lock_irqsave(&s->cpu_slab->lock, flags);
3740 if (unlikely(slab != c->slab)) {
3741 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3742 goto reread_slab;
3743 }
3744 freelist = c->freelist;
3745 if (freelist)
3746 goto load_freelist;
3747
3748 freelist = get_freelist(s, slab);
3749
3750 if (!freelist) {
3751 c->slab = NULL;
3752 c->tid = next_tid(c->tid);
3753 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3754 stat(s, DEACTIVATE_BYPASS);
3755 goto new_slab;
3756 }
3757
3758 stat(s, ALLOC_REFILL);
3759
3760load_freelist:
3761
3762 lockdep_assert_held(this_cpu_ptr(&s->cpu_slab->lock));
3763
3764 /*
3765 * freelist is pointing to the list of objects to be used.
3766 * slab is pointing to the slab from which the objects are obtained.
3767 * That slab must be frozen for per cpu allocations to work.
3768 */
3769 VM_BUG_ON(!c->slab->frozen);
3770 c->freelist = get_freepointer(s, freelist);
3771 c->tid = next_tid(c->tid);
3772 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3773 return freelist;
3774
3775deactivate_slab:
3776
3777 local_lock_irqsave(&s->cpu_slab->lock, flags);
3778 if (slab != c->slab) {
3779 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3780 goto reread_slab;
3781 }
3782 freelist = c->freelist;
3783 c->slab = NULL;
3784 c->freelist = NULL;
3785 c->tid = next_tid(c->tid);
3786 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3787 deactivate_slab(s, slab, freelist);
3788
3789new_slab:
3790
3791#ifdef CONFIG_SLUB_CPU_PARTIAL
3792 while (slub_percpu_partial(c)) {
3793 local_lock_irqsave(&s->cpu_slab->lock, flags);
3794 if (unlikely(c->slab)) {
3795 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3796 goto reread_slab;
3797 }
3798 if (unlikely(!slub_percpu_partial(c))) {
3799 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3800 /* we were preempted and partial list got empty */
3801 goto new_objects;
3802 }
3803
3804 slab = slub_percpu_partial(c);
3805 slub_set_percpu_partial(c, slab);
3806
3807 if (likely(node_match(slab, node) &&
3808 pfmemalloc_match(slab, gfpflags))) {
3809 c->slab = slab;
3810 freelist = get_freelist(s, slab);
3811 VM_BUG_ON(!freelist);
3812 stat(s, CPU_PARTIAL_ALLOC);
3813 goto load_freelist;
3814 }
3815
3816 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3817
3818 slab->next = NULL;
3819 __put_partials(s, slab);
3820 }
3821#endif
3822
3823new_objects:
3824
3825 pc.flags = gfpflags;
3826 /*
3827 * When a preferred node is indicated but no __GFP_THISNODE
3828 *
3829 * 1) try to get a partial slab from target node only by having
3830 * __GFP_THISNODE in pc.flags for get_partial()
3831 * 2) if 1) failed, try to allocate a new slab from target node with
3832	 *    GFP_NOWAIT | __GFP_THISNODE opportunistically
3833	 * 3) if 2) failed, retry with the original gfpflags, which allows
3834	 *    get_partial() to try partial lists of other nodes before potentially
3835	 *    allocating a new page from other nodes
3836 */
3837 if (unlikely(node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
3838 && try_thisnode))
3839 pc.flags = GFP_NOWAIT | __GFP_THISNODE;
3840
3841 pc.orig_size = orig_size;
3842 slab = get_partial(s, node, &pc);
3843 if (slab) {
3844 if (kmem_cache_debug(s)) {
3845 freelist = pc.object;
3846 /*
3847 * For debug caches here we had to go through
3848 * alloc_single_from_partial() so just store the
3849 * tracking info and return the object.
3850 */
3851 if (s->flags & SLAB_STORE_USER)
3852 set_track(s, freelist, TRACK_ALLOC, addr);
3853
3854 return freelist;
3855 }
3856
3857 freelist = freeze_slab(s, slab);
3858 goto retry_load_slab;
3859 }
3860
3861 slub_put_cpu_ptr(s->cpu_slab);
3862 slab = new_slab(s, pc.flags, node);
3863 c = slub_get_cpu_ptr(s->cpu_slab);
3864
3865 if (unlikely(!slab)) {
3866 if (node != NUMA_NO_NODE && !(gfpflags & __GFP_THISNODE)
3867 && try_thisnode) {
3868 try_thisnode = false;
3869 goto new_objects;
3870 }
3871 slab_out_of_memory(s, gfpflags, node);
3872 return NULL;
3873 }
3874
3875 stat(s, ALLOC_SLAB);
3876
3877 if (kmem_cache_debug(s)) {
3878 freelist = alloc_single_from_new_slab(s, slab, orig_size);
3879
3880 if (unlikely(!freelist))
3881 goto new_objects;
3882
3883 if (s->flags & SLAB_STORE_USER)
3884 set_track(s, freelist, TRACK_ALLOC, addr);
3885
3886 return freelist;
3887 }
3888
3889 /*
3890 * No other reference to the slab yet so we can
3891 * muck around with it freely without cmpxchg
3892 */
3893 freelist = slab->freelist;
3894 slab->freelist = NULL;
3895 slab->inuse = slab->objects;
3896 slab->frozen = 1;
3897
3898 inc_slabs_node(s, slab_nid(slab), slab->objects);
3899
3900 if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
3901 /*
3902 * For !pfmemalloc_match() case we don't load freelist so that
3903 * we don't make further mismatched allocations easier.
3904 */
3905 deactivate_slab(s, slab, get_freepointer(s, freelist));
3906 return freelist;
3907 }
3908
3909retry_load_slab:
3910
3911 local_lock_irqsave(&s->cpu_slab->lock, flags);
3912 if (unlikely(c->slab)) {
3913 void *flush_freelist = c->freelist;
3914 struct slab *flush_slab = c->slab;
3915
3916 c->slab = NULL;
3917 c->freelist = NULL;
3918 c->tid = next_tid(c->tid);
3919
3920 local_unlock_irqrestore(&s->cpu_slab->lock, flags);
3921
3922 deactivate_slab(s, flush_slab, flush_freelist);
3923
3924 stat(s, CPUSLAB_FLUSH);
3925
3926 goto retry_load_slab;
3927 }
3928 c->slab = slab;
3929
3930 goto load_freelist;
3931}
3932
3933/*
3934 * A wrapper for ___slab_alloc() for contexts where preemption is not yet
3935 * disabled. Compensates for possible cpu changes by refetching the per cpu area
3936 * pointer.
3937 */
3938static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
3939 unsigned long addr, struct kmem_cache_cpu *c, unsigned int orig_size)
3940{
3941 void *p;
3942
3943#ifdef CONFIG_PREEMPT_COUNT
3944 /*
3945 * We may have been preempted and rescheduled on a different
3946 * cpu before disabling preemption. Need to reload cpu area
3947 * pointer.
3948 */
3949 c = slub_get_cpu_ptr(s->cpu_slab);
3950#endif
3951
3952 p = ___slab_alloc(s, gfpflags, node, addr, c, orig_size);
3953#ifdef CONFIG_PREEMPT_COUNT
3954 slub_put_cpu_ptr(s->cpu_slab);
3955#endif
3956 return p;
3957}
3958
3959static __always_inline void *__slab_alloc_node(struct kmem_cache *s,
3960 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
3961{
3962 struct kmem_cache_cpu *c;
3963 struct slab *slab;
3964 unsigned long tid;
3965 void *object;
3966
3967redo:
3968 /*
3969 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
3970 * enabled. We may switch back and forth between cpus while
3971 * reading from one cpu area. That does not matter as long
3972 * as we end up on the original cpu again when doing the cmpxchg.
3973 *
3974 * We must guarantee that tid and kmem_cache_cpu are retrieved on the
3975 * same cpu. We read first the kmem_cache_cpu pointer and use it to read
3976 * the tid. If we are preempted and switched to another cpu between the
3977 * two reads, it's OK as the two are still associated with the same cpu
3978 * and cmpxchg later will validate the cpu.
3979 */
3980 c = raw_cpu_ptr(s->cpu_slab);
3981 tid = READ_ONCE(c->tid);
3982
3983 /*
3984 * Irqless object alloc/free algorithm used here depends on sequence
3985 * of fetching cpu_slab's data. tid should be fetched before anything
3986 * on c to guarantee that object and slab associated with previous tid
3987	 * won't be used with current tid. If we fetch tid first, object and
3988	 * slab could be ones associated with the next tid and our alloc/free
3989	 * request will fail. In this case we will retry, so there is no problem.
3990 */
3991 barrier();
3992
3993 /*
3994 * The transaction ids are globally unique per cpu and per operation on
3995	 * a per cpu queue. Thus they can guarantee that the cmpxchg_double
3996 * occurs on the right processor and that there was no operation on the
3997 * linked list in between.
3998 */
3999
4000 object = c->freelist;
4001 slab = c->slab;
4002
4003#ifdef CONFIG_NUMA
4004 if (static_branch_unlikely(&strict_numa) &&
4005 node == NUMA_NO_NODE) {
4006
4007 struct mempolicy *mpol = current->mempolicy;
4008
4009 if (mpol) {
4010 /*
4011			 * Special BIND rule support. If the existing slab
4012			 * is in the permitted set then do not redirect
4013			 * to a particular node.
4014			 * Otherwise we apply the memory policy to get
4015			 * the node we need to allocate on.
4016 */
4017 if (mpol->mode != MPOL_BIND || !slab ||
4018 !node_isset(slab_nid(slab), mpol->nodes))
4019
4020 node = mempolicy_slab_node();
4021 }
4022 }
4023#endif
4024
4025 if (!USE_LOCKLESS_FAST_PATH() ||
4026 unlikely(!object || !slab || !node_match(slab, node))) {
4027 object = __slab_alloc(s, gfpflags, node, addr, c, orig_size);
4028 } else {
4029 void *next_object = get_freepointer_safe(s, object);
4030
4031 /*
4032 * The cmpxchg will only match if there was no additional
4033 * operation and if we are on the right processor.
4034 *
4035 * The cmpxchg does the following atomically (without lock
4036 * semantics!)
4037 * 1. Relocate first pointer to the current per cpu area.
4038 * 2. Verify that tid and freelist have not been changed
4039 * 3. If they were not changed replace tid and freelist
4040 *
4041 * Since this is without lock semantics the protection is only
4042 * against code executing on this cpu *not* from access by
4043 * other cpus.
4044 */
4045 if (unlikely(!__update_cpu_freelist_fast(s, object, next_object, tid))) {
4046 note_cmpxchg_failure("slab_alloc", s, tid);
4047 goto redo;
4048 }
4049 prefetch_freepointer(s, next_object);
4050 stat(s, ALLOC_FASTPATH);
4051 }
4052
4053 return object;
4054}
4055#else /* CONFIG_SLUB_TINY */
4056static void *__slab_alloc_node(struct kmem_cache *s,
4057 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4058{
4059 struct partial_context pc;
4060 struct slab *slab;
4061 void *object;
4062
4063 pc.flags = gfpflags;
4064 pc.orig_size = orig_size;
4065 slab = get_partial(s, node, &pc);
4066
4067 if (slab)
4068 return pc.object;
4069
4070 slab = new_slab(s, gfpflags, node);
4071 if (unlikely(!slab)) {
4072 slab_out_of_memory(s, gfpflags, node);
4073 return NULL;
4074 }
4075
4076 object = alloc_single_from_new_slab(s, slab, orig_size);
4077
4078 return object;
4079}
4080#endif /* CONFIG_SLUB_TINY */
4081
4082/*
4083 * If the object has been wiped upon free, make sure it's fully initialized by
4084 * zeroing out freelist pointer.
4085 * zeroing out the freelist pointer.
4086 * Note that we also wipe custom freelist pointers.
4087 */
4088static __always_inline void maybe_wipe_obj_freeptr(struct kmem_cache *s,
4089 void *obj)
4090{
4091 if (unlikely(slab_want_init_on_free(s)) && obj &&
4092 !freeptr_outside_object(s))
4093 memset((void *)((char *)kasan_reset_tag(obj) + s->offset),
4094 0, sizeof(void *));
4095}
4096
4097static __fastpath_inline
4098struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
4099{
4100 flags &= gfp_allowed_mask;
4101
4102 might_alloc(flags);
4103
4104 if (unlikely(should_failslab(s, flags)))
4105 return NULL;
4106
4107 return s;
4108}
4109
4110static __fastpath_inline
4111bool slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
4112 gfp_t flags, size_t size, void **p, bool init,
4113 unsigned int orig_size)
4114{
4115 unsigned int zero_size = s->object_size;
4116 bool kasan_init = init;
4117 size_t i;
4118 gfp_t init_flags = flags & gfp_allowed_mask;
4119
4120 /*
4121	 * For a kmalloc object, the allocated memory size (object_size) is likely
4122	 * larger than the requested size (orig_size). If the redzone check is
4123 * enabled for the extra space, don't zero it, as it will be redzoned
4124 * soon. The redzone operation for this extra space could be seen as a
4125 * replacement of current poisoning under certain debug option, and
4126 * won't break other sanity checks.
4127 */
4128 if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
4129 (s->flags & SLAB_KMALLOC))
4130 zero_size = orig_size;
4131
4132 /*
4133 * When slab_debug is enabled, avoid memory initialization integrated
4134 * into KASAN and instead zero out the memory via the memset below with
4135 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
4136 * cause false-positive reports. This does not lead to a performance
4137 * penalty on production builds, as slab_debug is not intended to be
4138 * enabled there.
4139 */
4140 if (__slub_debug_enabled())
4141 kasan_init = false;
4142
4143 /*
4144 * As memory initialization might be integrated into KASAN,
4145 * kasan_slab_alloc and initialization memset must be
4146 * kept together to avoid discrepancies in behavior.
4147 *
4148 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
4149 */
4150 for (i = 0; i < size; i++) {
4151 p[i] = kasan_slab_alloc(s, p[i], init_flags, kasan_init);
4152 if (p[i] && init && (!kasan_init ||
4153 !kasan_has_integrated_init()))
4154 memset(p[i], 0, zero_size);
4155 kmemleak_alloc_recursive(p[i], s->object_size, 1,
4156 s->flags, init_flags);
4157 kmsan_slab_alloc(s, p[i], init_flags);
4158 alloc_tagging_slab_alloc_hook(s, p[i], flags);
4159 }
4160
4161 return memcg_slab_post_alloc_hook(s, lru, flags, size, p);
4162}
4163
4164/*
4165 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
4166 * have the fastpath folded into their functions. So no function call
4167 * overhead for requests that can be satisfied on the fastpath.
4168 *
4169 * The fastpath works by first checking if the lockless freelist can be used.
4170 * If not then __slab_alloc is called for slow processing.
4171 *
4172 * Otherwise we can simply pick the next object from the lockless free list.
4173 */
4174static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list_lru *lru,
4175 gfp_t gfpflags, int node, unsigned long addr, size_t orig_size)
4176{
4177 void *object;
4178 bool init = false;
4179
4180 s = slab_pre_alloc_hook(s, gfpflags);
4181 if (unlikely(!s))
4182 return NULL;
4183
4184 object = kfence_alloc(s, orig_size, gfpflags);
4185 if (unlikely(object))
4186 goto out;
4187
4188 object = __slab_alloc_node(s, gfpflags, node, addr, orig_size);
4189
4190 maybe_wipe_obj_freeptr(s, object);
4191 init = slab_want_init_on_alloc(gfpflags, s);
4192
4193out:
4194 /*
4195	 * When init equals 'true', as for the kzalloc() family, only
4196	 * @orig_size bytes might be zeroed instead of s->object_size.
4197	 * In case this fails due to memcg_slab_post_alloc_hook(),
4198	 * object is set to NULL.
4199 */
4200 slab_post_alloc_hook(s, lru, gfpflags, 1, &object, init, orig_size);
4201
4202 return object;
4203}
4204
4205void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
4206{
4207 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
4208 s->object_size);
4209
4210 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4211
4212 return ret;
4213}
4214EXPORT_SYMBOL(kmem_cache_alloc_noprof);
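/*
 * Editorial usage sketch, not part of the kernel sources: a minimal,
 * hypothetical "widget" cache exercising the public allocation API that
 * funnels into kmem_cache_alloc_noprof() above. Error handling is
 * abbreviated.
 *
 *	struct widget { int id; };
 *	static struct kmem_cache *widget_cache;
 *
 *	widget_cache = kmem_cache_create("widget", sizeof(struct widget),
 *					 0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (widget_cache) {
 *		struct widget *w = kmem_cache_alloc(widget_cache, GFP_KERNEL);
 *
 *		if (w) {
 *			w->id = 1;
 *			kmem_cache_free(widget_cache, w);
 *		}
 *		kmem_cache_destroy(widget_cache);
 *	}
 */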
4215
4216void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
4217 gfp_t gfpflags)
4218{
4219 void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
4220 s->object_size);
4221
4222 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, NUMA_NO_NODE);
4223
4224 return ret;
4225}
4226EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
4227
4228bool kmem_cache_charge(void *objp, gfp_t gfpflags)
4229{
4230 if (!memcg_kmem_online())
4231 return true;
4232
4233 return memcg_slab_post_charge(objp, gfpflags);
4234}
4235EXPORT_SYMBOL(kmem_cache_charge);
4236
4237/**
4238 * kmem_cache_alloc_node - Allocate an object on the specified node
4239 * @s: The cache to allocate from.
4240 * @gfpflags: See kmalloc().
4241 * @node: node number of the target node.
4242 *
4243 * Identical to kmem_cache_alloc but it will allocate memory on the given
4244 * node, which can improve the performance for cpu bound structures.
4245 *
4246 * Fallback to other node is possible if __GFP_THISNODE is not set.
4247 *
4248 * Return: pointer to the new object or %NULL in case of error
4249 */
4250void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
4251{
4252 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
4253
4254 trace_kmem_cache_alloc(_RET_IP_, ret, s, gfpflags, node);
4255
4256 return ret;
4257}
4258EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
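/*
 * Editorial usage sketch ("my_cache" and "dev" are hypothetical):
 * allocating an object close to a device's NUMA node. Because
 * __GFP_THISNODE is not passed, the allocation may still fall back to
 * other nodes, as the kernel-doc above notes.
 *
 *	int nid = dev_to_node(dev);
 *	void *obj = kmem_cache_alloc_node(my_cache, GFP_KERNEL, nid);
 *
 *	if (obj)
 *		kmem_cache_free(my_cache, obj);
 */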
4259
4260/*
4261 * To avoid unnecessary overhead, we pass through large allocation requests
4262 * directly to the page allocator. We use __GFP_COMP, because we will need to
4263 * know the allocation order to free the pages properly in kfree.
4264 */
4265static void *___kmalloc_large_node(size_t size, gfp_t flags, int node)
4266{
4267 struct folio *folio;
4268 void *ptr = NULL;
4269 unsigned int order = get_order(size);
4270
4271 if (unlikely(flags & GFP_SLAB_BUG_MASK))
4272 flags = kmalloc_fix_flags(flags);
4273
4274 flags |= __GFP_COMP;
4275 folio = (struct folio *)alloc_pages_node_noprof(node, flags, order);
4276 if (folio) {
4277 ptr = folio_address(folio);
4278 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4279 PAGE_SIZE << order);
4280 __folio_set_large_kmalloc(folio);
4281 }
4282
4283 ptr = kasan_kmalloc_large(ptr, size, flags);
4284 /* As ptr might get tagged, call kmemleak hook after KASAN. */
4285 kmemleak_alloc(ptr, size, 1, flags);
4286 kmsan_kmalloc_large(ptr, size, flags);
4287
4288 return ptr;
4289}
4290
4291void *__kmalloc_large_noprof(size_t size, gfp_t flags)
4292{
4293 void *ret = ___kmalloc_large_node(size, flags, NUMA_NO_NODE);
4294
4295 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4296 flags, NUMA_NO_NODE);
4297 return ret;
4298}
4299EXPORT_SYMBOL(__kmalloc_large_noprof);
4300
4301void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
4302{
4303 void *ret = ___kmalloc_large_node(size, flags, node);
4304
4305 trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
4306 flags, node);
4307 return ret;
4308}
4309EXPORT_SYMBOL(__kmalloc_large_node_noprof);
4310
4311static __always_inline
4312void *__do_kmalloc_node(size_t size, kmem_buckets *b, gfp_t flags, int node,
4313 unsigned long caller)
4314{
4315 struct kmem_cache *s;
4316 void *ret;
4317
4318 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4319 ret = __kmalloc_large_node_noprof(size, flags, node);
4320 trace_kmalloc(caller, ret, size,
4321 PAGE_SIZE << get_order(size), flags, node);
4322 return ret;
4323 }
4324
4325 if (unlikely(!size))
4326 return ZERO_SIZE_PTR;
4327
4328 s = kmalloc_slab(size, b, flags, caller);
4329
4330 ret = slab_alloc_node(s, NULL, flags, node, caller, size);
4331 ret = kasan_kmalloc(s, ret, size, flags);
4332 trace_kmalloc(caller, ret, size, s->size, flags, node);
4333 return ret;
4334}
4335void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
4336{
4337 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, _RET_IP_);
4338}
4339EXPORT_SYMBOL(__kmalloc_node_noprof);
4340
4341void *__kmalloc_noprof(size_t size, gfp_t flags)
4342{
4343 return __do_kmalloc_node(size, NULL, flags, NUMA_NO_NODE, _RET_IP_);
4344}
4345EXPORT_SYMBOL(__kmalloc_noprof);
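/*
 * Editorial sketch of the size-based routing in __do_kmalloc_node():
 * requests up to KMALLOC_MAX_CACHE_SIZE come from the kmalloc slab
 * caches, anything larger goes straight to the page allocator. Both
 * are freed with kfree(). NULL checks omitted for brevity.
 *
 *	void *small = kmalloc(64, GFP_KERNEL);
 *	void *large = kmalloc(KMALLOC_MAX_CACHE_SIZE + 1, GFP_KERNEL);
 *
 *	kfree(small);
 *	kfree(large);
 */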
4346
4347void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags,
4348 int node, unsigned long caller)
4349{
4350 return __do_kmalloc_node(size, PASS_BUCKET_PARAM(b), flags, node, caller);
4351
4352}
4353EXPORT_SYMBOL(__kmalloc_node_track_caller_noprof);
4354
4355void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
4356{
4357 void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
4358 _RET_IP_, size);
4359
4360 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);
4361
4362 ret = kasan_kmalloc(s, ret, size, gfpflags);
4363 return ret;
4364}
4365EXPORT_SYMBOL(__kmalloc_cache_noprof);
4366
4367void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
4368 int node, size_t size)
4369{
4370 void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
4371
4372 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);
4373
4374 ret = kasan_kmalloc(s, ret, size, gfpflags);
4375 return ret;
4376}
4377EXPORT_SYMBOL(__kmalloc_cache_node_noprof);
4378
4379static noinline void free_to_partial_list(
4380 struct kmem_cache *s, struct slab *slab,
4381 void *head, void *tail, int bulk_cnt,
4382 unsigned long addr)
4383{
4384 struct kmem_cache_node *n = get_node(s, slab_nid(slab));
4385 struct slab *slab_free = NULL;
4386 int cnt = bulk_cnt;
4387 unsigned long flags;
4388 depot_stack_handle_t handle = 0;
4389
4390 if (s->flags & SLAB_STORE_USER)
4391 handle = set_track_prepare();
4392
4393 spin_lock_irqsave(&n->list_lock, flags);
4394
4395 if (free_debug_processing(s, slab, head, tail, &cnt, addr, handle)) {
4396 void *prior = slab->freelist;
4397
4398 /* Perform the actual freeing while we still hold the locks */
4399 slab->inuse -= cnt;
4400 set_freepointer(s, tail, prior);
4401 slab->freelist = head;
4402
4403 /*
4404		 * If the slab is empty and the node's partial list is full,
4405		 * it should be discarded anyway, no matter whether it was on
4406		 * the full or the partial list.
4407 */
4408 if (slab->inuse == 0 && n->nr_partial >= s->min_partial)
4409 slab_free = slab;
4410
4411 if (!prior) {
4412 /* was on full list */
4413 remove_full(s, n, slab);
4414 if (!slab_free) {
4415 add_partial(n, slab, DEACTIVATE_TO_TAIL);
4416 stat(s, FREE_ADD_PARTIAL);
4417 }
4418 } else if (slab_free) {
4419 remove_partial(n, slab);
4420 stat(s, FREE_REMOVE_PARTIAL);
4421 }
4422 }
4423
4424 if (slab_free) {
4425 /*
4426 * Update the counters while still holding n->list_lock to
4427 * prevent spurious validation warnings
4428 */
4429 dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
4430 }
4431
4432 spin_unlock_irqrestore(&n->list_lock, flags);
4433
4434 if (slab_free) {
4435 stat(s, FREE_SLAB);
4436 free_slab(s, slab_free);
4437 }
4438}
4439
4440/*
4441 * Slow path handling. This may still be called frequently since objects
4442 * have a longer lifetime than the cpu slabs in most processing loads.
4443 *
4444 * So we still attempt to reduce cache line usage. Just take the slab
4445 * lock and free the item. If there is no additional partial slab
4446 * handling required then we can return immediately.
4447 */
4448static void __slab_free(struct kmem_cache *s, struct slab *slab,
4449 void *head, void *tail, int cnt,
4450 unsigned long addr)
4451
4452{
4453 void *prior;
4454 int was_frozen;
4455 struct slab new;
4456 unsigned long counters;
4457 struct kmem_cache_node *n = NULL;
4458 unsigned long flags;
4459 bool on_node_partial;
4460
4461 stat(s, FREE_SLOWPATH);
4462
4463 if (IS_ENABLED(CONFIG_SLUB_TINY) || kmem_cache_debug(s)) {
4464 free_to_partial_list(s, slab, head, tail, cnt, addr);
4465 return;
4466 }
4467
4468 do {
4469 if (unlikely(n)) {
4470 spin_unlock_irqrestore(&n->list_lock, flags);
4471 n = NULL;
4472 }
4473 prior = slab->freelist;
4474 counters = slab->counters;
4475 set_freepointer(s, tail, prior);
4476 new.counters = counters;
4477 was_frozen = new.frozen;
4478 new.inuse -= cnt;
4479 if ((!new.inuse || !prior) && !was_frozen) {
4480 /* Needs to be taken off a list */
4481 if (!kmem_cache_has_cpu_partial(s) || prior) {
4482
4483 n = get_node(s, slab_nid(slab));
4484 /*
4485 * Speculatively acquire the list_lock.
4486 * If the cmpxchg does not succeed then we may
4487 * drop the list_lock without any processing.
4488 *
4489 * Otherwise the list_lock will synchronize with
4490 * other processors updating the list of slabs.
4491 */
4492 spin_lock_irqsave(&n->list_lock, flags);
4493
4494 on_node_partial = slab_test_node_partial(slab);
4495 }
4496 }
4497
4498 } while (!slab_update_freelist(s, slab,
4499 prior, counters,
4500 head, new.counters,
4501 "__slab_free"));
4502
4503 if (likely(!n)) {
4504
4505 if (likely(was_frozen)) {
4506 /*
4507 * The list lock was not taken therefore no list
4508 * activity can be necessary.
4509 */
4510 stat(s, FREE_FROZEN);
4511 } else if (kmem_cache_has_cpu_partial(s) && !prior) {
4512 /*
4513 * If we started with a full slab then put it onto the
4514 * per cpu partial list.
4515 */
4516 put_cpu_partial(s, slab, 1);
4517 stat(s, CPU_PARTIAL_FREE);
4518 }
4519
4520 return;
4521 }
4522
4523 /*
4524 * This slab was partially empty but not on the per-node partial list,
4525 * in which case we shouldn't manipulate its list, just return.
4526 */
4527 if (prior && !on_node_partial) {
4528 spin_unlock_irqrestore(&n->list_lock, flags);
4529 return;
4530 }
4531
4532 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
4533 goto slab_empty;
4534
4535 /*
4536 * Objects left in the slab. If it was not on the partial list before
4537 * then add it.
4538 */
4539 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
4540 add_partial(n, slab, DEACTIVATE_TO_TAIL);
4541 stat(s, FREE_ADD_PARTIAL);
4542 }
4543 spin_unlock_irqrestore(&n->list_lock, flags);
4544 return;
4545
4546slab_empty:
4547 if (prior) {
4548 /*
4549 * Slab on the partial list.
4550 */
4551 remove_partial(n, slab);
4552 stat(s, FREE_REMOVE_PARTIAL);
4553 }
4554
4555 spin_unlock_irqrestore(&n->list_lock, flags);
4556 stat(s, FREE_SLAB);
4557 discard_slab(s, slab);
4558}
4559
4560#ifndef CONFIG_SLUB_TINY
4561/*
4562 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
4563 * can perform fastpath freeing without additional function calls.
4564 *
4565 * The fastpath is only possible if we are freeing to the current cpu slab
4566 * of this processor. This is typically the case if we have just allocated
4567 * the item before.
4568 *
4569 * If fastpath is not possible then fall back to __slab_free where we deal
4570 * with all sorts of special processing.
4571 *
4572 * Bulk free of a freelist with several objects (all pointing to the
4573 * same slab) is possible by specifying head and tail pointers, plus an
4574 * object count (cnt). Bulk free is indicated by the tail pointer being set.
4575 */
4576static __always_inline void do_slab_free(struct kmem_cache *s,
4577 struct slab *slab, void *head, void *tail,
4578 int cnt, unsigned long addr)
4579{
4580 struct kmem_cache_cpu *c;
4581 unsigned long tid;
4582 void **freelist;
4583
4584redo:
4585 /*
4586	 * Determine the current cpu's per cpu slab.
4587 * The cpu may change afterward. However that does not matter since
4588 * data is retrieved via this pointer. If we are on the same cpu
4589 * during the cmpxchg then the free will succeed.
4590 */
4591 c = raw_cpu_ptr(s->cpu_slab);
4592 tid = READ_ONCE(c->tid);
4593
4594	/* Same as the comment on barrier() in __slab_alloc_node() */
4595 barrier();
4596
4597 if (unlikely(slab != c->slab)) {
4598 __slab_free(s, slab, head, tail, cnt, addr);
4599 return;
4600 }
4601
4602 if (USE_LOCKLESS_FAST_PATH()) {
4603 freelist = READ_ONCE(c->freelist);
4604
4605 set_freepointer(s, tail, freelist);
4606
4607 if (unlikely(!__update_cpu_freelist_fast(s, freelist, head, tid))) {
4608 note_cmpxchg_failure("slab_free", s, tid);
4609 goto redo;
4610 }
4611 } else {
4612 /* Update the free list under the local lock */
4613 local_lock(&s->cpu_slab->lock);
4614 c = this_cpu_ptr(s->cpu_slab);
4615 if (unlikely(slab != c->slab)) {
4616 local_unlock(&s->cpu_slab->lock);
4617 goto redo;
4618 }
4619 tid = c->tid;
4620 freelist = c->freelist;
4621
4622 set_freepointer(s, tail, freelist);
4623 c->freelist = head;
4624 c->tid = next_tid(tid);
4625
4626 local_unlock(&s->cpu_slab->lock);
4627 }
4628 stat_add(s, FREE_FASTPATH, cnt);
4629}
4630#else /* CONFIG_SLUB_TINY */
4631static void do_slab_free(struct kmem_cache *s,
4632 struct slab *slab, void *head, void *tail,
4633 int cnt, unsigned long addr)
4634{
4635 __slab_free(s, slab, head, tail, cnt, addr);
4636}
4637#endif /* CONFIG_SLUB_TINY */
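/*
 * Editorial worked example of the tid check used by the lockless paths
 * above. Each cpu's tid starts at its cpu number and next_tid()
 * advances it in steps of at least the number of possible cpus, so a
 * tid sampled on one cpu can never equal the current tid of another:
 *
 *   1. a task on cpu0 samples c = cpu0's area and tid = T
 *   2. it is preempted and migrated to cpu1
 *   3. the this_cpu cmpxchg now operates on cpu1's area, whose tid
 *      cannot be T, so the update fails and we retry via "redo"
 */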
4638
4639static __fastpath_inline
4640void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
4641 unsigned long addr)
4642{
4643 memcg_slab_free_hook(s, slab, &object, 1);
4644 alloc_tagging_slab_free_hook(s, slab, &object, 1);
4645
4646 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
4647 do_slab_free(s, slab, object, object, 1, addr);
4648}
4649
4650#ifdef CONFIG_MEMCG
4651/* Do not inline the rare memcg charging failed path into the allocation path */
4652static noinline
4653void memcg_alloc_abort_single(struct kmem_cache *s, void *object)
4654{
4655 if (likely(slab_free_hook(s, object, slab_want_init_on_free(s), false)))
4656 do_slab_free(s, virt_to_slab(object), object, object, 1, _RET_IP_);
4657}
4658#endif
4659
4660static __fastpath_inline
4661void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
4662 void *tail, void **p, int cnt, unsigned long addr)
4663{
4664 memcg_slab_free_hook(s, slab, p, cnt);
4665 alloc_tagging_slab_free_hook(s, slab, p, cnt);
4666 /*
4667 * With KASAN enabled slab_free_freelist_hook modifies the freelist
4668 * to remove objects, whose reuse must be delayed.
4669 */
4670 if (likely(slab_free_freelist_hook(s, &head, &tail, &cnt)))
4671 do_slab_free(s, slab, head, tail, cnt, addr);
4672}
4673
4674#ifdef CONFIG_SLUB_RCU_DEBUG
4675static void slab_free_after_rcu_debug(struct rcu_head *rcu_head)
4676{
4677 struct rcu_delayed_free *delayed_free =
4678 container_of(rcu_head, struct rcu_delayed_free, head);
4679 void *object = delayed_free->object;
4680 struct slab *slab = virt_to_slab(object);
4681 struct kmem_cache *s;
4682
4683 kfree(delayed_free);
4684
4685 if (WARN_ON(is_kfence_address(object)))
4686 return;
4687
4688 /* find the object and the cache again */
4689 if (WARN_ON(!slab))
4690 return;
4691 s = slab->slab_cache;
4692 if (WARN_ON(!(s->flags & SLAB_TYPESAFE_BY_RCU)))
4693 return;
4694
4695 /* resume freeing */
4696 if (slab_free_hook(s, object, slab_want_init_on_free(s), true))
4697 do_slab_free(s, slab, object, object, 1, _THIS_IP_);
4698}
4699#endif /* CONFIG_SLUB_RCU_DEBUG */
4700
4701#ifdef CONFIG_KASAN_GENERIC
4702void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
4703{
4704 do_slab_free(cache, virt_to_slab(x), x, x, 1, addr);
4705}
4706#endif
4707
4708static inline struct kmem_cache *virt_to_cache(const void *obj)
4709{
4710 struct slab *slab;
4711
4712 slab = virt_to_slab(obj);
4713 if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n", __func__))
4714 return NULL;
4715 return slab->slab_cache;
4716}
4717
4718static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
4719{
4720 struct kmem_cache *cachep;
4721
4722 if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
4723 !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
4724 return s;
4725
4726 cachep = virt_to_cache(x);
4727 if (WARN(cachep && cachep != s,
4728 "%s: Wrong slab cache. %s but object is from %s\n",
4729 __func__, s->name, cachep->name))
4730 print_tracking(cachep, x);
4731 return cachep;
4732}
4733
4734/**
4735 * kmem_cache_free - Deallocate an object
4736 * @s: The cache the allocation was from.
4737 * @x: The previously allocated object.
4738 *
4739 * Free an object which was previously allocated from this
4740 * cache.
4741 */
4742void kmem_cache_free(struct kmem_cache *s, void *x)
4743{
4744 s = cache_from_obj(s, x);
4745 if (!s)
4746 return;
4747 trace_kmem_cache_free(_RET_IP_, x, s);
4748 slab_free(s, virt_to_slab(x), x, _RET_IP_);
4749}
4750EXPORT_SYMBOL(kmem_cache_free);
4751
4752static void free_large_kmalloc(struct folio *folio, void *object)
4753{
4754 unsigned int order = folio_order(folio);
4755
4756 if (WARN_ON_ONCE(!folio_test_large_kmalloc(folio))) {
4757 dump_page(&folio->page, "Not a kmalloc allocation");
4758 return;
4759 }
4760
4761 if (WARN_ON_ONCE(order == 0))
4762 pr_warn_once("object pointer: 0x%p\n", object);
4763
4764 kmemleak_free(object);
4765 kasan_kfree_large(object);
4766 kmsan_kfree_large(object);
4767
4768 lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
4769 -(PAGE_SIZE << order));
4770 __folio_clear_large_kmalloc(folio);
4771 folio_put(folio);
4772}
4773
4774/*
4775 * Given an rcu_head embedded within an object obtained from kvmalloc at an
4776 * offset < 4k, free the object in question.
4777 */
4778void kvfree_rcu_cb(struct rcu_head *head)
4779{
4780 void *obj = head;
4781 struct folio *folio;
4782 struct slab *slab;
4783 struct kmem_cache *s;
4784 void *slab_addr;
4785
4786 if (is_vmalloc_addr(obj)) {
4787 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
4788 vfree(obj);
4789 return;
4790 }
4791
4792 folio = virt_to_folio(obj);
4793 if (!folio_test_slab(folio)) {
4794 /*
4795		 * The rcu_head offset can only be less than the page size, so
4796		 * there is no need to consider the folio order.
4797 */
4798 obj = (void *) PAGE_ALIGN_DOWN((unsigned long)obj);
4799 free_large_kmalloc(folio, obj);
4800 return;
4801 }
4802
4803 slab = folio_slab(folio);
4804 s = slab->slab_cache;
4805 slab_addr = folio_address(folio);
4806
4807 if (is_kfence_address(obj)) {
4808 obj = kfence_object_start(obj);
4809 } else {
4810 unsigned int idx = __obj_to_index(s, slab_addr, obj);
4811
4812 obj = slab_addr + s->size * idx;
4813 obj = fixup_red_left(s, obj);
4814 }
4815
4816 slab_free(s, slab, obj, _RET_IP_);
4817}
4818
4819/**
4820 * kfree - free previously allocated memory
4821 * @object: pointer returned by kmalloc() or kmem_cache_alloc()
4822 *
4823 * If @object is NULL, no operation is performed.
4824 */
4825void kfree(const void *object)
4826{
4827 struct folio *folio;
4828 struct slab *slab;
4829 struct kmem_cache *s;
4830 void *x = (void *)object;
4831
4832 trace_kfree(_RET_IP_, object);
4833
4834 if (unlikely(ZERO_OR_NULL_PTR(object)))
4835 return;
4836
4837 folio = virt_to_folio(object);
4838 if (unlikely(!folio_test_slab(folio))) {
4839 free_large_kmalloc(folio, (void *)object);
4840 return;
4841 }
4842
4843 slab = folio_slab(folio);
4844 s = slab->slab_cache;
4845 slab_free(s, slab, x, _RET_IP_);
4846}
4847EXPORT_SYMBOL(kfree);
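/*
 * Editorial usage sketch: kfree() tolerates NULL and ZERO_SIZE_PTR, so
 * error paths need not test the pointer first. "buf" is hypothetical.
 *
 *	char *buf = kmalloc(128, GFP_KERNEL);
 *
 *	kfree(buf);	(safe even if the allocation failed and buf is NULL)
 */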
4848
4849static __always_inline __realloc_size(2) void *
4850__do_krealloc(const void *p, size_t new_size, gfp_t flags)
4851{
4852 void *ret;
4853 size_t ks = 0;
4854 int orig_size = 0;
4855 struct kmem_cache *s = NULL;
4856
4857 if (unlikely(ZERO_OR_NULL_PTR(p)))
4858 goto alloc_new;
4859
4860 /* Check for double-free. */
4861 if (!kasan_check_byte(p))
4862 return NULL;
4863
4864 if (is_kfence_address(p)) {
4865 ks = orig_size = kfence_ksize(p);
4866 } else {
4867 struct folio *folio;
4868
4869 folio = virt_to_folio(p);
4870 if (unlikely(!folio_test_slab(folio))) {
4871 /* Big kmalloc object */
4872 WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE);
4873 WARN_ON(p != folio_address(folio));
4874 ks = folio_size(folio);
4875 } else {
4876 s = folio_slab(folio)->slab_cache;
4877 orig_size = get_orig_size(s, (void *)p);
4878 ks = s->object_size;
4879 }
4880 }
4881
4882 /* If the old object doesn't fit, allocate a bigger one */
4883 if (new_size > ks)
4884 goto alloc_new;
4885
4886 /* Zero out spare memory. */
4887 if (want_init_on_alloc(flags)) {
4888 kasan_disable_current();
4889 if (orig_size && orig_size < new_size)
4890 memset(kasan_reset_tag(p) + orig_size, 0, new_size - orig_size);
4891 else
4892 memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
4893 kasan_enable_current();
4894 }
4895
4896 /* Setup kmalloc redzone when needed */
4897 if (s && slub_debug_orig_size(s)) {
4898 set_orig_size(s, (void *)p, new_size);
4899 if (s->flags & SLAB_RED_ZONE && new_size < ks)
4900 memset_no_sanitize_memory(kasan_reset_tag(p) + new_size,
4901 SLUB_RED_ACTIVE, ks - new_size);
4902 }
4903
4904 p = kasan_krealloc(p, new_size, flags);
4905 return (void *)p;
4906
4907alloc_new:
4908 ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
4909 if (ret && p) {
4910 /* Disable KASAN checks as the object's redzone is accessed. */
4911 kasan_disable_current();
4912 memcpy(ret, kasan_reset_tag(p), orig_size ?: ks);
4913 kasan_enable_current();
4914 }
4915
4916 return ret;
4917}
4918
4919/**
4920 * krealloc - reallocate memory. The contents will remain unchanged.
4921 * @p: object to reallocate memory for.
4922 * @new_size: how many bytes of memory are required.
4923 * @flags: the type of memory to allocate.
4924 *
4925 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
4926 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
4927 *
4928 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
4929 * initial memory allocation, every subsequent call to this API for the same
4930 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
4931 * __GFP_ZERO is not fully honored by this API.
4932 *
4933 * When slub_debug_orig_size() is off, krealloc() only knows about the bucket
4934 * size of an allocation (but not the exact size it was allocated with) and
4935 * hence implements the following semantics for shrinking and growing buffers
4936 * with __GFP_ZERO.
4937 *
4938 * new bucket
4939 * 0 size size
4940 * |--------|----------------|
4941 * | keep | zero |
4942 *
4943 * Otherwise, the original allocation size 'orig_size' could be used to
4944 * precisely clear the requested size, and the new size will also be stored
4945 * as the new 'orig_size'.
4946 *
4947 * In any case, the contents of the object pointed to are preserved up to the
4948 * lesser of the new and old sizes.
4949 *
4950 * Return: pointer to the allocated memory or %NULL in case of error
4951 */
4952void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
4953{
4954 void *ret;
4955
4956 if (unlikely(!new_size)) {
4957 kfree(p);
4958 return ZERO_SIZE_PTR;
4959 }
4960
4961 ret = __do_krealloc(p, new_size, flags);
4962 if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
4963 kfree(p);
4964
4965 return ret;
4966}
4967EXPORT_SYMBOL(krealloc_noprof);
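/*
 * Editorial sketch of the __GFP_ZERO contract documented above: every
 * call in the buffer's lifetime passes __GFP_ZERO, so grown space is
 * guaranteed to read as zeroes. "buf" and "n" are hypothetical. On
 * failure krealloc() returns NULL and leaves the old buffer intact.
 *
 *	char *buf = kzalloc(64, GFP_KERNEL);
 *	char *n = krealloc(buf, 128, GFP_KERNEL | __GFP_ZERO);
 *
 *	if (n)
 *		buf = n;
 *	else
 *		kfree(buf);
 */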
4968
4969static gfp_t kmalloc_gfp_adjust(gfp_t flags, size_t size)
4970{
4971 /*
4972 * We want to attempt a large physically contiguous block first because
4973	 * it is less likely to fragment multiple larger blocks and therefore
4974	 * contributes less to long-term fragmentation than the vmalloc fallback.
4975	 * However, make sure that larger requests are not too disruptive - no
4976	 * OOM killer and no allocation failure warnings, as we have a fallback.
4977 */
4978 if (size > PAGE_SIZE) {
4979 flags |= __GFP_NOWARN;
4980
4981 if (!(flags & __GFP_RETRY_MAYFAIL))
4982 flags |= __GFP_NORETRY;
4983
4984 /* nofail semantic is implemented by the vmalloc fallback */
4985 flags &= ~__GFP_NOFAIL;
4986 }
4987
4988 return flags;
4989}
4990
4991/**
4992 * __kvmalloc_node - attempt to allocate physically contiguous memory, but upon
4993 * failure, fall back to non-contiguous (vmalloc) allocation.
4994 * @size: size of the request.
4995 * @b: which set of kmalloc buckets to allocate from.
4996 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
4997 * @node: numa node to allocate from
4998 *
4999 * Uses kmalloc to get the memory but if the allocation fails then falls back
5000 * to the vmalloc allocator. Use kvfree for freeing the memory.
5001 *
5002 * GFP_NOWAIT and GFP_ATOMIC are not supported, neither is the __GFP_NORETRY modifier.
5003 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
5004 * preferable to the vmalloc fallback, due to visible performance drawbacks.
5005 *
5006 * Return: pointer to the allocated memory or %NULL in case of failure
5007 */
5008void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
5009{
5010 void *ret;
5011
5012 /*
5013	 * It doesn't really make sense to fall back to vmalloc for sub-page
5014	 * requests.
5015 */
5016 ret = __do_kmalloc_node(size, PASS_BUCKET_PARAM(b),
5017 kmalloc_gfp_adjust(flags, size),
5018 node, _RET_IP_);
5019 if (ret || size <= PAGE_SIZE)
5020 return ret;
5021
5022 /* non-sleeping allocations are not supported by vmalloc */
5023 if (!gfpflags_allow_blocking(flags))
5024 return NULL;
5025
5026 /* Don't even allow crazy sizes */
5027 if (unlikely(size > INT_MAX)) {
5028 WARN_ON_ONCE(!(flags & __GFP_NOWARN));
5029 return NULL;
5030 }
5031
5032 /*
5033 * kvmalloc() can always use VM_ALLOW_HUGE_VMAP,
5034 * since the callers already cannot assume anything
5035 * about the resulting pointer, and cannot play
5036 * protection games.
5037 */
5038 return __vmalloc_node_range_noprof(size, 1, VMALLOC_START, VMALLOC_END,
5039 flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
5040 node, __builtin_return_address(0));
5041}
5042EXPORT_SYMBOL(__kvmalloc_node_noprof);
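/*
 * Editorial usage sketch: a possibly large table whose backing memory
 * may come from either kmalloc or vmalloc, so it must be released with
 * kvfree(). "nr" and "struct entry" are hypothetical.
 *
 *	struct entry *tbl = kvmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (tbl) {
 *		...
 *		kvfree(tbl);
 *	}
 */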
5043
5044/**
5045 * kvfree() - Free memory.
5046 * @addr: Pointer to allocated memory.
5047 *
5048 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
5049 * It is slightly more efficient to use kfree() or vfree() if you are certain
5050 * that you know which one to use.
5051 *
5052 * Context: Either preemptible task context or not-NMI interrupt.
5053 */
5054void kvfree(const void *addr)
5055{
5056 if (is_vmalloc_addr(addr))
5057 vfree(addr);
5058 else
5059 kfree(addr);
5060}
5061EXPORT_SYMBOL(kvfree);
5062
5063/**
5064 * kvfree_sensitive - Free a data object containing sensitive information.
5065 * @addr: address of the data object to be freed.
5066 * @len: length of the data object.
5067 *
5068 * Use the special memzero_explicit() function to clear the content of a
5069 * kvmalloc'ed object containing sensitive data to make sure that the
5070 * compiler won't optimize out the data clearing.
5071 */
5072void kvfree_sensitive(const void *addr, size_t len)
5073{
5074 if (likely(!ZERO_OR_NULL_PTR(addr))) {
5075 memzero_explicit((void *)addr, len);
5076 kvfree(addr);
5077 }
5078}
5079EXPORT_SYMBOL(kvfree_sensitive);
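/*
 * Editorial usage sketch: releasing key material. Unlike a plain
 * memset() before kvfree(), the memzero_explicit() performed here
 * cannot be optimized away. "key_len" is hypothetical.
 *
 *	u8 *key = kvmalloc(key_len, GFP_KERNEL);
 *
 *	if (key) {
 *		...
 *		kvfree_sensitive(key, key_len);
 *	}
 */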
5080
5081/**
5082 * kvrealloc - reallocate memory; contents remain unchanged
5083 * @p: object to reallocate memory for
5084 * @size: the size to reallocate
5085 * @flags: the flags for the page level allocator
5086 *
5087 * If @p is %NULL, kvrealloc() behaves exactly like kvmalloc(). If @size is 0
5088 * and @p is not a %NULL pointer, the object pointed to is freed.
5089 *
5090 * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
5091 * initial memory allocation, every subsequent call to this API for the same
5092 * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
5093 * __GFP_ZERO is not fully honored by this API.
5094 *
5095 * In any case, the contents of the object pointed to are preserved up to the
5096 * lesser of the new and old sizes.
5097 *
5098 * This function must not be called concurrently with itself or kvfree() for the
5099 * same memory allocation.
5100 *
5101 * Return: pointer to the allocated memory or %NULL in case of error
5102 */
5103void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags)
5104{
5105 void *n;
5106
5107 if (is_vmalloc_addr(p))
5108 return vrealloc_noprof(p, size, flags);
5109
5110 n = krealloc_noprof(p, size, kmalloc_gfp_adjust(flags, size));
5111 if (!n) {
5112 /* We failed to krealloc(), fall back to kvmalloc(). */
5113 n = kvmalloc_noprof(size, flags);
5114 if (!n)
5115 return NULL;
5116
5117 if (p) {
5118 /* We already know that `p` is not a vmalloc address. */
5119 kasan_disable_current();
5120 memcpy(n, kasan_reset_tag(p), ksize(p));
5121 kasan_enable_current();
5122
5123 kfree(p);
5124 }
5125 }
5126
5127 return n;
5128}
5129EXPORT_SYMBOL(kvrealloc_noprof);
5130
5131struct detached_freelist {
5132 struct slab *slab;
5133 void *tail;
5134 void *freelist;
5135 int cnt;
5136 struct kmem_cache *s;
5137};
5138
5139/*
5140 * This function progressively scans the array of free objects (with
5141 * a limited look ahead) and extracts objects belonging to the same
5142 * slab. It builds a detached freelist directly within the given
5143 * slab/objects. This can happen without any need for
5144 * synchronization, because the objects are owned by the running process.
5145 * The freelist is built up as a singly linked list in the objects.
5146 * The idea is that this detached freelist can then be bulk
5147 * transferred to the real freelist(s), requiring only a single
5148 * synchronization primitive. Look ahead in the array is limited
5149 * for performance reasons.
5150 */
5151static inline
5152int build_detached_freelist(struct kmem_cache *s, size_t size,
5153 void **p, struct detached_freelist *df)
5154{
5155 int lookahead = 3;
5156 void *object;
5157 struct folio *folio;
5158 size_t same;
5159
5160 object = p[--size];
5161 folio = virt_to_folio(object);
5162 if (!s) {
5163		/* Handle kmalloc'ed objects */
5164 if (unlikely(!folio_test_slab(folio))) {
5165 free_large_kmalloc(folio, object);
5166 df->slab = NULL;
5167 return size;
5168 }
5169 /* Derive kmem_cache from object */
5170 df->slab = folio_slab(folio);
5171 df->s = df->slab->slab_cache;
5172 } else {
5173 df->slab = folio_slab(folio);
5174 df->s = cache_from_obj(s, object); /* Support for memcg */
5175 }
5176
5177 /* Start new detached freelist */
5178 df->tail = object;
5179 df->freelist = object;
5180 df->cnt = 1;
5181
5182 if (is_kfence_address(object))
5183 return size;
5184
5185 set_freepointer(df->s, object, NULL);
5186
5187 same = size;
5188 while (size) {
5189 object = p[--size];
5190 /* df->slab is always set at this point */
5191 if (df->slab == virt_to_slab(object)) {
5192			/* Opportunistically build the freelist */
5193 set_freepointer(df->s, object, df->freelist);
5194 df->freelist = object;
5195 df->cnt++;
5196 same--;
5197 if (size != same)
5198 swap(p[size], p[same]);
5199 continue;
5200 }
5201
5202 /* Limit look ahead search */
5203 if (!--lookahead)
5204 break;
5205 }
5206
5207 return same;
5208}
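/*
 * Editorial worked example for build_detached_freelist(). Suppose
 * p[] = { A1, B1, A2 }, where A1/A2 live in slab A, B1 in slab B, and
 * size = 3:
 *
 *   - A2 (the last element) starts the detached freelist, df->slab = A
 *   - scanning backwards, B1 belongs to a different slab and only
 *     consumes look ahead
 *   - A1 matches slab A, is linked into the freelist (df->cnt = 2) and
 *     swapped towards the end of the array so that the elements still
 *     to be processed stay contiguous at the front
 *
 * The function returns the number of unprocessed elements (here 1,
 * namely B1) and the caller loops until the array is drained.
 */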
5209
5210/*
5211 * Internal bulk free of objects that were not initialised by the post alloc
5212 * hooks and thus should not be processed by the free hooks
5213 */
5214static void __kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
5215{
5216 if (!size)
5217 return;
5218
5219 do {
5220 struct detached_freelist df;
5221
5222 size = build_detached_freelist(s, size, p, &df);
5223 if (!df.slab)
5224 continue;
5225
5226 if (kfence_free(df.freelist))
5227 continue;
5228
5229 do_slab_free(df.s, df.slab, df.freelist, df.tail, df.cnt,
5230 _RET_IP_);
5231 } while (likely(size));
5232}
5233
5234/* Note that interrupts must be enabled when calling this function. */
5235void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
5236{
5237 if (!size)
5238 return;
5239
5240 do {
5241 struct detached_freelist df;
5242
5243 size = build_detached_freelist(s, size, p, &df);
5244 if (!df.slab)
5245 continue;
5246
5247 slab_free_bulk(df.s, df.slab, df.freelist, df.tail, &p[size],
5248 df.cnt, _RET_IP_);
5249 } while (likely(size));
5250}
5251EXPORT_SYMBOL(kmem_cache_free_bulk);
5252
5253#ifndef CONFIG_SLUB_TINY
5254static inline
5255int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
5256 void **p)
5257{
5258 struct kmem_cache_cpu *c;
5259 unsigned long irqflags;
5260 int i;
5261
5262 /*
5263 * Drain objects in the per cpu slab, while disabling local
5264	 * IRQs, which protects against PREEMPT and interrupt
5265	 * handlers invoking the normal fastpath.
5266 */
5267 c = slub_get_cpu_ptr(s->cpu_slab);
5268 local_lock_irqsave(&s->cpu_slab->lock, irqflags);
5269
5270 for (i = 0; i < size; i++) {
5271 void *object = kfence_alloc(s, s->object_size, flags);
5272
5273 if (unlikely(object)) {
5274 p[i] = object;
5275 continue;
5276 }
5277
5278 object = c->freelist;
5279 if (unlikely(!object)) {
5280 /*
5281 * We may have removed an object from c->freelist using
5282 * the fastpath in the previous iteration; in that case,
5283 * c->tid has not been bumped yet.
5284 * Since ___slab_alloc() may reenable interrupts while
5285 * allocating memory, we should bump c->tid now.
5286 */
5287 c->tid = next_tid(c->tid);
5288
5289 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
5290
5291 /*
5292			 * Invoking the slow path likely has the side effect
5293			 * of re-populating the per CPU c->freelist
5294 */
5295 p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
5296 _RET_IP_, c, s->object_size);
5297 if (unlikely(!p[i]))
5298 goto error;
5299
5300 c = this_cpu_ptr(s->cpu_slab);
5301 maybe_wipe_obj_freeptr(s, p[i]);
5302
5303 local_lock_irqsave(&s->cpu_slab->lock, irqflags);
5304
5305 continue; /* goto for-loop */
5306 }
5307 c->freelist = get_freepointer(s, object);
5308 p[i] = object;
5309 maybe_wipe_obj_freeptr(s, p[i]);
5310 stat(s, ALLOC_FASTPATH);
5311 }
5312 c->tid = next_tid(c->tid);
5313 local_unlock_irqrestore(&s->cpu_slab->lock, irqflags);
5314 slub_put_cpu_ptr(s->cpu_slab);
5315
5316 return i;
5317
5318error:
5319 slub_put_cpu_ptr(s->cpu_slab);
5320 __kmem_cache_free_bulk(s, i, p);
5321 return 0;
5322
5323}
5324#else /* CONFIG_SLUB_TINY */
5325static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
5326 size_t size, void **p)
5327{
5328 int i;
5329
5330 for (i = 0; i < size; i++) {
5331 void *object = kfence_alloc(s, s->object_size, flags);
5332
5333 if (unlikely(object)) {
5334 p[i] = object;
5335 continue;
5336 }
5337
5338 p[i] = __slab_alloc_node(s, flags, NUMA_NO_NODE,
5339 _RET_IP_, s->object_size);
5340 if (unlikely(!p[i]))
5341 goto error;
5342
5343 maybe_wipe_obj_freeptr(s, p[i]);
5344 }
5345
5346 return i;
5347
5348error:
5349 __kmem_cache_free_bulk(s, i, p);
5350 return 0;
5351}
5352#endif /* CONFIG_SLUB_TINY */
5353
5354/* Note that interrupts must be enabled when calling this function. */
5355int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
5356 void **p)
5357{
5358 int i;
5359
5360 if (!size)
5361 return 0;
5362
5363 s = slab_pre_alloc_hook(s, flags);
5364 if (unlikely(!s))
5365 return 0;
5366
5367 i = __kmem_cache_alloc_bulk(s, flags, size, p);
5368 if (unlikely(i == 0))
5369 return 0;
5370
5371 /*
5372 * memcg and kmem_cache debug support and memory initialization.
5373 * Done outside of the IRQ disabled fastpath loop.
5374 */
5375 if (unlikely(!slab_post_alloc_hook(s, NULL, flags, size, p,
5376 slab_want_init_on_alloc(flags, s), s->object_size))) {
5377 return 0;
5378 }
5379 return i;
5380}
5381EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
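/*
 * Editorial usage sketch (hypothetical "my_cache"): the bulk API
 * amortizes the fastpath overhead over many objects. On failure it
 * returns 0 and no objects remain allocated.
 *
 *	void *objs[16];
 *	int n = kmem_cache_alloc_bulk(my_cache, GFP_KERNEL, 16, objs);
 *
 *	if (n)
 *		kmem_cache_free_bulk(my_cache, n, objs);
 */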
5382
5383
5384/*
5385 * Object placement in a slab is made very easy because we always start at
5386 * offset 0. If we tune the size of the object to the alignment then we can
5387 * get the required alignment by putting one properly sized object after
5388 * another.
5389 *
5390 * Notice that the allocation order determines the sizes of the per cpu
5391 * caches. Each processor always has one slab available for allocations.
5392 * Increasing the allocation order reduces the number of times that slabs
5393 * must be moved on and off the partial lists and is therefore a factor in
5394 * locking overhead.
5395 */
5396
5397/*
5398 * Minimum / Maximum order of slab pages. This influences locking overhead
5399 * and slab fragmentation. A higher order reduces the number of partial slabs
5400 * and increases the number of allocations possible without having to
5401 * take the list_lock.
5402 */
5403static unsigned int slub_min_order;
5404static unsigned int slub_max_order =
5405 IS_ENABLED(CONFIG_SLUB_TINY) ? 1 : PAGE_ALLOC_COSTLY_ORDER;
5406static unsigned int slub_min_objects;
5407
5408/*
5409 * Calculate the order of allocation given a slab object size.
5410 *
5411 * The order of allocation has significant impact on performance and other
5412 * system components. Generally order 0 allocations should be preferred since
5413 * order 0 does not cause fragmentation in the page allocator. Larger objects
5414 * can be problematic to put into order 0 slabs because there may be too much
5415 * unused space left. We go to a higher order if more than 1/16th of the slab
5416 * would be wasted.
5417 *
5418 * In order to reach satisfactory performance we must ensure that a minimum
5419 * number of objects is in one slab. Otherwise we may generate too much
5420 * activity on the partial lists which requires taking the list_lock. This is
5421 * of less concern for large slabs, though, as they are rarely used.
5422 *
5423 * slab_max_order specifies the order at which we stop considering the
5424 * number of objects in a slab as critical. If we reach slab_max_order then
5425 * we try to keep the page order as low as possible. So we accept more waste
5426 * of space in favor of a small page order.
5427 *
5428 * Higher order allocations also allow the placement of more objects in a
5429 * slab and thereby reduce object handling overhead. If the user has
5430 * requested a higher minimum order then we start with that one instead of
5431 * the smallest order which will fit the object.
5432 */
5433static inline unsigned int calc_slab_order(unsigned int size,
5434 unsigned int min_order, unsigned int max_order,
5435 unsigned int fract_leftover)
5436{
5437 unsigned int order;
5438
5439 for (order = min_order; order <= max_order; order++) {
5440
5441 unsigned int slab_size = (unsigned int)PAGE_SIZE << order;
5442 unsigned int rem;
5443
5444 rem = slab_size % size;
5445
5446 if (rem <= slab_size / fract_leftover)
5447 break;
5448 }
5449
5450 return order;
5451}
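/*
 * Editorial worked example, assuming 4KiB pages: for size = 700 and
 * fract_leftover = 16, order 0 leaves 4096 % 700 = 596 bytes unused,
 * which exceeds 4096 / 16 = 256, so the loop moves on; order 1 leaves
 * 8192 % 700 = 492 <= 8192 / 16 = 512 and is accepted.
 */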
5452
5453static inline int calculate_order(unsigned int size)
5454{
5455 unsigned int order;
5456 unsigned int min_objects;
5457 unsigned int max_objects;
5458 unsigned int min_order;
5459
5460 min_objects = slub_min_objects;
5461 if (!min_objects) {
5462 /*
5463 * Some architectures will only update present cpus when
5464 * onlining them, so don't trust the number if it's just 1. But
5465 * we also don't want to use nr_cpu_ids always, as on some other
5466 * architectures, there can be many possible cpus, but never
5467 * onlined. Here we compromise between trying to avoid too high
5468 * order on systems that appear larger than they are, and too
5469 * low order on systems that appear smaller than they are.
5470 */
5471 unsigned int nr_cpus = num_present_cpus();
5472 if (nr_cpus <= 1)
5473 nr_cpus = nr_cpu_ids;
5474 min_objects = 4 * (fls(nr_cpus) + 1);
5475 }
5476 /* min_objects can't be 0 because get_order(0) is undefined */
5477 max_objects = max(order_objects(slub_max_order, size), 1U);
5478 min_objects = min(min_objects, max_objects);
5479
5480 min_order = max_t(unsigned int, slub_min_order,
5481 get_order(min_objects * size));
5482 if (order_objects(min_order, size) > MAX_OBJS_PER_PAGE)
5483 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
5484
5485 /*
5486 * Attempt to find best configuration for a slab. This works by first
5487 * attempting to generate a layout with the best possible configuration
5488 * and backing off gradually.
5489 *
5490 * We start with accepting at most 1/16 waste and try to find the
5491 * smallest order from min_objects-derived/slab_min_order up to
5492 * slab_max_order that will satisfy the constraint. Note that increasing
5493 * the order can only result in same or less fractional waste, not more.
5494 *
5495 * If that fails, we increase the acceptable fraction of waste and try
5496 * again. The last iteration with fraction of 1/2 would effectively
5497 * accept any waste and give us the order determined by min_objects, as
5498	 * long as at least a single object fits within slab_max_order.
5499 */
5500 for (unsigned int fraction = 16; fraction > 1; fraction /= 2) {
5501 order = calc_slab_order(size, min_order, slub_max_order,
5502 fraction);
5503 if (order <= slub_max_order)
5504 return order;
5505 }
5506
5507 /*
5508 * Doh this slab cannot be placed using slab_max_order.
5509 */
5510 order = get_order(size);
5511 if (order <= MAX_PAGE_ORDER)
5512 return order;
5513 return -ENOSYS;
5514}
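/*
 * Editorial worked example of the min_objects heuristic above: with 16
 * present cpus, fls(16) = 5, so we ask for 4 * (5 + 1) = 24 objects
 * per slab before the waste fractions are considered.
 */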
5515
5516static void
5517init_kmem_cache_node(struct kmem_cache_node *n)
5518{
5519 n->nr_partial = 0;
5520 spin_lock_init(&n->list_lock);
5521 INIT_LIST_HEAD(&n->partial);
5522#ifdef CONFIG_SLUB_DEBUG
5523 atomic_long_set(&n->nr_slabs, 0);
5524 atomic_long_set(&n->total_objects, 0);
5525 INIT_LIST_HEAD(&n->full);
5526#endif
5527}
5528
5529#ifndef CONFIG_SLUB_TINY
5530static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
5531{
5532 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
5533 NR_KMALLOC_TYPES * KMALLOC_SHIFT_HIGH *
5534 sizeof(struct kmem_cache_cpu));
5535
5536 /*
5537 * Must align to double word boundary for the double cmpxchg
5538 * instructions to work; see __pcpu_double_call_return_bool().
5539 */
5540 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
5541 2 * sizeof(void *));
5542
5543 if (!s->cpu_slab)
5544 return 0;
5545
5546 init_kmem_cache_cpus(s);
5547
5548 return 1;
5549}
5550#else
5551static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
5552{
5553 return 1;
5554}
5555#endif /* CONFIG_SLUB_TINY */
5556
5557static struct kmem_cache *kmem_cache_node;
5558
5559/*
5560 * No kmalloc_node yet so do it by hand. We know that this is the first
5561 * slab on the node for this slabcache. There are no concurrent accesses
5562 * possible.
5563 *
5564 * Note that this function only works on the kmem_cache_node
5565 * when allocating for the kmem_cache_node. This is used for bootstrapping
5566 * memory on a fresh node that has no slab structures yet.
5567 */
5568static void early_kmem_cache_node_alloc(int node)
5569{
5570 struct slab *slab;
5571 struct kmem_cache_node *n;
5572
5573 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
5574
5575 slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
5576
5577 BUG_ON(!slab);
5578 if (slab_nid(slab) != node) {
5579 pr_err("SLUB: Unable to allocate memory from node %d\n", node);
5580 pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
5581 }
5582
5583 n = slab->freelist;
5584 BUG_ON(!n);
5585#ifdef CONFIG_SLUB_DEBUG
5586 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
5587#endif
5588 n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
5589 slab->freelist = get_freepointer(kmem_cache_node, n);
5590 slab->inuse = 1;
5591 kmem_cache_node->node[node] = n;
5592 init_kmem_cache_node(n);
5593 inc_slabs_node(kmem_cache_node, node, slab->objects);
5594
5595 /*
5596 * No locks need to be taken here as it has just been
5597 * initialized and there is no concurrent access.
5598 */
5599 __add_partial(n, slab, DEACTIVATE_TO_HEAD);
5600}
5601
5602static void free_kmem_cache_nodes(struct kmem_cache *s)
5603{
5604 int node;
5605 struct kmem_cache_node *n;
5606
5607 for_each_kmem_cache_node(s, node, n) {
5608 s->node[node] = NULL;
5609 kmem_cache_free(kmem_cache_node, n);
5610 }
5611}
5612
5613void __kmem_cache_release(struct kmem_cache *s)
5614{
5615 cache_random_seq_destroy(s);
5616#ifndef CONFIG_SLUB_TINY
5617 free_percpu(s->cpu_slab);
5618#endif
5619 free_kmem_cache_nodes(s);
5620}
5621
5622static int init_kmem_cache_nodes(struct kmem_cache *s)
5623{
5624 int node;
5625
5626 for_each_node_mask(node, slab_nodes) {
5627 struct kmem_cache_node *n;
5628
5629 if (slab_state == DOWN) {
5630 early_kmem_cache_node_alloc(node);
5631 continue;
5632 }
5633 n = kmem_cache_alloc_node(kmem_cache_node,
5634 GFP_KERNEL, node);
5635
5636 if (!n) {
5637 free_kmem_cache_nodes(s);
5638 return 0;
5639 }
5640
5641 init_kmem_cache_node(n);
5642 s->node[node] = n;
5643 }
5644 return 1;
5645}
5646
5647static void set_cpu_partial(struct kmem_cache *s)
5648{
5649#ifdef CONFIG_SLUB_CPU_PARTIAL
5650 unsigned int nr_objects;
5651
5652 /*
5653	 * cpu_partial determines the maximum number of objects kept in the
5654 * per cpu partial lists of a processor.
5655 *
5656 * Per cpu partial lists mainly contain slabs that just have one
5657 * object freed. If they are used for allocation then they can be
5658 * filled up again with minimal effort. The slab will never hit the
5659 * per node partial lists and therefore no locking will be required.
5660 *
5661 * For backwards compatibility reasons, this is determined as number
5662 * of objects, even though we now limit maximum number of pages, see
5663 * slub_set_cpu_partial()
5664 */
5665 if (!kmem_cache_has_cpu_partial(s))
5666 nr_objects = 0;
5667 else if (s->size >= PAGE_SIZE)
5668 nr_objects = 6;
5669 else if (s->size >= 1024)
5670 nr_objects = 24;
5671 else if (s->size >= 256)
5672 nr_objects = 52;
5673 else
5674 nr_objects = 120;
5675
5676 slub_set_cpu_partial(s, nr_objects);
5677#endif
5678}
5679
5680/*
5681 * calculate_sizes() determines the order and the distribution of data within
5682 * a slab object.
5683 */
5684static int calculate_sizes(struct kmem_cache_args *args, struct kmem_cache *s)
5685{
5686 slab_flags_t flags = s->flags;
5687 unsigned int size = s->object_size;
5688 unsigned int order;
5689
5690 /*
5691 * Round up object size to the next word boundary. We can only
5692 * place the free pointer at word boundaries and this determines
5693 * the possible location of the free pointer.
5694 */
5695 size = ALIGN(size, sizeof(void *));
5696
5697#ifdef CONFIG_SLUB_DEBUG
5698 /*
5699 * Determine if we can poison the object itself. If the user of
5700 * the slab may touch the object after free or before allocation
5701 * then we should never poison the object itself.
5702 */
5703 if ((flags & SLAB_POISON) && !(flags & SLAB_TYPESAFE_BY_RCU) &&
5704 !s->ctor)
5705 s->flags |= __OBJECT_POISON;
5706 else
5707 s->flags &= ~__OBJECT_POISON;
5708
5709
5710 /*
5711 * If we are Redzoning then check if there is some space between the
5712 * end of the object and the free pointer. If not then add an
5713 * additional word to have some bytes to store Redzone information.
5714 */
5715 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
5716 size += sizeof(void *);
5717#endif
5718
5719 /*
5720 * With that we have determined the number of bytes in actual use
5721 * by the object and redzoning.
5722 */
5723 s->inuse = size;
5724
5725 if (((flags & SLAB_TYPESAFE_BY_RCU) && !args->use_freeptr_offset) ||
5726 (flags & SLAB_POISON) || s->ctor ||
5727 ((flags & SLAB_RED_ZONE) &&
5728 (s->object_size < sizeof(void *) || slub_debug_orig_size(s)))) {
5729 /*
5730 * Relocate free pointer after the object if it is not
5731 * permitted to overwrite the first word of the object on
5732 * kmem_cache_free.
5733 *
5734 * This is the case if we do RCU, have a constructor or
5735 * destructor, are poisoning the objects, or are
5736 * redzoning an object smaller than sizeof(void *) or are
5737 * redzoning an object with slub_debug_orig_size() enabled,
5738 * in which case the right redzone may be extended.
5739 *
5740 * The assumption that s->offset >= s->inuse means free
5741 * pointer is outside of the object is used in the
5742 * freeptr_outside_object() function. If that is no
5743 * longer true, the function needs to be modified.
5744 */
5745 s->offset = size;
5746 size += sizeof(void *);
5747 } else if ((flags & SLAB_TYPESAFE_BY_RCU) && args->use_freeptr_offset) {
5748 s->offset = args->freeptr_offset;
5749 } else {
5750 /*
5751 * Store freelist pointer near middle of object to keep
5752 * it away from the edges of the object to avoid small
5753 * sized over/underflows from neighboring allocations.
5754 */
5755 s->offset = ALIGN_DOWN(s->object_size / 2, sizeof(void *));
5756 }
5757
5758#ifdef CONFIG_SLUB_DEBUG
5759 if (flags & SLAB_STORE_USER) {
5760 /*
5761 * Need to store information about allocs and frees after
5762 * the object.
5763 */
5764 size += 2 * sizeof(struct track);
5765
5766 /* Save the original kmalloc request size */
5767 if (flags & SLAB_KMALLOC)
5768 size += sizeof(unsigned int);
5769 }
5770#endif
5771
5772 kasan_cache_create(s, &size, &s->flags);
5773#ifdef CONFIG_SLUB_DEBUG
5774 if (flags & SLAB_RED_ZONE) {
5775 /*
5776 * Add some empty padding so that we can catch
5777 * overwrites from earlier objects rather than let
5778 * tracking information or the free pointer be
5779 * corrupted if a user writes before the start
5780 * of the object.
5781 */
5782 size += sizeof(void *);
5783
5784 s->red_left_pad = sizeof(void *);
5785 s->red_left_pad = ALIGN(s->red_left_pad, s->align);
5786 size += s->red_left_pad;
5787 }
5788#endif
5789
5790 /*
5791 * SLUB stores one object immediately after another beginning from
5792	 * offset 0. In order to align the objects we simply have to size
5793	 * each object to conform to the alignment.
5794 */
5795 size = ALIGN(size, s->align);
5796 s->size = size;
5797 s->reciprocal_size = reciprocal_value(size);
5798 order = calculate_order(size);
5799
5800 if ((int)order < 0)
5801 return 0;
5802
5803 s->allocflags = __GFP_COMP;
5804
5805 if (s->flags & SLAB_CACHE_DMA)
5806 s->allocflags |= GFP_DMA;
5807
5808 if (s->flags & SLAB_CACHE_DMA32)
5809 s->allocflags |= GFP_DMA32;
5810
5811 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5812 s->allocflags |= __GFP_RECLAIMABLE;
5813
5814 /*
5815 * Determine the number of objects per slab
5816 */
5817 s->oo = oo_make(order, size);
5818 s->min = oo_make(get_order(size), size);
5819
5820 return !!oo_objects(s->oo);
5821}
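/*
 * Worked example (illustrative only, assuming 64-bit pointers): for a
 * cache with object_size = 20, align = 8, no constructor and no
 * debug/RCU flags, calculate_sizes() computes:
 *
 *   size      = ALIGN(20, sizeof(void *)) = 24
 *   s->inuse  = 24
 *   s->offset = ALIGN_DOWN(20 / 2, 8)     = 8
 *   s->size   = ALIGN(24, 8)              = 24
 *
 * so each object occupies 24 bytes and the freelist pointer lives at
 * offset 8, away from both edges of the object.
 */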
5822
5823static void list_slab_objects(struct kmem_cache *s, struct slab *slab)
5824{
5825#ifdef CONFIG_SLUB_DEBUG
5826 void *addr = slab_address(slab);
5827 void *p;
5828
5829 if (!slab_add_kunit_errors())
5830 slab_bug(s, "Objects remaining on __kmem_cache_shutdown()");
5831
5832 spin_lock(&object_map_lock);
5833 __fill_map(object_map, s, slab);
5834
5835 for_each_object(p, s, addr, slab->objects) {
5836
5837 if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
5838 if (slab_add_kunit_errors())
5839 continue;
5840 pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
5841 print_tracking(s, p);
5842 }
5843 }
5844 spin_unlock(&object_map_lock);
5845
5846 __slab_err(slab);
5847#endif
5848}
5849
5850/*
5851 * Attempt to free all partial slabs on a node.
5852 * This is called from __kmem_cache_shutdown(). We must take list_lock
5853 * because sysfs files might still access the partial list after shutdown.
5854 */
5855static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
5856{
5857 LIST_HEAD(discard);
5858 struct slab *slab, *h;
5859
5860 BUG_ON(irqs_disabled());
5861 spin_lock_irq(&n->list_lock);
5862 list_for_each_entry_safe(slab, h, &n->partial, slab_list) {
5863 if (!slab->inuse) {
5864 remove_partial(n, slab);
5865 list_add(&slab->slab_list, &discard);
5866 } else {
5867 list_slab_objects(s, slab);
5868 }
5869 }
5870 spin_unlock_irq(&n->list_lock);
5871
5872 list_for_each_entry_safe(slab, h, &discard, slab_list)
5873 discard_slab(s, slab);
5874}
5875
5876bool __kmem_cache_empty(struct kmem_cache *s)
5877{
5878 int node;
5879 struct kmem_cache_node *n;
5880
5881 for_each_kmem_cache_node(s, node, n)
5882 if (n->nr_partial || node_nr_slabs(n))
5883 return false;
5884 return true;
5885}
5886
5887/*
5888 * Release all resources used by a slab cache.
5889 */
5890int __kmem_cache_shutdown(struct kmem_cache *s)
5891{
5892 int node;
5893 struct kmem_cache_node *n;
5894
5895 flush_all_cpus_locked(s);
5896 /* Attempt to free all objects */
5897 for_each_kmem_cache_node(s, node, n) {
5898 free_partial(s, n);
5899 if (n->nr_partial || node_nr_slabs(n))
5900 return 1;
5901 }
5902 return 0;
5903}
5904
5905#ifdef CONFIG_PRINTK
5906void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
5907{
5908 void *base;
5909 int __maybe_unused i;
5910 unsigned int objnr;
5911 void *objp;
5912 void *objp0;
5913 struct kmem_cache *s = slab->slab_cache;
5914 struct track __maybe_unused *trackp;
5915
5916 kpp->kp_ptr = object;
5917 kpp->kp_slab = slab;
5918 kpp->kp_slab_cache = s;
5919 base = slab_address(slab);
5920 objp0 = kasan_reset_tag(object);
5921#ifdef CONFIG_SLUB_DEBUG
5922 objp = restore_red_left(s, objp0);
5923#else
5924 objp = objp0;
5925#endif
5926 objnr = obj_to_index(s, slab, objp);
5927 kpp->kp_data_offset = (unsigned long)((char *)objp0 - (char *)objp);
5928 objp = base + s->size * objnr;
5929 kpp->kp_objp = objp;
5930 if (WARN_ON_ONCE(objp < base || objp >= base + slab->objects * s->size
5931 || (objp - base) % s->size) ||
5932 !(s->flags & SLAB_STORE_USER))
5933 return;
5934#ifdef CONFIG_SLUB_DEBUG
5935 objp = fixup_red_left(s, objp);
5936 trackp = get_track(s, objp, TRACK_ALLOC);
5937 kpp->kp_ret = (void *)trackp->addr;
5938#ifdef CONFIG_STACKDEPOT
5939 {
5940 depot_stack_handle_t handle;
5941 unsigned long *entries;
5942 unsigned int nr_entries;
5943
5944 handle = READ_ONCE(trackp->handle);
5945 if (handle) {
5946 nr_entries = stack_depot_fetch(handle, &entries);
5947 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5948 kpp->kp_stack[i] = (void *)entries[i];
5949 }
5950
5951 trackp = get_track(s, objp, TRACK_FREE);
5952 handle = READ_ONCE(trackp->handle);
5953 if (handle) {
5954 nr_entries = stack_depot_fetch(handle, &entries);
5955 for (i = 0; i < KS_ADDRS_COUNT && i < nr_entries; i++)
5956 kpp->kp_free_stack[i] = (void *)entries[i];
5957 }
5958 }
5959#endif
5960#endif
5961}
5962#endif
5963
5964/********************************************************************
5965 * Kmalloc subsystem
5966 *******************************************************************/
5967
5968static int __init setup_slub_min_order(char *str)
5969{
5970 get_option(&str, (int *)&slub_min_order);
5971
5972 if (slub_min_order > slub_max_order)
5973 slub_max_order = slub_min_order;
5974
5975 return 1;
5976}
5977
5978__setup("slab_min_order=", setup_slub_min_order);
5979__setup_param("slub_min_order=", slub_min_order, setup_slub_min_order, 0);
5980
5982static int __init setup_slub_max_order(char *str)
5983{
5984 get_option(&str, (int *)&slub_max_order);
5985 slub_max_order = min_t(unsigned int, slub_max_order, MAX_PAGE_ORDER);
5986
5987 if (slub_min_order > slub_max_order)
5988 slub_min_order = slub_max_order;
5989
5990 return 1;
5991}
5992
5993__setup("slab_max_order=", setup_slub_max_order);
5994__setup_param("slub_max_order=", slub_max_order, setup_slub_max_order, 0);
5995
5996static int __init setup_slub_min_objects(char *str)
5997{
5998 get_option(&str, (int *)&slub_min_objects);
5999
6000 return 1;
6001}
6002
6003__setup("slab_min_objects=", setup_slub_min_objects);
6004__setup_param("slub_min_objects=", slub_min_objects, setup_slub_min_objects, 0);
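/*
 * Example (kernel command line): the handlers above accept both the
 * legacy slub_-prefixed and the newer slab_-prefixed parameter names,
 * e.g.
 *
 *   slab_min_order=1 slab_max_order=3 slab_min_objects=16
 *
 * slub_max_order is additionally clamped to MAX_PAGE_ORDER, and the
 * min/max orders are kept consistent with each other.
 */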
6005
6006#ifdef CONFIG_NUMA
6007static int __init setup_slab_strict_numa(char *str)
6008{
6009 if (nr_node_ids > 1) {
6010 static_branch_enable(&strict_numa);
6011 pr_info("SLUB: Strict NUMA enabled.\n");
6012 } else {
6013		pr_warn("slab_strict_numa parameter set on non-NUMA system.\n");
6014 }
6015
6016 return 1;
6017}
6018
6019__setup("slab_strict_numa", setup_slab_strict_numa);
6020#endif
6021
6023#ifdef CONFIG_HARDENED_USERCOPY
6024/*
6025 * Rejects incorrectly sized objects and objects that are to be copied
6026 * to/from userspace but do not fall entirely within the containing slab
6027 * cache's usercopy region.
6028 *
6029 * Does not return on failure: usercopy_abort() is called with the name
6030 * of the offending cache when the check fails.
6031 */
6032void __check_heap_object(const void *ptr, unsigned long n,
6033 const struct slab *slab, bool to_user)
6034{
6035 struct kmem_cache *s;
6036 unsigned int offset;
6037 bool is_kfence = is_kfence_address(ptr);
6038
6039 ptr = kasan_reset_tag(ptr);
6040
6041 /* Find object and usable object size. */
6042 s = slab->slab_cache;
6043
6044 /* Reject impossible pointers. */
6045 if (ptr < slab_address(slab))
6046 usercopy_abort("SLUB object not in SLUB page?!", NULL,
6047 to_user, 0, n);
6048
6049 /* Find offset within object. */
6050 if (is_kfence)
6051 offset = ptr - kfence_object_start(ptr);
6052 else
6053 offset = (ptr - slab_address(slab)) % s->size;
6054
6055 /* Adjust for redzone and reject if within the redzone. */
6056 if (!is_kfence && kmem_cache_debug_flags(s, SLAB_RED_ZONE)) {
6057 if (offset < s->red_left_pad)
6058 usercopy_abort("SLUB object in left red zone",
6059 s->name, to_user, offset, n);
6060 offset -= s->red_left_pad;
6061 }
6062
6063 /* Allow address range falling entirely within usercopy region. */
6064 if (offset >= s->useroffset &&
6065 offset - s->useroffset <= s->usersize &&
6066 n <= s->useroffset - offset + s->usersize)
6067 return;
6068
6069 usercopy_abort("SLUB object", s->name, to_user, offset, n);
6070}
6071#endif /* CONFIG_HARDENED_USERCOPY */
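/*
 * Worked example (illustrative): for a non-KFENCE, non-redzoned cache
 * with s->size = 64, s->useroffset = 16 and s->usersize = 32, a copy of
 * n bytes at ptr is allowed iff
 *
 *   offset = (ptr - slab_address(slab)) % 64
 *   offset >= 16 && offset - 16 <= 32 && n <= 16 - offset + 32
 *
 * i.e. [offset, offset + n) must fall entirely inside the [16, 48)
 * usercopy window of the object; anything else calls usercopy_abort().
 */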
6072
6073#define SHRINK_PROMOTE_MAX 32
6074
6075/*
6076 * kmem_cache_shrink discards empty slabs and promotes the slabs filled
6077 * up most to the head of the partial lists. New allocations will then
6078 * fill those up and thus they can be removed from the partial lists.
6079 *
6080 * The slabs with the least items are placed last. This results in them
6081 * being allocated from last, increasing the chance that their remaining
6082 * objects are freed and the now-empty slabs can be discarded.
6083 */
6084static int __kmem_cache_do_shrink(struct kmem_cache *s)
6085{
6086 int node;
6087 int i;
6088 struct kmem_cache_node *n;
6089 struct slab *slab;
6090 struct slab *t;
6091 struct list_head discard;
6092 struct list_head promote[SHRINK_PROMOTE_MAX];
6093 unsigned long flags;
6094 int ret = 0;
6095
6096 for_each_kmem_cache_node(s, node, n) {
6097 INIT_LIST_HEAD(&discard);
6098 for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
6099 INIT_LIST_HEAD(promote + i);
6100
6101 spin_lock_irqsave(&n->list_lock, flags);
6102
6103 /*
6104 * Build lists of slabs to discard or promote.
6105 *
6106 * Note that concurrent frees may occur while we hold the
6107 * list_lock. slab->inuse here is the upper limit.
6108 */
6109 list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
6110 int free = slab->objects - slab->inuse;
6111
6112 /* Do not reread slab->inuse */
6113 barrier();
6114
6115 /* We do not keep full slabs on the list */
6116 BUG_ON(free <= 0);
6117
6118 if (free == slab->objects) {
6119 list_move(&slab->slab_list, &discard);
6120 slab_clear_node_partial(slab);
6121 n->nr_partial--;
6122 dec_slabs_node(s, node, slab->objects);
6123 } else if (free <= SHRINK_PROMOTE_MAX)
6124 list_move(&slab->slab_list, promote + free - 1);
6125 }
6126
6127 /*
6128 * Promote the slabs filled up most to the head of the
6129 * partial list.
6130 */
6131 for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
6132 list_splice(promote + i, &n->partial);
6133
6134 spin_unlock_irqrestore(&n->list_lock, flags);
6135
6136 /* Release empty slabs */
6137 list_for_each_entry_safe(slab, t, &discard, slab_list)
6138 free_slab(s, slab);
6139
6140 if (node_nr_slabs(n))
6141 ret = 1;
6142 }
6143
6144 return ret;
6145}
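/*
 * Illustrative example of the bucketing above: a partial slab with one
 * free object is moved to promote[0], one with two free objects to
 * promote[1], and so on; completely free slabs go to the discard list.
 * Splicing promote[SHRINK_PROMOTE_MAX - 1] down to promote[0] back onto
 * n->partial leaves the fullest slabs at the head of the list.
 */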
6146
6147int __kmem_cache_shrink(struct kmem_cache *s)
6148{
6149 flush_all(s);
6150 return __kmem_cache_do_shrink(s);
6151}
6152
6153static int slab_mem_going_offline_callback(void *arg)
6154{
6155 struct kmem_cache *s;
6156
6157 mutex_lock(&slab_mutex);
6158 list_for_each_entry(s, &slab_caches, list) {
6159 flush_all_cpus_locked(s);
6160 __kmem_cache_do_shrink(s);
6161 }
6162 mutex_unlock(&slab_mutex);
6163
6164 return 0;
6165}
6166
6167static void slab_mem_offline_callback(void *arg)
6168{
6169 struct memory_notify *marg = arg;
6170 int offline_node;
6171
6172 offline_node = marg->status_change_nid_normal;
6173
6174 /*
6175	 * If the node still has available memory, we still need its
6176	 * kmem_cache_node, so there is nothing to do.
6177 */
6178 if (offline_node < 0)
6179 return;
6180
6181 mutex_lock(&slab_mutex);
6182 node_clear(offline_node, slab_nodes);
6183 /*
6184 * We no longer free kmem_cache_node structures here, as it would be
6185 * racy with all get_node() users, and infeasible to protect them with
6186 * slab_mutex.
6187 */
6188 mutex_unlock(&slab_mutex);
6189}
6190
6191static int slab_mem_going_online_callback(void *arg)
6192{
6193 struct kmem_cache_node *n;
6194 struct kmem_cache *s;
6195 struct memory_notify *marg = arg;
6196 int nid = marg->status_change_nid_normal;
6197 int ret = 0;
6198
6199 /*
6200 * If the node's memory is already available, then kmem_cache_node is
6201 * already created. Nothing to do.
6202 */
6203 if (nid < 0)
6204 return 0;
6205
6206 /*
6207 * We are bringing a node online. No memory is available yet. We must
6208 * allocate a kmem_cache_node structure in order to bring the node
6209 * online.
6210 */
6211 mutex_lock(&slab_mutex);
6212 list_for_each_entry(s, &slab_caches, list) {
6213 /*
6214 * The structure may already exist if the node was previously
6215 * onlined and offlined.
6216 */
6217 if (get_node(s, nid))
6218 continue;
6219 /*
6220		 * XXX: kmem_cache_alloc() will fall back to other nodes
6221		 * since memory is not yet available from the node that
6222		 * is being brought up.
6223 */
6224 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
6225 if (!n) {
6226 ret = -ENOMEM;
6227 goto out;
6228 }
6229 init_kmem_cache_node(n);
6230 s->node[nid] = n;
6231 }
6232 /*
6233 * Any cache created after this point will also have kmem_cache_node
6234 * initialized for the new node.
6235 */
6236 node_set(nid, slab_nodes);
6237out:
6238 mutex_unlock(&slab_mutex);
6239 return ret;
6240}
6241
6242static int slab_memory_callback(struct notifier_block *self,
6243 unsigned long action, void *arg)
6244{
6245 int ret = 0;
6246
6247 switch (action) {
6248 case MEM_GOING_ONLINE:
6249 ret = slab_mem_going_online_callback(arg);
6250 break;
6251 case MEM_GOING_OFFLINE:
6252 ret = slab_mem_going_offline_callback(arg);
6253 break;
6254 case MEM_OFFLINE:
6255 case MEM_CANCEL_ONLINE:
6256 slab_mem_offline_callback(arg);
6257 break;
6258 case MEM_ONLINE:
6259 case MEM_CANCEL_OFFLINE:
6260 break;
6261 }
6262 if (ret)
6263 ret = notifier_from_errno(ret);
6264 else
6265 ret = NOTIFY_OK;
6266 return ret;
6267}
6268
6269/********************************************************************
6270 * Basic setup of slabs
6271 *******************************************************************/
6272
6273/*
6274 * Used for early kmem_cache structures that were allocated using
6275 * the page allocator. Allocate them properly then fix up the pointers
6276 * that may be pointing to the wrong kmem_cache structure.
6277 */
6278
6279static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
6280{
6281 int node;
6282 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
6283 struct kmem_cache_node *n;
6284
6285 memcpy(s, static_cache, kmem_cache->object_size);
6286
6287 /*
6288 * This runs very early, and only the boot processor is supposed to be
6289	 * up. Even if that were not the case, IRQs are not enabled yet, so we
6290	 * could not send IPIs anyway.
6291 */
6292 __flush_cpu_slab(s, smp_processor_id());
6293 for_each_kmem_cache_node(s, node, n) {
6294 struct slab *p;
6295
6296 list_for_each_entry(p, &n->partial, slab_list)
6297 p->slab_cache = s;
6298
6299#ifdef CONFIG_SLUB_DEBUG
6300 list_for_each_entry(p, &n->full, slab_list)
6301 p->slab_cache = s;
6302#endif
6303 }
6304 list_add(&s->list, &slab_caches);
6305 return s;
6306}
6307
6308void __init kmem_cache_init(void)
6309{
6310 static __initdata struct kmem_cache boot_kmem_cache,
6311 boot_kmem_cache_node;
6312 int node;
6313
6314 if (debug_guardpage_minorder())
6315 slub_max_order = 0;
6316
6317 /* Print slub debugging pointers without hashing */
6318 if (__slub_debug_enabled())
6319 no_hash_pointers_enable(NULL);
6320
6321 kmem_cache_node = &boot_kmem_cache_node;
6322 kmem_cache = &boot_kmem_cache;
6323
6324 /*
6325 * Initialize the nodemask for which we will allocate per node
6326	 * structures. We do not need to take slab_mutex here yet.
6327 */
6328 for_each_node_state(node, N_NORMAL_MEMORY)
6329 node_set(node, slab_nodes);
6330
6331 create_boot_cache(kmem_cache_node, "kmem_cache_node",
6332 sizeof(struct kmem_cache_node),
6333 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
6334
6335 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
6336
6337 /* Able to allocate the per node structures */
6338 slab_state = PARTIAL;
6339
6340 create_boot_cache(kmem_cache, "kmem_cache",
6341 offsetof(struct kmem_cache, node) +
6342 nr_node_ids * sizeof(struct kmem_cache_node *),
6343 SLAB_HWCACHE_ALIGN | SLAB_NO_OBJ_EXT, 0, 0);
6344
6345 kmem_cache = bootstrap(&boot_kmem_cache);
6346 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
6347
6348 /* Now we can use the kmem_cache to allocate kmalloc slabs */
6349 setup_kmalloc_cache_index_table();
6350 create_kmalloc_caches();
6351
6352 /* Setup random freelists for each cache */
6353 init_freelist_randomization();
6354
6355 cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
6356 slub_cpu_dead);
6357
6358 pr_info("SLUB: HWalign=%d, Order=%u-%u, MinObjects=%u, CPUs=%u, Nodes=%u\n",
6359 cache_line_size(),
6360 slub_min_order, slub_max_order, slub_min_objects,
6361 nr_cpu_ids, nr_node_ids);
6362}
6363
6364void __init kmem_cache_init_late(void)
6365{
6366#ifndef CONFIG_SLUB_TINY
6367 flushwq = alloc_workqueue("slub_flushwq", WQ_MEM_RECLAIM, 0);
6368 WARN_ON(!flushwq);
6369#endif
6370}
6371
6372struct kmem_cache *
6373__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
6374 slab_flags_t flags, void (*ctor)(void *))
6375{
6376 struct kmem_cache *s;
6377
6378 s = find_mergeable(size, align, flags, name, ctor);
6379 if (s) {
6380 if (sysfs_slab_alias(s, name))
6381 pr_err("SLUB: Unable to add cache alias %s to sysfs\n",
6382 name);
6383
6384 s->refcount++;
6385
6386 /*
6387 * Adjust the object sizes so that we clear
6388 * the complete object on kzalloc.
6389 */
6390 s->object_size = max(s->object_size, size);
6391 s->inuse = max(s->inuse, ALIGN(size, sizeof(void *)));
6392 }
6393
6394 return s;
6395}
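/*
 * Illustrative example: a request to create a mergeable 96-byte cache
 * may be satisfied by an existing compatible cache; only a sysfs alias
 * link is added, and object_size/inuse are widened so that kzalloc()
 * keeps clearing the full requested size.
 */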
6396
6397int do_kmem_cache_create(struct kmem_cache *s, const char *name,
6398 unsigned int size, struct kmem_cache_args *args,
6399 slab_flags_t flags)
6400{
6401 int err = -EINVAL;
6402
6403 s->name = name;
6404 s->size = s->object_size = size;
6405
6406 s->flags = kmem_cache_flags(flags, s->name);
6407#ifdef CONFIG_SLAB_FREELIST_HARDENED
6408 s->random = get_random_long();
6409#endif
6410 s->align = args->align;
6411 s->ctor = args->ctor;
6412#ifdef CONFIG_HARDENED_USERCOPY
6413 s->useroffset = args->useroffset;
6414 s->usersize = args->usersize;
6415#endif
6416
6417 if (!calculate_sizes(args, s))
6418 goto out;
6419 if (disable_higher_order_debug) {
6420 /*
6421 * Disable debugging flags that store metadata if the min slab
6422 * order increased.
6423 */
6424 if (get_order(s->size) > get_order(s->object_size)) {
6425 s->flags &= ~DEBUG_METADATA_FLAGS;
6426 s->offset = 0;
6427 if (!calculate_sizes(args, s))
6428 goto out;
6429 }
6430 }
6431
6432#ifdef system_has_freelist_aba
6433 if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
6434 /* Enable fast mode */
6435 s->flags |= __CMPXCHG_DOUBLE;
6436 }
6437#endif
6438
6439 /*
6440 * The larger the object size is, the more slabs we want on the partial
6441 * list to avoid pounding the page allocator excessively.
6442 */
6443 s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
6444 s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
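	/*
	 * e.g. (illustrative): for s->size = 4096, ilog2(4096) / 2 = 6,
	 * which already lies within the [MIN_PARTIAL, MAX_PARTIAL] clamp,
	 * so min_partial = 6.
	 */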
6445
6446 set_cpu_partial(s);
6447
6448#ifdef CONFIG_NUMA
6449 s->remote_node_defrag_ratio = 1000;
6450#endif
6451
6452 /* Initialize the pre-computed randomized freelist if slab is up */
6453 if (slab_state >= UP) {
6454 if (init_cache_random_seq(s))
6455 goto out;
6456 }
6457
6458 if (!init_kmem_cache_nodes(s))
6459 goto out;
6460
6461 if (!alloc_kmem_cache_cpus(s))
6462 goto out;
6463
6464 err = 0;
6465
6466 /* Mutex is not taken during early boot */
6467 if (slab_state <= UP)
6468 goto out;
6469
6470 /*
6471 * Failing to create sysfs files is not critical to SLUB functionality.
6472 * If it fails, proceed with cache creation without these files.
6473 */
6474 if (sysfs_slab_add(s))
6475 pr_err("SLUB: Unable to add cache %s to sysfs\n", s->name);
6476
6477 if (s->flags & SLAB_STORE_USER)
6478 debugfs_slab_add(s);
6479
6480out:
6481 if (err)
6482 __kmem_cache_release(s);
6483 return err;
6484}
6485
6486#ifdef SLAB_SUPPORTS_SYSFS
6487static int count_inuse(struct slab *slab)
6488{
6489 return slab->inuse;
6490}
6491
6492static int count_total(struct slab *slab)
6493{
6494 return slab->objects;
6495}
6496#endif
6497
6498#ifdef CONFIG_SLUB_DEBUG
6499static void validate_slab(struct kmem_cache *s, struct slab *slab,
6500 unsigned long *obj_map)
6501{
6502 void *p;
6503 void *addr = slab_address(slab);
6504
6505 if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
6506 return;
6507
6508 /* Now we know that a valid freelist exists */
6509 __fill_map(obj_map, s, slab);
6510 for_each_object(p, s, addr, slab->objects) {
6511 u8 val = test_bit(__obj_to_index(s, addr, p), obj_map) ?
6512 SLUB_RED_INACTIVE : SLUB_RED_ACTIVE;
6513
6514 if (!check_object(s, slab, p, val))
6515 break;
6516 }
6517}
6518
6519static int validate_slab_node(struct kmem_cache *s,
6520 struct kmem_cache_node *n, unsigned long *obj_map)
6521{
6522 unsigned long count = 0;
6523 struct slab *slab;
6524 unsigned long flags;
6525
6526 spin_lock_irqsave(&n->list_lock, flags);
6527
6528 list_for_each_entry(slab, &n->partial, slab_list) {
6529 validate_slab(s, slab, obj_map);
6530 count++;
6531 }
6532 if (count != n->nr_partial) {
6533 pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
6534 s->name, count, n->nr_partial);
6535 slab_add_kunit_errors();
6536 }
6537
6538 if (!(s->flags & SLAB_STORE_USER))
6539 goto out;
6540
6541 list_for_each_entry(slab, &n->full, slab_list) {
6542 validate_slab(s, slab, obj_map);
6543 count++;
6544 }
6545 if (count != node_nr_slabs(n)) {
6546 pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
6547 s->name, count, node_nr_slabs(n));
6548 slab_add_kunit_errors();
6549 }
6550
6551out:
6552 spin_unlock_irqrestore(&n->list_lock, flags);
6553 return count;
6554}
6555
6556long validate_slab_cache(struct kmem_cache *s)
6557{
6558 int node;
6559 unsigned long count = 0;
6560 struct kmem_cache_node *n;
6561 unsigned long *obj_map;
6562
6563 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
6564 if (!obj_map)
6565 return -ENOMEM;
6566
6567 flush_all(s);
6568 for_each_kmem_cache_node(s, node, n)
6569 count += validate_slab_node(s, n, obj_map);
6570
6571 bitmap_free(obj_map);
6572
6573 return count;
6574}
6575EXPORT_SYMBOL(validate_slab_cache);
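/*
 * Usage sketch: for a cache booted with slub_debug, validation can be
 * triggered from userspace through the sysfs attribute defined below:
 *
 *   echo 1 > /sys/kernel/slab/kmalloc-64/validate
 *
 * which reaches validate_slab_cache() via validate_store().
 */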
6576
6577#ifdef CONFIG_DEBUG_FS
6578/*
6579 * Generate lists of code addresses where slabcache objects are allocated
6580 * and freed.
6581 */
6582
6583struct location {
6584 depot_stack_handle_t handle;
6585 unsigned long count;
6586 unsigned long addr;
6587 unsigned long waste;
6588 long long sum_time;
6589 long min_time;
6590 long max_time;
6591 long min_pid;
6592 long max_pid;
6593 DECLARE_BITMAP(cpus, NR_CPUS);
6594 nodemask_t nodes;
6595};
6596
6597struct loc_track {
6598 unsigned long max;
6599 unsigned long count;
6600 struct location *loc;
6601 loff_t idx;
6602};
6603
6604static struct dentry *slab_debugfs_root;
6605
6606static void free_loc_track(struct loc_track *t)
6607{
6608 if (t->max)
6609 free_pages((unsigned long)t->loc,
6610 get_order(sizeof(struct location) * t->max));
6611}
6612
6613static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
6614{
6615 struct location *l;
6616 int order;
6617
6618 order = get_order(sizeof(struct location) * max);
6619
6620 l = (void *)__get_free_pages(flags, order);
6621 if (!l)
6622 return 0;
6623
6624 if (t->count) {
6625 memcpy(l, t->loc, sizeof(struct location) * t->count);
6626 free_loc_track(t);
6627 }
6628 t->max = max;
6629 t->loc = l;
6630 return 1;
6631}
6632
6633static int add_location(struct loc_track *t, struct kmem_cache *s,
6634 const struct track *track,
6635 unsigned int orig_size)
6636{
6637 long start, end, pos;
6638 struct location *l;
6639 unsigned long caddr, chandle, cwaste;
6640 unsigned long age = jiffies - track->when;
6641 depot_stack_handle_t handle = 0;
6642 unsigned int waste = s->object_size - orig_size;
6643
6644#ifdef CONFIG_STACKDEPOT
6645 handle = READ_ONCE(track->handle);
6646#endif
6647 start = -1;
6648 end = t->count;
6649
6650 for ( ; ; ) {
6651 pos = start + (end - start + 1) / 2;
6652
6653 /*
6654		 * There is nothing at "end". If we end up there,
6655		 * we need to insert the new element before "end".
6656 */
6657 if (pos == end)
6658 break;
6659
6660 l = &t->loc[pos];
6661 caddr = l->addr;
6662 chandle = l->handle;
6663 cwaste = l->waste;
6664 if ((track->addr == caddr) && (handle == chandle) &&
6665 (waste == cwaste)) {
6666
6667 l->count++;
6668 if (track->when) {
6669 l->sum_time += age;
6670 if (age < l->min_time)
6671 l->min_time = age;
6672 if (age > l->max_time)
6673 l->max_time = age;
6674
6675 if (track->pid < l->min_pid)
6676 l->min_pid = track->pid;
6677 if (track->pid > l->max_pid)
6678 l->max_pid = track->pid;
6679
6680 cpumask_set_cpu(track->cpu,
6681 to_cpumask(l->cpus));
6682 }
6683 node_set(page_to_nid(virt_to_page(track)), l->nodes);
6684 return 1;
6685 }
6686
6687 if (track->addr < caddr)
6688 end = pos;
6689 else if (track->addr == caddr && handle < chandle)
6690 end = pos;
6691 else if (track->addr == caddr && handle == chandle &&
6692 waste < cwaste)
6693 end = pos;
6694 else
6695 start = pos;
6696 }
6697
6698 /*
6699 * Not found. Insert new tracking element.
6700 */
6701 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
6702 return 0;
6703
6704 l = t->loc + pos;
6705 if (pos < t->count)
6706 memmove(l + 1, l,
6707 (t->count - pos) * sizeof(struct location));
6708 t->count++;
6709 l->count = 1;
6710 l->addr = track->addr;
6711 l->sum_time = age;
6712 l->min_time = age;
6713 l->max_time = age;
6714 l->min_pid = track->pid;
6715 l->max_pid = track->pid;
6716 l->handle = handle;
6717 l->waste = waste;
6718 cpumask_clear(to_cpumask(l->cpus));
6719 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
6720 nodes_clear(l->nodes);
6721 node_set(page_to_nid(virt_to_page(track)), l->nodes);
6722 return 1;
6723}
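/*
 * Note on the binary search above (illustrative): t->loc is kept sorted
 * by (addr, handle, waste). For existing addrs 0x10, 0x20 and 0x40, a
 * new track->addr of 0x30 converges on pos = 2, where the memmove()
 * opens a slot between 0x20 and 0x40 for the new location.
 */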
6724
6725static void process_slab(struct loc_track *t, struct kmem_cache *s,
6726 struct slab *slab, enum track_item alloc,
6727 unsigned long *obj_map)
6728{
6729 void *addr = slab_address(slab);
6730 bool is_alloc = (alloc == TRACK_ALLOC);
6731 void *p;
6732
6733 __fill_map(obj_map, s, slab);
6734
6735 for_each_object(p, s, addr, slab->objects)
6736 if (!test_bit(__obj_to_index(s, addr, p), obj_map))
6737 add_location(t, s, get_track(s, p, alloc),
6738 is_alloc ? get_orig_size(s, p) :
6739 s->object_size);
6740}
6741#endif /* CONFIG_DEBUG_FS */
6742#endif /* CONFIG_SLUB_DEBUG */
6743
6744#ifdef SLAB_SUPPORTS_SYSFS
6745enum slab_stat_type {
6746 SL_ALL, /* All slabs */
6747 SL_PARTIAL, /* Only partially allocated slabs */
6748 SL_CPU, /* Only slabs used for cpu caches */
6749 SL_OBJECTS, /* Determine allocated objects not slabs */
6750 SL_TOTAL /* Determine object capacity not slabs */
6751};
6752
6753#define SO_ALL (1 << SL_ALL)
6754#define SO_PARTIAL (1 << SL_PARTIAL)
6755#define SO_CPU (1 << SL_CPU)
6756#define SO_OBJECTS (1 << SL_OBJECTS)
6757#define SO_TOTAL (1 << SL_TOTAL)
6758
6759static ssize_t show_slab_objects(struct kmem_cache *s,
6760 char *buf, unsigned long flags)
6761{
6762 unsigned long total = 0;
6763 int node;
6764 int x;
6765 unsigned long *nodes;
6766 int len = 0;
6767
6768 nodes = kcalloc(nr_node_ids, sizeof(unsigned long), GFP_KERNEL);
6769 if (!nodes)
6770 return -ENOMEM;
6771
6772 if (flags & SO_CPU) {
6773 int cpu;
6774
6775 for_each_possible_cpu(cpu) {
6776 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
6777 cpu);
6778 int node;
6779 struct slab *slab;
6780
6781 slab = READ_ONCE(c->slab);
6782 if (!slab)
6783 continue;
6784
6785 node = slab_nid(slab);
6786 if (flags & SO_TOTAL)
6787 x = slab->objects;
6788 else if (flags & SO_OBJECTS)
6789 x = slab->inuse;
6790 else
6791 x = 1;
6792
6793 total += x;
6794 nodes[node] += x;
6795
6796#ifdef CONFIG_SLUB_CPU_PARTIAL
6797 slab = slub_percpu_partial_read_once(c);
6798 if (slab) {
6799 node = slab_nid(slab);
6800 if (flags & SO_TOTAL)
6801 WARN_ON_ONCE(1);
6802 else if (flags & SO_OBJECTS)
6803 WARN_ON_ONCE(1);
6804 else
6805 x = data_race(slab->slabs);
6806 total += x;
6807 nodes[node] += x;
6808 }
6809#endif
6810 }
6811 }
6812
6813 /*
6814	 * We cannot take "mem_hotplug_lock" here with "kernfs_mutex" already
6815	 * held, as that would conflict with the existing lock order:
6816 *
6817 * mem_hotplug_lock->slab_mutex->kernfs_mutex
6818 *
6819 * We don't really need mem_hotplug_lock (to hold off
6820 * slab_mem_going_offline_callback) here because slab's memory hot
6821 * unplug code doesn't destroy the kmem_cache->node[] data.
6822 */
6823
6824#ifdef CONFIG_SLUB_DEBUG
6825 if (flags & SO_ALL) {
6826 struct kmem_cache_node *n;
6827
6828 for_each_kmem_cache_node(s, node, n) {
6829
6830 if (flags & SO_TOTAL)
6831 x = node_nr_objs(n);
6832 else if (flags & SO_OBJECTS)
6833 x = node_nr_objs(n) - count_partial(n, count_free);
6834 else
6835 x = node_nr_slabs(n);
6836 total += x;
6837 nodes[node] += x;
6838 }
6839
6840 } else
6841#endif
6842 if (flags & SO_PARTIAL) {
6843 struct kmem_cache_node *n;
6844
6845 for_each_kmem_cache_node(s, node, n) {
6846 if (flags & SO_TOTAL)
6847 x = count_partial(n, count_total);
6848 else if (flags & SO_OBJECTS)
6849 x = count_partial(n, count_inuse);
6850 else
6851 x = n->nr_partial;
6852 total += x;
6853 nodes[node] += x;
6854 }
6855 }
6856
6857 len += sysfs_emit_at(buf, len, "%lu", total);
6858#ifdef CONFIG_NUMA
6859 for (node = 0; node < nr_node_ids; node++) {
6860 if (nodes[node])
6861 len += sysfs_emit_at(buf, len, " N%d=%lu",
6862 node, nodes[node]);
6863 }
6864#endif
6865 len += sysfs_emit_at(buf, len, "\n");
6866 kfree(nodes);
6867
6868 return len;
6869}
6870
6871#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
6872#define to_slab(n) container_of(n, struct kmem_cache, kobj)
6873
6874struct slab_attribute {
6875 struct attribute attr;
6876 ssize_t (*show)(struct kmem_cache *s, char *buf);
6877 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
6878};
6879
6880#define SLAB_ATTR_RO(_name) \
6881 static struct slab_attribute _name##_attr = __ATTR_RO_MODE(_name, 0400)
6882
6883#define SLAB_ATTR(_name) \
6884 static struct slab_attribute _name##_attr = __ATTR_RW_MODE(_name, 0600)
6885
6886static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
6887{
6888 return sysfs_emit(buf, "%u\n", s->size);
6889}
6890SLAB_ATTR_RO(slab_size);
6891
6892static ssize_t align_show(struct kmem_cache *s, char *buf)
6893{
6894 return sysfs_emit(buf, "%u\n", s->align);
6895}
6896SLAB_ATTR_RO(align);
6897
6898static ssize_t object_size_show(struct kmem_cache *s, char *buf)
6899{
6900 return sysfs_emit(buf, "%u\n", s->object_size);
6901}
6902SLAB_ATTR_RO(object_size);
6903
6904static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
6905{
6906 return sysfs_emit(buf, "%u\n", oo_objects(s->oo));
6907}
6908SLAB_ATTR_RO(objs_per_slab);
6909
6910static ssize_t order_show(struct kmem_cache *s, char *buf)
6911{
6912 return sysfs_emit(buf, "%u\n", oo_order(s->oo));
6913}
6914SLAB_ATTR_RO(order);
6915
6916static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
6917{
6918 return sysfs_emit(buf, "%lu\n", s->min_partial);
6919}
6920
6921static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
6922 size_t length)
6923{
6924 unsigned long min;
6925 int err;
6926
6927 err = kstrtoul(buf, 10, &min);
6928 if (err)
6929 return err;
6930
6931 s->min_partial = min;
6932 return length;
6933}
6934SLAB_ATTR(min_partial);
6935
6936static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
6937{
6938 unsigned int nr_partial = 0;
6939#ifdef CONFIG_SLUB_CPU_PARTIAL
6940 nr_partial = s->cpu_partial;
6941#endif
6942
6943 return sysfs_emit(buf, "%u\n", nr_partial);
6944}
6945
6946static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
6947 size_t length)
6948{
6949 unsigned int objects;
6950 int err;
6951
6952 err = kstrtouint(buf, 10, &objects);
6953 if (err)
6954 return err;
6955 if (objects && !kmem_cache_has_cpu_partial(s))
6956 return -EINVAL;
6957
6958 slub_set_cpu_partial(s, objects);
6959 flush_all(s);
6960 return length;
6961}
6962SLAB_ATTR(cpu_partial);
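/*
 * Usage sketch: the per cpu partial objects target can be tuned at
 * runtime, e.g.
 *
 *   echo 30 > /sys/kernel/slab/kmalloc-64/cpu_partial
 *
 * For caches without per cpu partial support only 0 is accepted; any
 * other value is rejected with -EINVAL.
 */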
6963
6964static ssize_t ctor_show(struct kmem_cache *s, char *buf)
6965{
6966 if (!s->ctor)
6967 return 0;
6968 return sysfs_emit(buf, "%pS\n", s->ctor);
6969}
6970SLAB_ATTR_RO(ctor);
6971
6972static ssize_t aliases_show(struct kmem_cache *s, char *buf)
6973{
6974 return sysfs_emit(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
6975}
6976SLAB_ATTR_RO(aliases);
6977
6978static ssize_t partial_show(struct kmem_cache *s, char *buf)
6979{
6980 return show_slab_objects(s, buf, SO_PARTIAL);
6981}
6982SLAB_ATTR_RO(partial);
6983
6984static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
6985{
6986 return show_slab_objects(s, buf, SO_CPU);
6987}
6988SLAB_ATTR_RO(cpu_slabs);
6989
6990static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
6991{
6992 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
6993}
6994SLAB_ATTR_RO(objects_partial);
6995
6996static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
6997{
6998 int objects = 0;
6999 int slabs = 0;
7000 int cpu __maybe_unused;
7001 int len = 0;
7002
7003#ifdef CONFIG_SLUB_CPU_PARTIAL
7004 for_each_online_cpu(cpu) {
7005 struct slab *slab;
7006
7007 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
7008
7009 if (slab)
7010 slabs += data_race(slab->slabs);
7011 }
7012#endif
7013
7014 /* Approximate half-full slabs, see slub_set_cpu_partial() */
7015 objects = (slabs * oo_objects(s->oo)) / 2;
7016 len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
7017
7018#ifdef CONFIG_SLUB_CPU_PARTIAL
7019 for_each_online_cpu(cpu) {
7020 struct slab *slab;
7021
7022 slab = slub_percpu_partial(per_cpu_ptr(s->cpu_slab, cpu));
7023 if (slab) {
7024 slabs = data_race(slab->slabs);
7025 objects = (slabs * oo_objects(s->oo)) / 2;
7026 len += sysfs_emit_at(buf, len, " C%d=%d(%d)",
7027 cpu, objects, slabs);
7028 }
7029 }
7030#endif
7031 len += sysfs_emit_at(buf, len, "\n");
7032
7033 return len;
7034}
7035SLAB_ATTR_RO(slabs_cpu_partial);
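/*
 * Example output (illustrative): "60(5) C0=36(3) C1=24(2)" -- an
 * estimated 60 objects in 5 per cpu partial slabs in total, followed by
 * the per-cpu breakdown in the same objects(slabs) form.
 */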
7036
7037static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
7038{
7039 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
7040}
7041SLAB_ATTR_RO(reclaim_account);
7042
7043static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
7044{
7045 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
7046}
7047SLAB_ATTR_RO(hwcache_align);
7048
7049#ifdef CONFIG_ZONE_DMA
7050static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
7051{
7052 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
7053}
7054SLAB_ATTR_RO(cache_dma);
7055#endif
7056
7057#ifdef CONFIG_HARDENED_USERCOPY
7058static ssize_t usersize_show(struct kmem_cache *s, char *buf)
7059{
7060 return sysfs_emit(buf, "%u\n", s->usersize);
7061}
7062SLAB_ATTR_RO(usersize);
7063#endif
7064
7065static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
7066{
7067 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU));
7068}
7069SLAB_ATTR_RO(destroy_by_rcu);
7070
7071#ifdef CONFIG_SLUB_DEBUG
7072static ssize_t slabs_show(struct kmem_cache *s, char *buf)
7073{
7074 return show_slab_objects(s, buf, SO_ALL);
7075}
7076SLAB_ATTR_RO(slabs);
7077
7078static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
7079{
7080 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
7081}
7082SLAB_ATTR_RO(total_objects);
7083
7084static ssize_t objects_show(struct kmem_cache *s, char *buf)
7085{
7086 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
7087}
7088SLAB_ATTR_RO(objects);
7089
7090static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
7091{
7092 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
7093}
7094SLAB_ATTR_RO(sanity_checks);
7095
7096static ssize_t trace_show(struct kmem_cache *s, char *buf)
7097{
7098 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_TRACE));
7099}
7100SLAB_ATTR_RO(trace);
7101
7102static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
7103{
7104 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
7105}
7106
7107SLAB_ATTR_RO(red_zone);
7108
7109static ssize_t poison_show(struct kmem_cache *s, char *buf)
7110{
7111 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_POISON));
7112}
7113
7114SLAB_ATTR_RO(poison);
7115
7116static ssize_t store_user_show(struct kmem_cache *s, char *buf)
7117{
7118 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
7119}
7120
7121SLAB_ATTR_RO(store_user);
7122
7123static ssize_t validate_show(struct kmem_cache *s, char *buf)
7124{
7125 return 0;
7126}
7127
7128static ssize_t validate_store(struct kmem_cache *s,
7129 const char *buf, size_t length)
7130{
7131 int ret = -EINVAL;
7132
7133 if (buf[0] == '1' && kmem_cache_debug(s)) {
7134 ret = validate_slab_cache(s);
7135 if (ret >= 0)
7136 ret = length;
7137 }
7138 return ret;
7139}
7140SLAB_ATTR(validate);
7141
7142#endif /* CONFIG_SLUB_DEBUG */
7143
7144#ifdef CONFIG_FAILSLAB
7145static ssize_t failslab_show(struct kmem_cache *s, char *buf)
7146{
7147 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
7148}
7149
7150static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
7151 size_t length)
7152{
7153 if (s->refcount > 1)
7154 return -EINVAL;
7155
7156 if (buf[0] == '1')
7157 WRITE_ONCE(s->flags, s->flags | SLAB_FAILSLAB);
7158 else
7159 WRITE_ONCE(s->flags, s->flags & ~SLAB_FAILSLAB);
7160
7161 return length;
7162}
7163SLAB_ATTR(failslab);
7164#endif
7165
7166static ssize_t shrink_show(struct kmem_cache *s, char *buf)
7167{
7168 return 0;
7169}
7170
7171static ssize_t shrink_store(struct kmem_cache *s,
7172 const char *buf, size_t length)
7173{
7174 if (buf[0] == '1')
7175 kmem_cache_shrink(s);
7176 else
7177 return -EINVAL;
7178 return length;
7179}
7180SLAB_ATTR(shrink);
7181
7182#ifdef CONFIG_NUMA
7183static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
7184{
7185 return sysfs_emit(buf, "%u\n", s->remote_node_defrag_ratio / 10);
7186}
7187
7188static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
7189 const char *buf, size_t length)
7190{
7191 unsigned int ratio;
7192 int err;
7193
7194 err = kstrtouint(buf, 10, &ratio);
7195 if (err)
7196 return err;
7197 if (ratio > 100)
7198 return -ERANGE;
7199
7200 s->remote_node_defrag_ratio = ratio * 10;
7201
7202 return length;
7203}
7204SLAB_ATTR(remote_node_defrag_ratio);
7205#endif
7206
7207#ifdef CONFIG_SLUB_STATS
7208static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
7209{
7210 unsigned long sum = 0;
7211 int cpu;
7212 int len = 0;
7213 int *data = kmalloc_array(nr_cpu_ids, sizeof(int), GFP_KERNEL);
7214
7215 if (!data)
7216 return -ENOMEM;
7217
7218 for_each_online_cpu(cpu) {
7219		unsigned int x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
7220
7221 data[cpu] = x;
7222 sum += x;
7223 }
7224
7225 len += sysfs_emit_at(buf, len, "%lu", sum);
7226
7227#ifdef CONFIG_SMP
7228 for_each_online_cpu(cpu) {
7229 if (data[cpu])
7230 len += sysfs_emit_at(buf, len, " C%d=%u",
7231 cpu, data[cpu]);
7232 }
7233#endif
7234 kfree(data);
7235 len += sysfs_emit_at(buf, len, "\n");
7236
7237 return len;
7238}
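/*
 * Example output (illustrative): "1024 C0=600 C1=424" -- the summed
 * event count first, then the nonzero per-cpu counts on SMP builds.
 */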
7239
7240static void clear_stat(struct kmem_cache *s, enum stat_item si)
7241{
7242 int cpu;
7243
7244 for_each_online_cpu(cpu)
7245 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
7246}
7247
7248#define STAT_ATTR(si, text) \
7249static ssize_t text##_show(struct kmem_cache *s, char *buf) \
7250{ \
7251 return show_stat(s, buf, si); \
7252} \
7253static ssize_t text##_store(struct kmem_cache *s, \
7254 const char *buf, size_t length) \
7255{ \
7256 if (buf[0] != '0') \
7257 return -EINVAL; \
7258 clear_stat(s, si); \
7259 return length; \
7260} \
7261SLAB_ATTR(text);
7262
7263STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
7264STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
7265STAT_ATTR(FREE_FASTPATH, free_fastpath);
7266STAT_ATTR(FREE_SLOWPATH, free_slowpath);
7267STAT_ATTR(FREE_FROZEN, free_frozen);
7268STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
7269STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
7270STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
7271STAT_ATTR(ALLOC_SLAB, alloc_slab);
7272STAT_ATTR(ALLOC_REFILL, alloc_refill);
7273STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
7274STAT_ATTR(FREE_SLAB, free_slab);
7275STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
7276STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
7277STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
7278STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
7279STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
7280STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
7281STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
7282STAT_ATTR(ORDER_FALLBACK, order_fallback);
7283STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
7284STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
7285STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
7286STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
7287STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
7288STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
7289#endif /* CONFIG_SLUB_STATS */
7290
7291#ifdef CONFIG_KFENCE
7292static ssize_t skip_kfence_show(struct kmem_cache *s, char *buf)
7293{
7294 return sysfs_emit(buf, "%d\n", !!(s->flags & SLAB_SKIP_KFENCE));
7295}
7296
7297static ssize_t skip_kfence_store(struct kmem_cache *s,
7298 const char *buf, size_t length)
7299{
7300 int ret = length;
7301
7302 if (buf[0] == '0')
7303 s->flags &= ~SLAB_SKIP_KFENCE;
7304 else if (buf[0] == '1')
7305 s->flags |= SLAB_SKIP_KFENCE;
7306 else
7307 ret = -EINVAL;
7308
7309 return ret;
7310}
7311SLAB_ATTR(skip_kfence);
7312#endif
7313
7314static struct attribute *slab_attrs[] = {
7315 &slab_size_attr.attr,
7316 &object_size_attr.attr,
7317 &objs_per_slab_attr.attr,
7318 &order_attr.attr,
7319 &min_partial_attr.attr,
7320 &cpu_partial_attr.attr,
7321 &objects_partial_attr.attr,
7322 &partial_attr.attr,
7323 &cpu_slabs_attr.attr,
7324 &ctor_attr.attr,
7325 &aliases_attr.attr,
7326 &align_attr.attr,
7327 &hwcache_align_attr.attr,
7328 &reclaim_account_attr.attr,
7329 &destroy_by_rcu_attr.attr,
7330 &shrink_attr.attr,
7331 &slabs_cpu_partial_attr.attr,
7332#ifdef CONFIG_SLUB_DEBUG
7333 &total_objects_attr.attr,
7334 &objects_attr.attr,
7335 &slabs_attr.attr,
7336 &sanity_checks_attr.attr,
7337 &trace_attr.attr,
7338 &red_zone_attr.attr,
7339 &poison_attr.attr,
7340 &store_user_attr.attr,
7341 &validate_attr.attr,
7342#endif
7343#ifdef CONFIG_ZONE_DMA
7344 &cache_dma_attr.attr,
7345#endif
7346#ifdef CONFIG_NUMA
7347 &remote_node_defrag_ratio_attr.attr,
7348#endif
7349#ifdef CONFIG_SLUB_STATS
7350 &alloc_fastpath_attr.attr,
7351 &alloc_slowpath_attr.attr,
7352 &free_fastpath_attr.attr,
7353 &free_slowpath_attr.attr,
7354 &free_frozen_attr.attr,
7355 &free_add_partial_attr.attr,
7356 &free_remove_partial_attr.attr,
7357 &alloc_from_partial_attr.attr,
7358 &alloc_slab_attr.attr,
7359 &alloc_refill_attr.attr,
7360 &alloc_node_mismatch_attr.attr,
7361 &free_slab_attr.attr,
7362 &cpuslab_flush_attr.attr,
7363 &deactivate_full_attr.attr,
7364 &deactivate_empty_attr.attr,
7365 &deactivate_to_head_attr.attr,
7366 &deactivate_to_tail_attr.attr,
7367 &deactivate_remote_frees_attr.attr,
7368 &deactivate_bypass_attr.attr,
7369 &order_fallback_attr.attr,
7370 &cmpxchg_double_fail_attr.attr,
7371 &cmpxchg_double_cpu_fail_attr.attr,
7372 &cpu_partial_alloc_attr.attr,
7373 &cpu_partial_free_attr.attr,
7374 &cpu_partial_node_attr.attr,
7375 &cpu_partial_drain_attr.attr,
7376#endif
7377#ifdef CONFIG_FAILSLAB
7378 &failslab_attr.attr,
7379#endif
7380#ifdef CONFIG_HARDENED_USERCOPY
7381 &usersize_attr.attr,
7382#endif
7383#ifdef CONFIG_KFENCE
7384 &skip_kfence_attr.attr,
7385#endif
7386
7387 NULL
7388};
7389
7390static const struct attribute_group slab_attr_group = {
7391 .attrs = slab_attrs,
7392};
7393
7394static ssize_t slab_attr_show(struct kobject *kobj,
7395 struct attribute *attr,
7396 char *buf)
7397{
7398 struct slab_attribute *attribute;
7399 struct kmem_cache *s;
7400
7401 attribute = to_slab_attr(attr);
7402 s = to_slab(kobj);
7403
7404 if (!attribute->show)
7405 return -EIO;
7406
7407 return attribute->show(s, buf);
7408}
7409
7410static ssize_t slab_attr_store(struct kobject *kobj,
7411 struct attribute *attr,
7412 const char *buf, size_t len)
7413{
7414 struct slab_attribute *attribute;
7415 struct kmem_cache *s;
7416
7417 attribute = to_slab_attr(attr);
7418 s = to_slab(kobj);
7419
7420 if (!attribute->store)
7421 return -EIO;
7422
7423 return attribute->store(s, buf, len);
7424}
7425
7426static void kmem_cache_release(struct kobject *k)
7427{
7428 slab_kmem_cache_release(to_slab(k));
7429}
7430
7431static const struct sysfs_ops slab_sysfs_ops = {
7432 .show = slab_attr_show,
7433 .store = slab_attr_store,
7434};
7435
7436static const struct kobj_type slab_ktype = {
7437 .sysfs_ops = &slab_sysfs_ops,
7438 .release = kmem_cache_release,
7439};
7440
7441static struct kset *slab_kset;
7442
7443static inline struct kset *cache_kset(struct kmem_cache *s)
7444{
7445 return slab_kset;
7446}
7447
7448#define ID_STR_LENGTH 32
7449
7450/* Create a unique string id for a slab cache:
7451 *
7452 * Format: :[flags-]size
7453 */
7454static char *create_unique_id(struct kmem_cache *s)
7455{
7456 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
7457 char *p = name;
7458
7459 if (!name)
7460 return ERR_PTR(-ENOMEM);
7461
7462 *p++ = ':';
7463 /*
7464 * First flags affecting slabcache operations. We will only
7465 * get here for aliasable slabs so we do not need to support
7466 * too many flags. The flags here must cover all flags that
7467 * are matched during merging to guarantee that the id is
7468 * unique.
7469 */
7470 if (s->flags & SLAB_CACHE_DMA)
7471 *p++ = 'd';
7472 if (s->flags & SLAB_CACHE_DMA32)
7473 *p++ = 'D';
7474 if (s->flags & SLAB_RECLAIM_ACCOUNT)
7475 *p++ = 'a';
7476 if (s->flags & SLAB_CONSISTENCY_CHECKS)
7477 *p++ = 'F';
7478 if (s->flags & SLAB_ACCOUNT)
7479 *p++ = 'A';
7480 if (p != name + 1)
7481 *p++ = '-';
7482 p += snprintf(p, ID_STR_LENGTH - (p - name), "%07u", s->size);
7483
7484 if (WARN_ON(p > name + ID_STR_LENGTH - 1)) {
7485 kfree(name);
7486 return ERR_PTR(-EINVAL);
7487 }
7488 kmsan_unpoison_memory(name, p - name);
7489 return name;
7490}
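/*
 * Example (illustrative): a mergeable 4096-byte DMA cache produces the
 * id ":d-0004096" -- a leading ':', one character per relevant flag,
 * '-', then the object size padded to seven digits.
 */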
7491
7492static int sysfs_slab_add(struct kmem_cache *s)
7493{
7494 int err;
7495 const char *name;
7496 struct kset *kset = cache_kset(s);
7497 int unmergeable = slab_unmergeable(s);
7498
7499 if (!unmergeable && disable_higher_order_debug &&
7500 (slub_debug & DEBUG_METADATA_FLAGS))
7501 unmergeable = 1;
7502
7503 if (unmergeable) {
7504 /*
7505		 * The slab cache can never be merged, so we can use its name
7506		 * directly. This is typically the case in debug situations,
7507		 * where it lets us catch duplicate names easily.
7508 */
7509 sysfs_remove_link(&slab_kset->kobj, s->name);
7510 name = s->name;
7511 } else {
7512 /*
7513 * Create a unique name for the slab as a target
7514 * for the symlinks.
7515 */
7516 name = create_unique_id(s);
7517 if (IS_ERR(name))
7518 return PTR_ERR(name);
7519 }
7520
7521 s->kobj.kset = kset;
7522 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
7523 if (err)
7524 goto out;
7525
7526 err = sysfs_create_group(&s->kobj, &slab_attr_group);
7527 if (err)
7528 goto out_del_kobj;
7529
7530 if (!unmergeable) {
7531 /* Setup first alias */
7532 sysfs_slab_alias(s, s->name);
7533 }
7534out:
7535 if (!unmergeable)
7536 kfree(name);
7537 return err;
7538out_del_kobj:
7539 kobject_del(&s->kobj);
7540 goto out;
7541}
7542
7543void sysfs_slab_unlink(struct kmem_cache *s)
7544{
7545 if (s->kobj.state_in_sysfs)
7546 kobject_del(&s->kobj);
7547}
7548
7549void sysfs_slab_release(struct kmem_cache *s)
7550{
7551 kobject_put(&s->kobj);
7552}
7553
7554/*
7555 * Need to buffer aliases during bootup until sysfs becomes
7556 * available lest we lose that information.
7557 */
7558struct saved_alias {
7559 struct kmem_cache *s;
7560 const char *name;
7561 struct saved_alias *next;
7562};
7563
7564static struct saved_alias *alias_list;
7565
7566static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
7567{
7568 struct saved_alias *al;
7569
7570 if (slab_state == FULL) {
7571 /*
7572 * If we have a leftover link then remove it.
7573 */
7574 sysfs_remove_link(&slab_kset->kobj, name);
7575 /*
7576			 * The original cache may have failed to create its sysfs file.
7577			 * In that case, sysfs_create_link() returns -ENOENT and
7578			 * the symbolic link is simply not created.
7579 */
7580 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
7581 }
7582
7583 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
7584 if (!al)
7585 return -ENOMEM;
7586
7587 al->s = s;
7588 al->name = name;
7589 al->next = alias_list;
7590 alias_list = al;
7591 kmsan_unpoison_memory(al, sizeof(*al));
7592 return 0;
7593}
7594
7595static int __init slab_sysfs_init(void)
7596{
7597 struct kmem_cache *s;
7598 int err;
7599
7600 mutex_lock(&slab_mutex);
7601
7602 slab_kset = kset_create_and_add("slab", NULL, kernel_kobj);
7603 if (!slab_kset) {
7604 mutex_unlock(&slab_mutex);
7605 pr_err("Cannot register slab subsystem.\n");
7606 return -ENOMEM;
7607 }
7608
7609 slab_state = FULL;
7610
7611 list_for_each_entry(s, &slab_caches, list) {
7612 err = sysfs_slab_add(s);
7613 if (err)
7614 pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
7615 s->name);
7616 }
7617
7618 while (alias_list) {
7619 struct saved_alias *al = alias_list;
7620
7621 alias_list = alias_list->next;
7622 err = sysfs_slab_alias(al->s, al->name);
7623 if (err)
7624 pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
7625 al->name);
7626 kfree(al);
7627 }
7628
7629 mutex_unlock(&slab_mutex);
7630 return 0;
7631}
7632late_initcall(slab_sysfs_init);
7633#endif /* SLAB_SUPPORTS_SYSFS */
7634
7635#if defined(CONFIG_SLUB_DEBUG) && defined(CONFIG_DEBUG_FS)
7636static int slab_debugfs_show(struct seq_file *seq, void *v)
7637{
7638 struct loc_track *t = seq->private;
7639 struct location *l;
7640 unsigned long idx;
7641
7642 idx = (unsigned long) t->idx;
7643 if (idx < t->count) {
7644 l = &t->loc[idx];
7645
7646 seq_printf(seq, "%7ld ", l->count);
7647
7648 if (l->addr)
7649 seq_printf(seq, "%pS", (void *)l->addr);
7650 else
7651 seq_puts(seq, "<not-available>");
7652
7653 if (l->waste)
7654 seq_printf(seq, " waste=%lu/%lu",
7655 l->count * l->waste, l->waste);
7656
7657 if (l->sum_time != l->min_time) {
7658 seq_printf(seq, " age=%ld/%llu/%ld",
7659 l->min_time, div_u64(l->sum_time, l->count),
7660 l->max_time);
7661 } else
7662 seq_printf(seq, " age=%ld", l->min_time);
7663
7664 if (l->min_pid != l->max_pid)
7665 seq_printf(seq, " pid=%ld-%ld", l->min_pid, l->max_pid);
7666 else
7667 seq_printf(seq, " pid=%ld",
7668 l->min_pid);
7669
7670 if (num_online_cpus() > 1 && !cpumask_empty(to_cpumask(l->cpus)))
7671 seq_printf(seq, " cpus=%*pbl",
7672 cpumask_pr_args(to_cpumask(l->cpus)));
7673
7674 if (nr_online_nodes > 1 && !nodes_empty(l->nodes))
7675 seq_printf(seq, " nodes=%*pbl",
7676 nodemask_pr_args(&l->nodes));
7677
7678#ifdef CONFIG_STACKDEPOT
7679 {
7680 depot_stack_handle_t handle;
7681 unsigned long *entries;
7682 unsigned int nr_entries, j;
7683
7684 handle = READ_ONCE(l->handle);
7685 if (handle) {
7686 nr_entries = stack_depot_fetch(handle, &entries);
7687 seq_puts(seq, "\n");
7688 for (j = 0; j < nr_entries; j++)
7689 seq_printf(seq, " %pS\n", (void *)entries[j]);
7690 }
7691 }
7692#endif
7693 seq_puts(seq, "\n");
7694 }
7695
7696 if (!idx && !t->count)
7697 seq_puts(seq, "No data\n");
7698
7699 return 0;
7700}
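/*
 * Sample line (illustrative) from <debugfs>/slab/<cache>/alloc_traces:
 *
 *    1205 kmem_cache_alloc+0x12/0xa0 age=3/1405/9582 pid=1-203 cpus=0-3
 *
 * i.e. count, call site, min/avg/max age, pid range and cpu mask, with
 * the saved stack trace appended when stackdepot is available.
 */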
7701
7702static void slab_debugfs_stop(struct seq_file *seq, void *v)
7703{
7704}
7705
7706static void *slab_debugfs_next(struct seq_file *seq, void *v, loff_t *ppos)
7707{
7708 struct loc_track *t = seq->private;
7709
7710 t->idx = ++(*ppos);
7711 if (*ppos <= t->count)
7712 return ppos;
7713
7714 return NULL;
7715}
7716
7717static int cmp_loc_by_count(const void *a, const void *b, const void *data)
7718{
7719 struct location *loc1 = (struct location *)a;
7720 struct location *loc2 = (struct location *)b;
7721
7722 if (loc1->count > loc2->count)
7723 return -1;
7724 else
7725 return 1;
7726}
7727
7728static void *slab_debugfs_start(struct seq_file *seq, loff_t *ppos)
7729{
7730 struct loc_track *t = seq->private;
7731
7732 t->idx = *ppos;
7733 return ppos;
7734}
7735
7736static const struct seq_operations slab_debugfs_sops = {
7737 .start = slab_debugfs_start,
7738 .next = slab_debugfs_next,
7739 .stop = slab_debugfs_stop,
7740 .show = slab_debugfs_show,
7741};
7742
7743static int slab_debug_trace_open(struct inode *inode, struct file *filep)
7744{
7746 struct kmem_cache_node *n;
7747 enum track_item alloc;
7748 int node;
7749 struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
7750 sizeof(struct loc_track));
7751 struct kmem_cache *s = file_inode(filep)->i_private;
7752 unsigned long *obj_map;
7753
7754 if (!t)
7755 return -ENOMEM;
7756
7757 obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
7758 if (!obj_map) {
7759 seq_release_private(inode, filep);
7760 return -ENOMEM;
7761 }
7762
7763 alloc = debugfs_get_aux_num(filep);
7764
7765 if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL)) {
7766 bitmap_free(obj_map);
7767 seq_release_private(inode, filep);
7768 return -ENOMEM;
7769 }
7770
7771 for_each_kmem_cache_node(s, node, n) {
7772 unsigned long flags;
7773 struct slab *slab;
7774
7775 if (!node_nr_slabs(n))
7776 continue;
7777
7778 spin_lock_irqsave(&n->list_lock, flags);
7779 list_for_each_entry(slab, &n->partial, slab_list)
7780 process_slab(t, s, slab, alloc, obj_map);
7781 list_for_each_entry(slab, &n->full, slab_list)
7782 process_slab(t, s, slab, alloc, obj_map);
7783 spin_unlock_irqrestore(&n->list_lock, flags);
7784 }
7785
7786 /* Sort locations by count */
7787 sort_r(t->loc, t->count, sizeof(struct location),
7788 cmp_loc_by_count, NULL, NULL);
7789
7790 bitmap_free(obj_map);
7791 return 0;
7792}
7793
7794static int slab_debug_trace_release(struct inode *inode, struct file *file)
7795{
7796 struct seq_file *seq = file->private_data;
7797 struct loc_track *t = seq->private;
7798
7799 free_loc_track(t);
7800 return seq_release_private(inode, file);
7801}
7802
7803static const struct file_operations slab_debugfs_fops = {
7804 .open = slab_debug_trace_open,
7805 .read = seq_read,
7806 .llseek = seq_lseek,
7807 .release = slab_debug_trace_release,
7808};
7809
7810static void debugfs_slab_add(struct kmem_cache *s)
7811{
7812 struct dentry *slab_cache_dir;
7813
7814 if (unlikely(!slab_debugfs_root))
7815 return;
7816
7817 slab_cache_dir = debugfs_create_dir(s->name, slab_debugfs_root);
7818
7819 debugfs_create_file_aux_num("alloc_traces", 0400, slab_cache_dir, s,
7820 TRACK_ALLOC, &slab_debugfs_fops);
7821
7822 debugfs_create_file_aux_num("free_traces", 0400, slab_cache_dir, s,
7823 TRACK_FREE, &slab_debugfs_fops);
7824}
7825
7826void debugfs_slab_release(struct kmem_cache *s)
7827{
7828 debugfs_lookup_and_remove(s->name, slab_debugfs_root);
7829}
7830
7831static int __init slab_debugfs_init(void)
7832{
7833 struct kmem_cache *s;
7834
7835 slab_debugfs_root = debugfs_create_dir("slab", NULL);
7836
7837 list_for_each_entry(s, &slab_caches, list)
7838 if (s->flags & SLAB_STORE_USER)
7839 debugfs_slab_add(s);
7840
7841 return 0;
7843}
7844__initcall(slab_debugfs_init);
7845#endif

7846/*
7847 * The /proc/slabinfo ABI
7848 */
7849#ifdef CONFIG_SLUB_DEBUG
7850void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
7851{
7852 unsigned long nr_slabs = 0;
7853 unsigned long nr_objs = 0;
7854 unsigned long nr_free = 0;
7855 int node;
7856 struct kmem_cache_node *n;
7857
7858 for_each_kmem_cache_node(s, node, n) {
7859 nr_slabs += node_nr_slabs(n);
7860 nr_objs += node_nr_objs(n);
7861 nr_free += count_partial_free_approx(n);
7862 }
7863
7864 sinfo->active_objs = nr_objs - nr_free;
7865 sinfo->num_objs = nr_objs;
7866 sinfo->active_slabs = nr_slabs;
7867 sinfo->num_slabs = nr_slabs;
7868 sinfo->objects_per_slab = oo_objects(s->oo);
7869 sinfo->cache_order = oo_order(s->oo);
7870}
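/*
 * Illustrative note: these fields back the per-cache line emitted by the
 * common /proc/slabinfo code (active objects, total objects, objects per
 * slab, pages per slab). nr_free is only approximated from the partial
 * lists, so active_objs is an estimate rather than an exact count.
 */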
7871#endif /* CONFIG_SLUB_DEBUG */