/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/cache.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
#include <linux/hash.h>

enum _slab_flag_bits {
	_SLAB_CONSISTENCY_CHECKS,
	_SLAB_RED_ZONE,
	_SLAB_POISON,
	_SLAB_KMALLOC,
	_SLAB_HWCACHE_ALIGN,
	_SLAB_CACHE_DMA,
	_SLAB_CACHE_DMA32,
	_SLAB_STORE_USER,
	_SLAB_PANIC,
	_SLAB_TYPESAFE_BY_RCU,
	_SLAB_TRACE,
#ifdef CONFIG_DEBUG_OBJECTS
	_SLAB_DEBUG_OBJECTS,
#endif
	_SLAB_NOLEAKTRACE,
	_SLAB_NO_MERGE,
#ifdef CONFIG_FAILSLAB
	_SLAB_FAILSLAB,
#endif
#ifdef CONFIG_MEMCG
	_SLAB_ACCOUNT,
#endif
#ifdef CONFIG_KASAN_GENERIC
	_SLAB_KASAN,
#endif
	_SLAB_NO_USER_FLAGS,
#ifdef CONFIG_KFENCE
	_SLAB_SKIP_KFENCE,
#endif
#ifndef CONFIG_SLUB_TINY
	_SLAB_RECLAIM_ACCOUNT,
#endif
	_SLAB_OBJECT_POISON,
	_SLAB_CMPXCHG_DOUBLE,
#ifdef CONFIG_SLAB_OBJ_EXT
	_SLAB_NO_OBJ_EXT,
#endif
	_SLAB_FLAGS_LAST_BIT
};

#define __SLAB_FLAG_BIT(nr)	((slab_flags_t __force)(1U << (nr)))
#define __SLAB_FLAG_UNUSED	((slab_flags_t __force)(0U))

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG need CONFIG_SLUB_DEBUG enabled, otherwise they are no-ops.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	__SLAB_FLAG_BIT(_SLAB_CONSISTENCY_CHECKS)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		__SLAB_FLAG_BIT(_SLAB_RED_ZONE)
/* DEBUG: Poison objects */
#define SLAB_POISON		__SLAB_FLAG_BIT(_SLAB_POISON)
/* Indicate a kmalloc slab */
#define SLAB_KMALLOC		__SLAB_FLAG_BIT(_SLAB_KMALLOC)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	__SLAB_FLAG_BIT(_SLAB_HWCACHE_ALIGN)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		__SLAB_FLAG_BIT(_SLAB_CACHE_DMA)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	__SLAB_FLAG_BIT(_SLAB_CACHE_DMA32)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		__SLAB_FLAG_BIT(_SLAB_STORE_USER)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		__SLAB_FLAG_BIT(_SLAB_PANIC)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 * begin:
 *  rcu_read_lock();
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) { // might fail for free objects
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      rcu_read_unlock();
 *      goto begin;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that it is not possible to acquire a lock within a structure
 * allocated with SLAB_TYPESAFE_BY_RCU without first acquiring a reference
 * as described above. The reason is that SLAB_TYPESAFE_BY_RCU pages
 * are not zeroed before being given to the slab, which means that any
 * locks must be initialized after each and every kmem_cache_alloc().
 * Alternatively, make the ctor passed to kmem_cache_create() initialize
 * the locks at page-allocation time, as is done in __i915_request_ctor(),
 * sighand_ctor(), and anon_vma_ctor(). Such a ctor permits readers
 * to safely acquire those ctor-initialized locks under rcu_read_lock()
 * protection.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	__SLAB_FLAG_BIT(_SLAB_TYPESAFE_BY_RCU)
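
/*
 * For illustration, a minimal sketch of the ctor pattern described above.
 * "struct foo", foo_ctor() and foo_cache are hypothetical names; the ctor
 * runs once per slab page, so only the lock is initialized here, while
 * per-allocation state (refcount, key) must still be set up after every
 * kmem_cache_alloc():
 *
 *	struct foo {
 *		spinlock_t lock;	// safe to take under rcu_read_lock()
 *		refcount_t ref;		// reset after each allocation
 *		unsigned long key;
 *	};
 *
 *	static void foo_ctor(void *addr)
 *	{
 *		struct foo *f = addr;
 *
 *		spin_lock_init(&f->lock);
 *	}
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_TYPESAFE_BY_RCU, foo_ctor);
 *
 * A reader may then take f->lock under rcu_read_lock() and re-check f->key
 * (or a refcount obtained with refcount_inc_not_zero()) before trusting the
 * object, as in the lookup example above.
 */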
/* Trace allocations and frees */
#define SLAB_TRACE		__SLAB_FLAG_BIT(_SLAB_TRACE)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_BIT(_SLAB_DEBUG_OBJECTS)
#else
# define SLAB_DEBUG_OBJECTS	__SLAB_FLAG_UNUSED
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	__SLAB_FLAG_BIT(_SLAB_NOLEAKTRACE)

/*
 * Prevent merging with compatible kmem caches. This flag should be used
 * cautiously. Valid use cases:
 *
 * - caches created for self-tests (e.g. kunit)
 * - general caches created and used by a subsystem, only when a
 *   (subsystem-specific) debug option is enabled
 * - performance critical caches, should be very rare and consulted with slab
 *   maintainers, and not used together with CONFIG_SLUB_TINY
 */
#define SLAB_NO_MERGE		__SLAB_FLAG_BIT(_SLAB_NO_MERGE)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		__SLAB_FLAG_BIT(_SLAB_FAILSLAB)
#else
# define SLAB_FAILSLAB		__SLAB_FLAG_UNUSED
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG
# define SLAB_ACCOUNT		__SLAB_FLAG_BIT(_SLAB_ACCOUNT)
#else
# define SLAB_ACCOUNT		__SLAB_FLAG_UNUSED
#endif

#ifdef CONFIG_KASAN_GENERIC
#define SLAB_KASAN		__SLAB_FLAG_BIT(_SLAB_KASAN)
#else
#define SLAB_KASAN		__SLAB_FLAG_UNUSED
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	__SLAB_FLAG_BIT(_SLAB_NO_USER_FLAGS)

#ifdef CONFIG_KFENCE
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_BIT(_SLAB_SKIP_KFENCE)
#else
#define SLAB_SKIP_KFENCE	__SLAB_FLAG_UNUSED
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#ifndef CONFIG_SLUB_TINY
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_BIT(_SLAB_RECLAIM_ACCOUNT)
#else
#define SLAB_RECLAIM_ACCOUNT	__SLAB_FLAG_UNUSED
#endif
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab created using create_boot_cache */
#ifdef CONFIG_SLAB_OBJ_EXT
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_BIT(_SLAB_NO_OBJ_EXT)
#else
#define SLAB_NO_OBJ_EXT		__SLAB_FLAG_UNUSED
#endif
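
/*
 * For illustration, a hedged sketch of how the flags above are commonly
 * combined when creating a cache ("struct request_ctx" and req_cachep are
 * hypothetical names):
 *
 *	req_cachep = kmem_cache_create("request_ctx",
 *				       sizeof(struct request_ctx), 0,
 *				       SLAB_HWCACHE_ALIGN |	// align objects to cache lines
 *				       SLAB_ACCOUNT |		// charge objects to memcg
 *				       SLAB_PANIC,		// panic instead of returning NULL
 *				       NULL);
 *
 * SLAB_PANIC removes the need for error handling at the call site, and
 * SLAB_ACCOUNT compiles away to zero when CONFIG_MEMCG is disabled.
 */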

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)
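
/*
 * For illustration, a minimal sketch of the KMEM_CACHE() shortcut (struct
 * io_job and io_job_cachep are hypothetical names). The cache takes its
 * name, object size and alignment from the struct definition itself:
 *
 *	struct io_job {
 *		struct list_head list;
 *		u64 tag;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *io_job_cachep;
 *
 *	io_job_cachep = KMEM_CACHE(io_job, SLAB_HWCACHE_ALIGN);
 *	if (!io_job_cachep)
 *		return -ENOMEM;
 *
 *	struct io_job *job = kmem_cache_zalloc(io_job_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(io_job_cachep, job);
 *	kmem_cache_destroy(io_job_cachep);
 */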

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc_noprof(const void *objp, size_t new_size,
				    gfp_t flags) __realloc_size(2);
#define krealloc(...)			alloc_hooks(krealloc_noprof(__VA_ARGS__))

void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);

DEFINE_FREE(kfree, void *, if (!IS_ERR_OR_NULL(_T)) kfree(_T))

/**
 * ksize - Report actual allocation size of associated object
 *
 * @objp: Pointer returned from a prior kmalloc()-family allocation.
 *
 * This should not be used for writing beyond the originally requested
 * allocation size. Either use krealloc() or round up the allocation size
 * with kmalloc_size_roundup() prior to allocation. If this is used to
 * access beyond the originally requested allocation size, UBSAN_BOUNDS
 * and/or FORTIFY_SOURCE may trip, since they only know about the
 * originally allocated size via the __alloc_size attribute.
 */
size_t ksize(const void *objp);

#ifdef CONFIG_PRINTK
bool kmem_dump_obj(void *object);
#else
static inline bool kmem_dump_obj(void *object) { return false; }
#endif
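
/*
 * The DEFINE_FREE(kfree, ...) declaration above enables scope-based cleanup
 * from <linux/cleanup.h>. For illustration, a hedged sketch (parse_config()
 * and CONFIG_MAGIC are hypothetical): the buffer is freed automatically on
 * every return path unless ownership is handed out with return_ptr() or
 * no_free_ptr().
 *
 *	int parse_config(const u8 *src, size_t len)
 *	{
 *		u8 *buf __free(kfree) = kmalloc(len, GFP_KERNEL);
 *
 *		if (!buf)
 *			return -ENOMEM;
 *		memcpy(buf, src, len);
 *		if (buf[0] != CONFIG_MAGIC)	// hypothetical constant
 *			return -EINVAL;		// buf is kfree()d here automatically
 *		return 0;			// ... and here as well
 *	}
 */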

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#ifdef ARCH_HAS_DMA_MINALIGN
#if ARCH_DMA_MINALIGN > 8 && !defined(ARCH_KMALLOC_MINALIGN)
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#endif
#endif

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#elif ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_PAGE_ORDER + PAGE_SHIFT)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using a byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

#ifdef CONFIG_RANDOM_KMALLOC_CACHES
#define RANDOM_KMALLOC_CACHES_NR	15 // # of cache copies
#else
#define RANDOM_KMALLOC_CACHES_NR	0
#endif

/*
 * Whenever changing this, make sure that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
	KMALLOC_RANDOM_START = KMALLOC_NORMAL,
	KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
	KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
	KMALLOC_RECLAIM,
#endif
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
#ifdef CONFIG_MEMCG
	KMALLOC_CGROUP,
#endif
	NR_KMALLOC_TYPES
};

typedef struct kmem_cache * kmem_buckets[KMALLOC_SHIFT_HIGH + 1];

extern kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) |		\
	(IS_ENABLED(CONFIG_MEMCG) ? __GFP_ACCOUNT : 0))

extern unsigned long random_kmalloc_seed;

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
#ifdef CONFIG_RANDOM_KMALLOC_CACHES
		/* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
		return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
						      ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
#else
		return KMALLOC_NORMAL;
#endif

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}
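
/*
 * For illustration, how the branches above map common gfp flags to a cache
 * type (assuming CONFIG_ZONE_DMA and CONFIG_MEMCG are enabled and
 * CONFIG_RANDOM_KMALLOC_CACHES is not):
 *
 *	kmalloc_type(GFP_KERNEL, _RET_IP_)                 == KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_ACCOUNT, ...)      == KMALLOC_CGROUP
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE, ...)  == KMALLOC_RECLAIM
 *	kmalloc_type(GFP_DMA, ...)                         == KMALLOC_DMA
 *
 * __GFP_DMA wins over __GFP_RECLAIMABLE, which wins over __GFP_ACCOUNT,
 * matching the priority order documented in the function.
 */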

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
static_assert(PAGE_SHIFT <= 20);
#define kmalloc_index(s) __kmalloc_index(s, true)

#include <linux/alloc_tag.h>

/**
 * kmem_cache_alloc - Allocate an object
 * @cachep: The cache to allocate from.
 * @flags: See kmalloc().
 *
 * Allocate an object from this cache.
 * See kmem_cache_zalloc() for a shortcut of adding __GFP_ZERO to flags.
 *
 * Return: pointer to the new object or %NULL in case of error
 */
void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
			      gfp_t flags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc(...)		alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))

void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t gfpflags) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))

void kmem_cache_free(struct kmem_cache *s, void *objp);

kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
				  unsigned int useroffset, unsigned int usersize,
				  void (*ctor)(void *));

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);

int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
#define kmem_cache_alloc_bulk(...)	alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))

static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}
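
/*
 * For illustration, a hedged sketch of the bulk API above (obj_cache and the
 * batch size are hypothetical). kmem_cache_alloc_bulk() returns the number of
 * objects actually allocated, which may be 0 on failure:
 *
 *	void *batch[16];
 *	int n;
 *
 *	n = kmem_cache_alloc_bulk(obj_cache, GFP_KERNEL, ARRAY_SIZE(batch), batch);
 *	if (!n)
 *		return -ENOMEM;
 *	// ... use batch[0..n-1] ...
 *	kmem_cache_free_bulk(obj_cache, n, batch);
 */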

void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
				   int node) __assume_slab_alignment __malloc;
#define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))

/*
 * These macros allow declaring a kmem_buckets * parameter alongside size, which
 * can be compiled out with CONFIG_SLAB_BUCKETS=n so that a large number of call
 * sites don't have to pass NULL.
 */
#ifdef CONFIG_SLAB_BUCKETS
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size), kmem_buckets *(_b)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size), (_b)
#define PASS_BUCKET_PARAM(_b)		(_b)
#else
#define DECL_BUCKET_PARAMS(_size, _b)	size_t (_size)
#define PASS_BUCKET_PARAMS(_size, _b)	(_size)
#define PASS_BUCKET_PARAM(_b)		NULL
#endif

/*
 * The following functions are not to be used directly and are intended only
 * for internal use from kmalloc() and kmalloc_node()
 * with the exception of kunit tests
 */

void *__kmalloc_noprof(size_t size, gfp_t flags)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node)
				__assume_kmalloc_alignment __alloc_size(1);

void *__kmalloc_cache_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
				__assume_kmalloc_alignment __alloc_size(3);

void *__kmalloc_cache_node_noprof(struct kmem_cache *s, gfp_t gfpflags,
				  int node, size_t size)
				__assume_kmalloc_alignment __alloc_size(4);

void *__kmalloc_large_noprof(size_t size, gfp_t flags)
				__assume_page_alignment __alloc_size(1);

void *__kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
				__assume_page_alignment __alloc_size(1);

/**
 * kmalloc - allocate kernel memory
 * @size: how many bytes of memory are required.
 * @flags: describe the allocation context
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For a @size that is a power of two, the alignment is also guaranteed
 * to be at least the size. For other sizes, the alignment is guaranteed to
 * be at least the largest power-of-two divisor of @size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp_types.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags:
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_ZERO
 *	Zero the allocated memory before returning. Also see kzalloc().
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but eventually fail.
 */
static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_noprof(size, flags);

		index = kmalloc_index(size);
		return __kmalloc_cache_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, size);
	}
	return __kmalloc_noprof(size, flags);
}
#define kmalloc(...)			alloc_hooks(kmalloc_noprof(__VA_ARGS__))
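
/*
 * For illustration, typical use of kmalloc() with the flags documented above
 * (struct reply is a hypothetical type):
 *
 *	struct reply *r;
 *
 *	r = kmalloc(sizeof(*r), GFP_KERNEL);	// may sleep
 *	if (!r)
 *		return -ENOMEM;
 *
 *	// from atomic context, e.g. under a spinlock:
 *	r = kmalloc(sizeof(*r), GFP_ATOMIC);
 *
 *	// zeroed allocation, equivalent to kzalloc(sizeof(*r), GFP_KERNEL):
 *	r = kmalloc(sizeof(*r), GFP_KERNEL | __GFP_ZERO);
 *
 *	kfree(r);
 */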

#define kmem_buckets_alloc(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

#define kmem_buckets_alloc_track_caller(_b, _size, _flags)	\
	alloc_hooks(__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE, _RET_IP_))

static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
{
	if (__builtin_constant_p(size) && size) {
		unsigned int index;

		if (size > KMALLOC_MAX_CACHE_SIZE)
			return __kmalloc_large_node_noprof(size, flags, node);

		index = kmalloc_index(size);
		return __kmalloc_cache_node_noprof(
				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
				flags, node, size);
	}
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node);
}
#define kmalloc_node(...)		alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_noprof(bytes, flags);
	return kmalloc_noprof(bytes, flags);
}
#define kmalloc_array(...)		alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
								       size_t new_n,
								       size_t new_size,
								       gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc_noprof(p, bytes, flags);
}
#define krealloc_array(...)		alloc_hooks(krealloc_array_noprof(__VA_ARGS__))

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
#define kcalloc(n, size, flags)		kmalloc_array(n, size, (flags) | __GFP_ZERO)
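
/*
 * For illustration, array allocations with the helpers above (struct entry
 * and nr_entries are hypothetical). The element-count multiplication is
 * overflow-checked, so these are preferred over open-coded kmalloc(n * size):
 *
 *	struct entry *tbl;
 *
 *	tbl = kmalloc_array(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *	tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);	// zeroed variant
 *	tbl = krealloc_array(tbl, new_nr, sizeof(*tbl), GFP_KERNEL);
 *
 * Each call returns NULL if the element count times the element size would
 * overflow size_t.
 */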

void *__kmalloc_node_track_caller_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller_noprof(size, flags, node, caller) \
	__kmalloc_node_track_caller_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node, caller)
#define kmalloc_node_track_caller(...)		\
	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))

/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#define kmalloc_track_caller(...)	kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)

#define kmalloc_track_caller_noprof(...)	\
		kmalloc_node_track_caller_noprof(__VA_ARGS__, NUMA_NO_NODE, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
								  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node_noprof(bytes, flags, node);
	return __kmalloc_node_noprof(PASS_BUCKET_PARAMS(bytes, NULL), flags, node);
}
#define kmalloc_array_node(...)		alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))

#define kcalloc_node(_n, _size, _flags, _node)	\
	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)

/*
 * Shortcuts
 */
#define kmem_cache_zalloc(_k, _flags)	kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
{
	return kmalloc_noprof(size, flags | __GFP_ZERO);
}
#define kzalloc(...)			alloc_hooks(kzalloc_noprof(__VA_ARGS__))
#define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)

void *__kvmalloc_node_noprof(DECL_BUCKET_PARAMS(size, b), gfp_t flags, int node) __alloc_size(1);
#define kvmalloc_node_noprof(size, flags, node)	\
	__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(size, NULL), flags, node)
#define kvmalloc_node(...)		alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))

#define kvmalloc(_size, _flags)		kvmalloc_node(_size, _flags, NUMA_NO_NODE)
#define kvmalloc_noprof(_size, _flags)	kvmalloc_node_noprof(_size, _flags, NUMA_NO_NODE)
#define kvzalloc(_size, _flags)		kvmalloc(_size, (_flags)|__GFP_ZERO)

#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
#define kmem_buckets_valloc(_b, _size, _flags)	\
	alloc_hooks(__kvmalloc_node_noprof(PASS_BUCKET_PARAMS(_size, _b), _flags, NUMA_NO_NODE))

static inline __alloc_size(1, 2) void *
kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc_node_noprof(bytes, flags, node);
}
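
/*
 * For illustration, the kvmalloc() family above may fall back to a
 * vmalloc()-backed buffer when a physically contiguous allocation is not
 * possible, so the result must be freed with kvfree() (struct item and
 * nr_items are hypothetical):
 *
 *	struct item *items;
 *
 *	items = kvcalloc(nr_items, sizeof(*items), GFP_KERNEL);
 *	if (!items)
 *		return -ENOMEM;
 *	...
 *	kvfree(items);
 */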

#define kvmalloc_array_noprof(...)		kvmalloc_array_node_noprof(__VA_ARGS__, NUMA_NO_NODE)
#define kvcalloc_node_noprof(_n,_s,_f,_node)	kvmalloc_array_node_noprof(_n,_s,(_f)|__GFP_ZERO,_node)
#define kvcalloc_noprof(...)			kvcalloc_node_noprof(__VA_ARGS__, NUMA_NO_NODE)

#define kvmalloc_array(...)			alloc_hooks(kvmalloc_array_noprof(__VA_ARGS__))
#define kvcalloc_node(...)			alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__))
#define kvcalloc(...)				alloc_hooks(kvcalloc_noprof(__VA_ARGS__))

extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		__realloc_size(3);
#define kvrealloc(...)				alloc_hooks(kvrealloc_noprof(__VA_ARGS__))

extern void kvfree(const void *addr);
DEFINE_FREE(kvfree, void *, if (!IS_ERR_OR_NULL(_T)) kvfree(_T))

extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);

/**
 * kmalloc_size_roundup - Report allocation bucket size for the given size
 *
 * @size: Number of bytes to round up from.
 *
 * This returns the number of bytes that would be available in a kmalloc()
 * allocation of @size bytes. For example, a 126 byte request would be
 * rounded up to the next sized kmalloc bucket, 128 bytes. (This is strictly
 * for the general-purpose kmalloc()-based allocations, and is not for the
 * pre-sized kmem_cache_alloc()-based allocations.)
 *
 * Use this to kmalloc() the full bucket size ahead of time instead of using
 * ksize() to query the size after an allocation.
 */
size_t kmalloc_size_roundup(size_t size);

void __init kmem_cache_init_late(void);

#endif	/* _LINUX_SLAB_H */