/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period, it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid, the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *	rcu_read_lock()
 * again:
 *	obj = lockless_lookup(key);
 *	if (obj) {
 *		if (!try_get_ref(obj)) // might fail for free objects
 *			goto again;
 *
 *		if (obj->key != key) { // not the object we expected
 *			put_ref(obj);
 *			goto again;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * only if we can be sure that the memory has not been meanwhile reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
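/*
 * Illustrative sketch of the validation pattern above, using the generic
 * refcount API. "struct conn", its lookup helper conn_lookup() and
 * conn_put() are hypothetical names chosen only for this example:
 *
 *	struct conn {
 *		u32		key;
 *		refcount_t	ref;
 *	};
 *
 *	struct conn *conn_get(u32 key)
 *	{
 *		struct conn *c;
 *
 *		rcu_read_lock();
 *	again:
 *		c = conn_lookup(key);		// lockless hash lookup
 *		if (c) {
 *			if (!refcount_inc_not_zero(&c->ref))
 *				goto again;	// raced with a free
 *			if (c->key != key) {	// slot was reused
 *				conn_put(c);
 *				goto again;
 *			}
 *		}
 *		rcu_read_unlock();
 *		return c;
 *	}
 */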
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

extern bool usercopy_fallback;

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);

void memcg_create_kmem_cache(struct mem_cgroup *, struct kmem_cache *);
void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)
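/*
 * Illustrative cache lifecycle using KMEM_CACHE(). "struct foo" and the
 * foo_cachep variable are hypothetical names, chosen only for the example:
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */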
/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t __ksize(const void *);
size_t ksize(const void *);

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif
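/*
 * Worked example (assuming a common SLUB configuration with 4 KiB pages
 * and MAX_ORDER = 11): PAGE_SHIFT = 12, so KMALLOC_SHIFT_HIGH = 13 and
 * the largest kmalloc slab cache holds 2^13 = 8 KiB objects, while
 * KMALLOC_SHIFT_MAX = 11 + 12 - 1 = 22, so kmalloc() can hand out up to
 * 2^22 = 4 MiB by falling through to the page allocator.
 */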
#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the
 * minimum object size and give up on using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, make sure that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
#ifdef CONFIG_ZONE_DMA
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for both flags.
	 */
	if (likely((flags & (__GFP_DMA | __GFP_RECLAIMABLE)) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. If both are, __GFP_DMA
	 * is more important.
	 */
	return flags & __GFP_DMA ? KMALLOC_DMA : KMALLOC_RECLAIM;
#else
	return flags & __GFP_RECLAIMABLE ? KMALLOC_RECLAIM : KMALLOC_NORMAL;
#endif
}
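/*
 * For example, with CONFIG_ZONE_DMA enabled the mapping above gives:
 *
 *	kmalloc_type(GFP_KERNEL)                     == KMALLOC_NORMAL
 *	kmalloc_type(GFP_KERNEL | __GFP_RECLAIMABLE) == KMALLOC_RECLAIM
 *	kmalloc_type(GFP_KERNEL | __GFP_DMA)         == KMALLOC_DMA
 *	kmalloc_type(__GFP_DMA | __GFP_RECLAIMABLE)  == KMALLOC_DMA
 */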
/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 */
static __always_inline unsigned int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;
	if (size <= 64 * 1024 * 1024) return 26;
	BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *, void *);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
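/*
 * Illustrative bulk usage ("foo_cachep" is a hypothetical cache and the
 * error handling is kept minimal): a return of 0 from
 * kmem_cache_alloc_bulk() means failure, nonzero means all requested
 * objects were allocated.
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL, 16, objs)) {
 *		... use objs[0] .. objs[15] ...
 *		kmem_cache_free_bulk(foo_cachep, 16, objs);
 *	}
 */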
/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment __malloc;
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node) __assume_slab_alignment __malloc;
#else
static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t) __assume_slab_alignment __malloc;

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
					 gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment __malloc;
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s,
		gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *s,
			    gfp_t gfpflags,
			    int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment __malloc;
#else
static __always_inline void *
kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}
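/*
 * Worked example for the large path (assuming 4 KiB pages): a request of
 * 10000 bytes exceeds an 8 KiB order-1 page, so get_order(10000) yields 2
 * and kmalloc_large() asks the page allocator for 4 contiguous pages,
 * i.e. 16 KiB for a 10000-byte request.
 */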
/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least the size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory-allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel RAM. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to succeed the allocation but fail
 *	eventually.
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
				flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

int memcg_update_all_caches(int num_memcgs);

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}
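/*
 * Example of why kmalloc_array()/kcalloc() are preferred over open-coded
 * multiplication: with a 32-bit size_t, n = 65537 and size = 65536 make
 * n * size wrap around to 65536, silently under-allocating. The
 * check_mul_overflow() test above catches this and returns NULL instead:
 *
 *	struct foo *arr = kmalloc_array(n, sizeof(*arr), GFP_KERNEL);
 *	if (!arr)
 *		return -ENOMEM;
 */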
/*
 * kmalloc_track_caller is a special version of kmalloc that records, for
 * slab leak tracking, the caller of the routine that invokes it rather
 * than just its immediate caller (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
				       int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */