mm/slab.h from the Linux kernel mirror (for testing), git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git, at v4.14-rc6 (523 lines, 15 kB).
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kmemcheck.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned long size;
} kmalloc_info[];

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));
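
/*
 * Illustrative sketch (not part of the original header): the common
 * cache-creation path can use the helpers declared above roughly as
 * follows, computing the effective alignment and then trying to reuse an
 * existing, compatible cache before creating a new one. The control flow
 * and variable names here are assumptions for illustration only.
 *
 *	align = calculate_alignment(flags, align, size);
 *	s = find_mergeable(size, align, flags, name, ctor);
 *	if (s)
 *		return s;	// reuse a mergeable cache instead of creating one
 *	// ...otherwise fall through and create a fresh kmem_cache...
 */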
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_NOTRACK | SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_NOTRACK | \
			      SLAB_ACCOUNT)

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
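
/*
 * Illustrative sketch (an assumption, not the kernel's actual code): when an
 * allocator has no batched fast path, a generic bulk implementation can fall
 * back to looping over the single-object entry points, undoing the partial
 * work on failure:
 *
 *	size_t i;
 *
 *	for (i = 0; i < nr; i++) {
 *		p[i] = kmem_cache_alloc(s, flags);
 *		if (!p[i]) {
 *			__kmem_cache_free_bulk(s, i, p);
 *			return 0;
 *		}
 *	}
 *	return i;
 */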
#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them with the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said the caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));
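
/*
 * Illustrative sketch (hypothetical callers, not part of the original
 * header): a slab allocator would charge a freshly allocated slab page to
 * the memcg owning a non-root cache, and management code walks the
 * per-memcg children of a root cache while holding slab_mutex:
 *
 *	// when backing cache 's' with a new page of the given order:
 *	if (memcg_charge_slab(page, gfp, order, s))
 *		goto out_free_page;	// charge failed, undo the page allocation
 *
 *	// iterating the children of a root cache:
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		pr_info("child cache: %s\n", cache_name(c));
 *	mutex_unlock(&slab_mutex);
 */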
#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}
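
/*
 * Illustrative sketch (not part of the original header): the allocators'
 * free paths use cache_from_obj() roughly like this, recovering the cache
 * that really owns the object so that objects freed to the wrong cache are
 * caught and warned about when consistency checks are enabled:
 *
 *	cachep = cache_from_obj(s, objp);
 *	// ...continue freeing objp into cachep's freelists...
 */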
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
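
/*
 * Illustrative sketch (hypothetical allocation fast path, not part of the
 * original header): the two hooks above are intended to bracket the actual
 * object allocation, so a caller would typically do:
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;	// fault injection rejected the allocation
 *	object = ...allocate an object from s...;
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 */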
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */