Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

slab, slub, slob: add slab_flags_t

Add sparse-checked slab_flags_t for struct kmem_cache::flags (SLAB_POISON,
etc).

SLAB's struct kmem_cache grows slightly with this change, because
slab_flags_t is based on "unsigned long" while the previous SLAB flags
field was "unsigned int" — but this extra size is only temporary.

Link: http://lkml.kernel.org/r/20171021100225.GA22428@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Alexey Dobriyan and committed by Linus Torvalds.
d50112ed a3ba0744

+97 -81
+1 -1
fs/ecryptfs/main.c
··· 660 660 struct kmem_cache **cache; 661 661 const char *name; 662 662 size_t size; 663 - unsigned long flags; 663 + slab_flags_t flags; 664 664 void (*ctor)(void *obj); 665 665 } ecryptfs_cache_infos[] = { 666 666 {
+1 -1
fs/xfs/kmem.h
··· 104 104 } 105 105 106 106 static inline kmem_zone_t * 107 - kmem_zone_init_flags(int size, char *zone_name, unsigned long flags, 107 + kmem_zone_init_flags(int size, char *zone_name, slab_flags_t flags, 108 108 void (*construct)(void *)) 109 109 { 110 110 return kmem_cache_create(zone_name, size, 0, flags, construct);
+2 -2
include/linux/kasan.h
··· 46 46 void kasan_free_pages(struct page *page, unsigned int order); 47 47 48 48 void kasan_cache_create(struct kmem_cache *cache, size_t *size, 49 - unsigned long *flags); 49 + slab_flags_t *flags); 50 50 void kasan_cache_shrink(struct kmem_cache *cache); 51 51 void kasan_cache_shutdown(struct kmem_cache *cache); 52 52 ··· 95 95 96 96 static inline void kasan_cache_create(struct kmem_cache *cache, 97 97 size_t *size, 98 - unsigned long *flags) {} 98 + slab_flags_t *flags) {} 99 99 static inline void kasan_cache_shrink(struct kmem_cache *cache) {} 100 100 static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} 101 101
+4 -4
include/linux/kmemleak.h
··· 48 48 extern void kmemleak_ignore_phys(phys_addr_t phys) __ref; 49 49 50 50 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, 51 - int min_count, unsigned long flags, 51 + int min_count, slab_flags_t flags, 52 52 gfp_t gfp) 53 53 { 54 54 if (!(flags & SLAB_NOLEAKTRACE)) 55 55 kmemleak_alloc(ptr, size, min_count, gfp); 56 56 } 57 57 58 - static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) 58 + static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags) 59 59 { 60 60 if (!(flags & SLAB_NOLEAKTRACE)) 61 61 kmemleak_free(ptr); ··· 76 76 { 77 77 } 78 78 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, 79 - int min_count, unsigned long flags, 79 + int min_count, slab_flags_t flags, 80 80 gfp_t gfp) 81 81 { 82 82 } ··· 94 94 static inline void kmemleak_free_part(const void *ptr, size_t size) 95 95 { 96 96 } 97 - static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) 97 + static inline void kmemleak_free_recursive(const void *ptr, slab_flags_t flags) 98 98 { 99 99 } 100 100 static inline void kmemleak_free_percpu(const void __percpu *ptr)
+37 -23
include/linux/slab.h
··· 21 21 * Flags to pass to kmem_cache_create(). 22 22 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set. 23 23 */ 24 - #define SLAB_CONSISTENCY_CHECKS 0x00000100UL /* DEBUG: Perform (expensive) checks on alloc/free */ 25 - #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */ 26 - #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */ 27 - #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */ 28 - #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */ 29 - #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */ 30 - #define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */ 24 + /* DEBUG: Perform (expensive) checks on alloc/free */ 25 + #define SLAB_CONSISTENCY_CHECKS ((slab_flags_t __force)0x00000100UL) 26 + /* DEBUG: Red zone objs in a cache */ 27 + #define SLAB_RED_ZONE ((slab_flags_t __force)0x00000400UL) 28 + /* DEBUG: Poison objects */ 29 + #define SLAB_POISON ((slab_flags_t __force)0x00000800UL) 30 + /* Align objs on cache lines */ 31 + #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000UL) 32 + /* Use GFP_DMA memory */ 33 + #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000UL) 34 + /* DEBUG: Store the last owner for bug hunting */ 35 + #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000UL) 36 + /* Panic if kmem_cache_create() fails */ 37 + #define SLAB_PANIC ((slab_flags_t __force)0x00040000UL) 31 38 /* 32 39 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS! 33 40 * ··· 72 65 * 73 66 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU. 
74 67 */ 75 - #define SLAB_TYPESAFE_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */ 76 - #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */ 77 - #define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */ 68 + /* Defer freeing slabs to RCU */ 69 + #define SLAB_TYPESAFE_BY_RCU ((slab_flags_t __force)0x00080000UL) 70 + /* Spread some memory over cpuset */ 71 + #define SLAB_MEM_SPREAD ((slab_flags_t __force)0x00100000UL) 72 + /* Trace allocations and frees */ 73 + #define SLAB_TRACE ((slab_flags_t __force)0x00200000UL) 78 74 79 75 /* Flag to prevent checks on free */ 80 76 #ifdef CONFIG_DEBUG_OBJECTS 81 - # define SLAB_DEBUG_OBJECTS 0x00400000UL 77 + # define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00400000UL) 82 78 #else 83 - # define SLAB_DEBUG_OBJECTS 0x00000000UL 79 + # define SLAB_DEBUG_OBJECTS ((slab_flags_t __force)0x00000000UL) 84 80 #endif 85 81 86 - #define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ 82 + /* Avoid kmemleak tracing */ 83 + #define SLAB_NOLEAKTRACE ((slab_flags_t __force)0x00800000UL) 87 84 88 85 /* Don't track use of uninitialized memory */ 89 86 #ifdef CONFIG_KMEMCHECK 90 - # define SLAB_NOTRACK 0x01000000UL 87 + # define SLAB_NOTRACK ((slab_flags_t __force)0x01000000UL) 91 88 #else 92 - # define SLAB_NOTRACK 0x00000000UL 89 + # define SLAB_NOTRACK ((slab_flags_t __force)0x00000000UL) 93 90 #endif 91 + /* Fault injection mark */ 94 92 #ifdef CONFIG_FAILSLAB 95 - # define SLAB_FAILSLAB 0x02000000UL /* Fault injection mark */ 93 + # define SLAB_FAILSLAB ((slab_flags_t __force)0x02000000UL) 96 94 #else 97 - # define SLAB_FAILSLAB 0x00000000UL 95 + # define SLAB_FAILSLAB ((slab_flags_t __force)0x00000000UL) 98 96 #endif 97 + /* Account to memcg */ 99 98 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB) 100 - # define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */ 99 + # define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000UL) 101 100 #else 102 - # define SLAB_ACCOUNT 0x00000000UL 101 + # 
define SLAB_ACCOUNT ((slab_flags_t __force)0x00000000UL) 103 102 #endif 104 103 105 104 #ifdef CONFIG_KASAN 106 - #define SLAB_KASAN 0x08000000UL 105 + #define SLAB_KASAN ((slab_flags_t __force)0x08000000UL) 107 106 #else 108 - #define SLAB_KASAN 0x00000000UL 107 + #define SLAB_KASAN ((slab_flags_t __force)0x00000000UL) 109 108 #endif 110 109 111 110 /* The following flags affect the page allocator grouping pages by mobility */ 112 - #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ 111 + /* Objects are reclaimable */ 112 + #define SLAB_RECLAIM_ACCOUNT ((slab_flags_t __force)0x00020000UL) 113 113 #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ 114 114 /* 115 115 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. ··· 142 128 bool slab_is_available(void); 143 129 144 130 struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, 145 - unsigned long, 131 + slab_flags_t, 146 132 void (*)(void *)); 147 133 void kmem_cache_destroy(struct kmem_cache *); 148 134 int kmem_cache_shrink(struct kmem_cache *);
+1 -1
include/linux/slab_def.h
··· 20 20 struct reciprocal_value reciprocal_buffer_size; 21 21 /* 2) touched by every alloc & free from the backend */ 22 22 23 - unsigned int flags; /* constant flags */ 23 + slab_flags_t flags; /* constant flags */ 24 24 unsigned int num; /* # of objs per slab */ 25 25 26 26 /* 3) cache_grow/shrink */
+1 -1
include/linux/slub_def.h
··· 82 82 struct kmem_cache { 83 83 struct kmem_cache_cpu __percpu *cpu_slab; 84 84 /* Used for retriving partial slabs etc */ 85 - unsigned long flags; 85 + slab_flags_t flags; 86 86 unsigned long min_partial; 87 87 int size; /* The size of an object including meta data */ 88 88 int object_size; /* The size of an object without meta data */
+1
include/linux/types.h
··· 156 156 #endif 157 157 158 158 typedef unsigned __bitwise gfp_t; 159 + typedef unsigned long __bitwise slab_flags_t; 159 160 typedef unsigned __bitwise fmode_t; 160 161 161 162 #ifdef CONFIG_PHYS_ADDR_T_64BIT
+1 -1
include/net/sock.h
··· 1105 1105 1106 1106 struct kmem_cache *slab; 1107 1107 unsigned int obj_size; 1108 - int slab_flags; 1108 + slab_flags_t slab_flags; 1109 1109 1110 1110 struct percpu_counter *orphan_count; 1111 1111
+1 -1
mm/kasan/kasan.c
··· 337 337 } 338 338 339 339 void kasan_cache_create(struct kmem_cache *cache, size_t *size, 340 - unsigned long *flags) 340 + slab_flags_t *flags) 341 341 { 342 342 int redzone_adjust; 343 343 int orig_size = *size;
+11 -12
mm/slab.c
··· 252 252 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \ 253 253 } while (0) 254 254 255 - #define CFLGS_OBJFREELIST_SLAB (0x40000000UL) 256 - #define CFLGS_OFF_SLAB (0x80000000UL) 255 + #define CFLGS_OBJFREELIST_SLAB ((slab_flags_t __force)0x40000000UL) 256 + #define CFLGS_OFF_SLAB ((slab_flags_t __force)0x80000000UL) 257 257 #define OBJFREELIST_SLAB(x) ((x)->flags & CFLGS_OBJFREELIST_SLAB) 258 258 #define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB) 259 259 ··· 441 441 * Calculate the number of objects and left-over bytes for a given buffer size. 442 442 */ 443 443 static unsigned int cache_estimate(unsigned long gfporder, size_t buffer_size, 444 - unsigned long flags, size_t *left_over) 444 + slab_flags_t flags, size_t *left_over) 445 445 { 446 446 unsigned int num; 447 447 size_t slab_size = PAGE_SIZE << gfporder; ··· 1759 1759 * towards high-order requests, this should be changed. 1760 1760 */ 1761 1761 static size_t calculate_slab_order(struct kmem_cache *cachep, 1762 - size_t size, unsigned long flags) 1762 + size_t size, slab_flags_t flags) 1763 1763 { 1764 1764 size_t left_over = 0; 1765 1765 int gfporder; ··· 1886 1886 return 0; 1887 1887 } 1888 1888 1889 - unsigned long kmem_cache_flags(unsigned long object_size, 1890 - unsigned long flags, const char *name, 1889 + slab_flags_t kmem_cache_flags(unsigned long object_size, 1890 + slab_flags_t flags, const char *name, 1891 1891 void (*ctor)(void *)) 1892 1892 { 1893 1893 return flags; ··· 1895 1895 1896 1896 struct kmem_cache * 1897 1897 __kmem_cache_alias(const char *name, size_t size, size_t align, 1898 - unsigned long flags, void (*ctor)(void *)) 1898 + slab_flags_t flags, void (*ctor)(void *)) 1899 1899 { 1900 1900 struct kmem_cache *cachep; 1901 1901 ··· 1913 1913 } 1914 1914 1915 1915 static bool set_objfreelist_slab_cache(struct kmem_cache *cachep, 1916 - size_t size, unsigned long flags) 1916 + size_t size, slab_flags_t flags) 1917 1917 { 1918 1918 size_t left; 1919 1919 ··· 1936 
1936 } 1937 1937 1938 1938 static bool set_off_slab_cache(struct kmem_cache *cachep, 1939 - size_t size, unsigned long flags) 1939 + size_t size, slab_flags_t flags) 1940 1940 { 1941 1941 size_t left; 1942 1942 ··· 1970 1970 } 1971 1971 1972 1972 static bool set_on_slab_cache(struct kmem_cache *cachep, 1973 - size_t size, unsigned long flags) 1973 + size_t size, slab_flags_t flags) 1974 1974 { 1975 1975 size_t left; 1976 1976 ··· 2006 2006 * cacheline. This can be beneficial if you're counting cycles as closely 2007 2007 * as davem. 2008 2008 */ 2009 - int 2010 - __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) 2009 + int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags) 2011 2010 { 2012 2011 size_t ralign = BYTES_PER_WORD; 2013 2012 gfp_t gfp;
+13 -13
mm/slab.h
··· 21 21 unsigned int object_size;/* The original size of the object */ 22 22 unsigned int size; /* The aligned/padded/added on size */ 23 23 unsigned int align; /* Alignment as calculated */ 24 - unsigned long flags; /* Active flags on the slab */ 24 + slab_flags_t flags; /* Active flags on the slab */ 25 25 const char *name; /* Slab name for sysfs */ 26 26 int refcount; /* Use counter */ 27 27 void (*ctor)(void *); /* Called on object slot creation */ ··· 79 79 unsigned long size; 80 80 } kmalloc_info[]; 81 81 82 - unsigned long calculate_alignment(unsigned long flags, 82 + unsigned long calculate_alignment(slab_flags_t flags, 83 83 unsigned long align, unsigned long size); 84 84 85 85 #ifndef CONFIG_SLOB 86 86 /* Kmalloc array related functions */ 87 87 void setup_kmalloc_cache_index_table(void); 88 - void create_kmalloc_caches(unsigned long); 88 + void create_kmalloc_caches(slab_flags_t); 89 89 90 90 /* Find the kmalloc slab corresponding for a certain size */ 91 91 struct kmem_cache *kmalloc_slab(size_t, gfp_t); ··· 93 93 94 94 95 95 /* Functions provided by the slab allocators */ 96 - extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags); 96 + int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags); 97 97 98 98 extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size, 99 - unsigned long flags); 99 + slab_flags_t flags); 100 100 extern void create_boot_cache(struct kmem_cache *, const char *name, 101 - size_t size, unsigned long flags); 101 + size_t size, slab_flags_t flags); 102 102 103 103 int slab_unmergeable(struct kmem_cache *s); 104 104 struct kmem_cache *find_mergeable(size_t size, size_t align, 105 - unsigned long flags, const char *name, void (*ctor)(void *)); 105 + slab_flags_t flags, const char *name, void (*ctor)(void *)); 106 106 #ifndef CONFIG_SLOB 107 107 struct kmem_cache * 108 108 __kmem_cache_alias(const char *name, size_t size, size_t align, 109 - unsigned long flags, void (*ctor)(void *)); 
109 + slab_flags_t flags, void (*ctor)(void *)); 110 110 111 - unsigned long kmem_cache_flags(unsigned long object_size, 112 - unsigned long flags, const char *name, 111 + slab_flags_t kmem_cache_flags(unsigned long object_size, 112 + slab_flags_t flags, const char *name, 113 113 void (*ctor)(void *)); 114 114 #else 115 115 static inline struct kmem_cache * 116 116 __kmem_cache_alias(const char *name, size_t size, size_t align, 117 - unsigned long flags, void (*ctor)(void *)) 117 + slab_flags_t flags, void (*ctor)(void *)) 118 118 { return NULL; } 119 119 120 - static inline unsigned long kmem_cache_flags(unsigned long object_size, 121 - unsigned long flags, const char *name, 120 + static inline slab_flags_t kmem_cache_flags(unsigned long object_size, 121 + slab_flags_t flags, const char *name, 122 122 void (*ctor)(void *)) 123 123 { 124 124 return flags;
+8 -8
mm/slab_common.c
··· 291 291 } 292 292 293 293 struct kmem_cache *find_mergeable(size_t size, size_t align, 294 - unsigned long flags, const char *name, void (*ctor)(void *)) 294 + slab_flags_t flags, const char *name, void (*ctor)(void *)) 295 295 { 296 296 struct kmem_cache *s; 297 297 ··· 341 341 * Figure out what the alignment of the objects will be given a set of 342 342 * flags, a user specified alignment and the size of the objects. 343 343 */ 344 - unsigned long calculate_alignment(unsigned long flags, 344 + unsigned long calculate_alignment(slab_flags_t flags, 345 345 unsigned long align, unsigned long size) 346 346 { 347 347 /* ··· 366 366 367 367 static struct kmem_cache *create_cache(const char *name, 368 368 size_t object_size, size_t size, size_t align, 369 - unsigned long flags, void (*ctor)(void *), 369 + slab_flags_t flags, void (*ctor)(void *), 370 370 struct mem_cgroup *memcg, struct kmem_cache *root_cache) 371 371 { 372 372 struct kmem_cache *s; ··· 431 431 */ 432 432 struct kmem_cache * 433 433 kmem_cache_create(const char *name, size_t size, size_t align, 434 - unsigned long flags, void (*ctor)(void *)) 434 + slab_flags_t flags, void (*ctor)(void *)) 435 435 { 436 436 struct kmem_cache *s = NULL; 437 437 const char *cache_name; ··· 879 879 #ifndef CONFIG_SLOB 880 880 /* Create a cache during boot when no slab services are available yet */ 881 881 void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size, 882 - unsigned long flags) 882 + slab_flags_t flags) 883 883 { 884 884 int err; 885 885 ··· 899 899 } 900 900 901 901 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, 902 - unsigned long flags) 902 + slab_flags_t flags) 903 903 { 904 904 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); 905 905 ··· 1057 1057 } 1058 1058 } 1059 1059 1060 - static void __init new_kmalloc_cache(int idx, unsigned long flags) 1060 + static void __init new_kmalloc_cache(int idx, slab_flags_t flags) 1061 1061 { 1062 
1062 kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name, 1063 1063 kmalloc_info[idx].size, flags); ··· 1068 1068 * may already have been created because they were needed to 1069 1069 * enable allocations for slab creation. 1070 1070 */ 1071 - void __init create_kmalloc_caches(unsigned long flags) 1071 + void __init create_kmalloc_caches(slab_flags_t flags) 1072 1072 { 1073 1073 int i; 1074 1074
+1 -1
mm/slob.c
··· 524 524 } 525 525 EXPORT_SYMBOL(ksize); 526 526 527 - int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) 527 + int __kmem_cache_create(struct kmem_cache *c, slab_flags_t flags) 528 528 { 529 529 if (flags & SLAB_TYPESAFE_BY_RCU) { 530 530 /* leave room for rcu footer at the end of object */
+14 -12
mm/slub.c
··· 193 193 #define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */ 194 194 195 195 /* Internal SLUB flags */ 196 - #define __OBJECT_POISON 0x80000000UL /* Poison object */ 197 - #define __CMPXCHG_DOUBLE 0x40000000UL /* Use cmpxchg_double */ 196 + /* Poison object */ 197 + #define __OBJECT_POISON ((slab_flags_t __force)0x80000000UL) 198 + /* Use cmpxchg_double */ 199 + #define __CMPXCHG_DOUBLE ((slab_flags_t __force)0x40000000UL) 198 200 199 201 /* 200 202 * Tracking user of a slab. ··· 487 485 * Debug settings: 488 486 */ 489 487 #if defined(CONFIG_SLUB_DEBUG_ON) 490 - static int slub_debug = DEBUG_DEFAULT_FLAGS; 488 + static slab_flags_t slub_debug = DEBUG_DEFAULT_FLAGS; 491 489 #else 492 - static int slub_debug; 490 + static slab_flags_t slub_debug; 493 491 #endif 494 492 495 493 static char *slub_debug_slabs; ··· 1291 1289 1292 1290 __setup("slub_debug", setup_slub_debug); 1293 1291 1294 - unsigned long kmem_cache_flags(unsigned long object_size, 1295 - unsigned long flags, const char *name, 1292 + slab_flags_t kmem_cache_flags(unsigned long object_size, 1293 + slab_flags_t flags, const char *name, 1296 1294 void (*ctor)(void *)) 1297 1295 { 1298 1296 /* ··· 1324 1322 struct page *page) {} 1325 1323 static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, 1326 1324 struct page *page) {} 1327 - unsigned long kmem_cache_flags(unsigned long object_size, 1328 - unsigned long flags, const char *name, 1325 + slab_flags_t kmem_cache_flags(unsigned long object_size, 1326 + slab_flags_t flags, const char *name, 1329 1327 void (*ctor)(void *)) 1330 1328 { 1331 1329 return flags; ··· 3479 3477 */ 3480 3478 static int calculate_sizes(struct kmem_cache *s, int forced_order) 3481 3479 { 3482 - unsigned long flags = s->flags; 3480 + slab_flags_t flags = s->flags; 3483 3481 size_t size = s->object_size; 3484 3482 int order; 3485 3483 ··· 3595 3593 return !!oo_objects(s->oo); 3596 3594 } 3597 3595 3598 - static int kmem_cache_open(struct 
kmem_cache *s, unsigned long flags) 3596 + static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags) 3599 3597 { 3600 3598 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor); 3601 3599 s->reserved = 0; ··· 4247 4245 4248 4246 struct kmem_cache * 4249 4247 __kmem_cache_alias(const char *name, size_t size, size_t align, 4250 - unsigned long flags, void (*ctor)(void *)) 4248 + slab_flags_t flags, void (*ctor)(void *)) 4251 4249 { 4252 4250 struct kmem_cache *s, *c; 4253 4251 ··· 4277 4275 return s; 4278 4276 } 4279 4277 4280 - int __kmem_cache_create(struct kmem_cache *s, unsigned long flags) 4278 + int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags) 4281 4279 { 4282 4280 int err; 4283 4281