Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xfs: simplify kmem_{zone_}zalloc

Introduce the flag KM_ZERO, which requests zeroed memory, and convert
kmem_{zone_}zalloc to call kmem_{zone_}alloc() with KM_ZERO directly,
in order to avoid the separate memset-to-zero step.
And following Dave's suggestion, move kmem_{zone_}zalloc into kmem.h
as static inline functions, since they are now just simple wrappers.

V2:
Move kmem_{zone_}zalloc into kmem.h as static inlines, as Dave suggested.

Signed-off-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Ben Myers <bpm@sgi.com>

authored by

Gu Zheng and committed by
Ben Myers
359d992b d123031a

+18 -25
-22
fs/xfs/kmem.c
··· 63 63 } 64 64 65 65 void * 66 - kmem_zalloc(size_t size, xfs_km_flags_t flags) 67 - { 68 - void *ptr; 69 - 70 - ptr = kmem_alloc(size, flags); 71 - if (ptr) 72 - memset((char *)ptr, 0, (int)size); 73 - return ptr; 74 - } 75 - 76 - void * 77 66 kmem_zalloc_large(size_t size, xfs_km_flags_t flags) 78 67 { 79 68 void *ptr; ··· 116 127 __func__, lflags); 117 128 congestion_wait(BLK_RW_ASYNC, HZ/50); 118 129 } while (1); 119 - } 120 - 121 - void * 122 - kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags) 123 - { 124 - void *ptr; 125 - 126 - ptr = kmem_zone_alloc(zone, flags); 127 - if (ptr) 128 - memset((char *)ptr, 0, kmem_cache_size(zone)); 129 - return ptr; 130 130 }
+18 -3
fs/xfs/kmem.h
··· 32 32 #define KM_NOSLEEP ((__force xfs_km_flags_t)0x0002u) 33 33 #define KM_NOFS ((__force xfs_km_flags_t)0x0004u) 34 34 #define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u) 35 + #define KM_ZERO ((__force xfs_km_flags_t)0x0010u) 35 36 36 37 /* 37 38 * We use a special process flag to avoid recursive callbacks into ··· 44 43 { 45 44 gfp_t lflags; 46 45 47 - BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL)); 46 + BUG_ON(flags & ~(KM_SLEEP|KM_NOSLEEP|KM_NOFS|KM_MAYFAIL|KM_ZERO)); 48 47 49 48 if (flags & KM_NOSLEEP) { 50 49 lflags = GFP_ATOMIC | __GFP_NOWARN; ··· 53 52 if ((current->flags & PF_FSTRANS) || (flags & KM_NOFS)) 54 53 lflags &= ~__GFP_FS; 55 54 } 55 + 56 + if (flags & KM_ZERO) 57 + lflags |= __GFP_ZERO; 58 + 56 59 return lflags; 57 60 } 58 61 59 62 extern void *kmem_alloc(size_t, xfs_km_flags_t); 60 - extern void *kmem_zalloc(size_t, xfs_km_flags_t); 61 63 extern void *kmem_zalloc_large(size_t size, xfs_km_flags_t); 62 64 extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t); 63 65 extern void kmem_free(const void *); 64 66 65 67 66 68 extern void *kmem_zalloc_greedy(size_t *, size_t, size_t); 69 + 70 + static inline void * 71 + kmem_zalloc(size_t size, xfs_km_flags_t flags) 72 + { 73 + return kmem_alloc(size, flags | KM_ZERO); 74 + } 67 75 68 76 /* 69 77 * Zone interfaces ··· 112 102 } 113 103 114 104 extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t); 115 - extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t); 105 + 106 + static inline void * 107 + kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags) 108 + { 109 + return kmem_zone_alloc(zone, flags | KM_ZERO); 110 + } 116 111 117 112 #endif /* __XFS_SUPPORT_KMEM_H__ */