1#ifndef IOU_ALLOC_CACHE_H 2#define IOU_ALLOC_CACHE_H 3 4#include <linux/io_uring_types.h> 5 6/* 7 * Don't allow the cache to grow beyond this size. 8 */ 9#define IO_ALLOC_CACHE_MAX 128 10 11void io_alloc_cache_free(struct io_alloc_cache *cache, 12 void (*free)(const void *)); 13bool io_alloc_cache_init(struct io_alloc_cache *cache, 14 unsigned max_nr, unsigned int size, 15 unsigned int init_bytes); 16 17void *io_cache_alloc_new(struct io_alloc_cache *cache, gfp_t gfp); 18 19static inline void io_alloc_cache_kasan(struct iovec **iov, int *nr) 20{ 21 if (IS_ENABLED(CONFIG_KASAN)) { 22 kfree(*iov); 23 *iov = NULL; 24 *nr = 0; 25 } 26} 27 28static inline bool io_alloc_cache_put(struct io_alloc_cache *cache, 29 void *entry) 30{ 31 if (cache->nr_cached < cache->max_cached) { 32 if (!kasan_mempool_poison_object(entry)) 33 return false; 34 cache->entries[cache->nr_cached++] = entry; 35 return true; 36 } 37 return false; 38} 39 40static inline void *io_alloc_cache_get(struct io_alloc_cache *cache) 41{ 42 if (cache->nr_cached) { 43 void *entry = cache->entries[--cache->nr_cached]; 44 45 /* 46 * If KASAN is enabled, always clear the initial bytes that 47 * must be zeroed post alloc, in case any of them overlap 48 * with KASAN storage. 49 */ 50#if defined(CONFIG_KASAN) 51 kasan_mempool_unpoison_object(entry, cache->elem_size); 52 if (cache->init_clear) 53 memset(entry, 0, cache->init_clear); 54#endif 55 return entry; 56 } 57 58 return NULL; 59} 60 61static inline void *io_cache_alloc(struct io_alloc_cache *cache, gfp_t gfp) 62{ 63 void *obj; 64 65 obj = io_alloc_cache_get(cache); 66 if (obj) 67 return obj; 68 return io_cache_alloc_new(cache, gfp); 69} 70 71#endif