Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

zsmalloc: require GFP in zs_malloc()

Pass GFP flags to zs_malloc() instead of using a fixed mask supplied to
zs_create_pool(), so we can be more flexible, but, more importantly, we
need this to switch zram to per-cpu compression streams -- zram will try
to allocate a handle with preemption disabled in a fast path and switch to
a slow path (using a different gfp mask) if the fast one has failed.

Apart from that, this also aligns the zs_malloc() interface with zspool/zbud.

[sergey.senozhatsky@gmail.com: pass GFP flags to zs_malloc() instead of using a fixed mask]
Link: http://lkml.kernel.org/r/20160429150942.GA637@swordfish
Link: http://lkml.kernel.org/r/20160429150942.GA637@swordfish
Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Sergey Senozhatsky and committed by
Linus Torvalds
d0d8da2d 1ee47165

+17 -15
+2 -2
drivers/block/zram/zram_drv.c
··· 514 514 goto out_error; 515 515 } 516 516 517 - meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); 517 + meta->mem_pool = zs_create_pool(pool_name); 518 518 if (!meta->mem_pool) { 519 519 pr_err("Error creating memory pool\n"); 520 520 goto out_error; ··· 717 717 src = uncmem; 718 718 } 719 719 720 - handle = zs_malloc(meta->mem_pool, clen); 720 + handle = zs_malloc(meta->mem_pool, clen, GFP_NOIO | __GFP_HIGHMEM); 721 721 if (!handle) { 722 722 pr_err("Error allocating memory for compressed page: %u, size=%zu\n", 723 723 index, clen);
+2 -2
include/linux/zsmalloc.h
··· 41 41 42 42 struct zs_pool; 43 43 44 - struct zs_pool *zs_create_pool(const char *name, gfp_t flags); 44 + struct zs_pool *zs_create_pool(const char *name); 45 45 void zs_destroy_pool(struct zs_pool *pool); 46 46 47 - unsigned long zs_malloc(struct zs_pool *pool, size_t size); 47 + unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags); 48 48 void zs_free(struct zs_pool *pool, unsigned long obj); 49 49 50 50 void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+13 -11
mm/zsmalloc.c
··· 247 247 struct size_class **size_class; 248 248 struct kmem_cache *handle_cachep; 249 249 250 - gfp_t flags; /* allocation flags used when growing pool */ 251 250 atomic_long_t pages_allocated; 252 251 253 252 struct zs_pool_stats stats; ··· 294 295 kmem_cache_destroy(pool->handle_cachep); 295 296 } 296 297 297 - static unsigned long alloc_handle(struct zs_pool *pool) 298 + static unsigned long alloc_handle(struct zs_pool *pool, gfp_t gfp) 298 299 { 299 300 return (unsigned long)kmem_cache_alloc(pool->handle_cachep, 300 - pool->flags & ~__GFP_HIGHMEM); 301 + gfp & ~__GFP_HIGHMEM); 301 302 } 302 303 303 304 static void free_handle(struct zs_pool *pool, unsigned long handle) ··· 323 324 const struct zpool_ops *zpool_ops, 324 325 struct zpool *zpool) 325 326 { 326 - return zs_create_pool(name, gfp); 327 + /* 328 + * Ignore global gfp flags: zs_malloc() may be invoked from 329 + * different contexts and its caller must provide a valid 330 + * gfp mask. 331 + */ 332 + return zs_create_pool(name); 327 333 } 328 334 329 335 static void zs_zpool_destroy(void *pool) ··· 339 335 static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp, 340 336 unsigned long *handle) 341 337 { 342 - *handle = zs_malloc(pool, size); 338 + *handle = zs_malloc(pool, size, gfp); 343 339 return *handle ? 0 : -1; 344 340 } 345 341 static void zs_zpool_free(void *pool, unsigned long handle) ··· 1395 1391 * otherwise 0. 1396 1392 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail. 
1397 1393 */ 1398 - unsigned long zs_malloc(struct zs_pool *pool, size_t size) 1394 + unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) 1399 1395 { 1400 1396 unsigned long handle, obj; 1401 1397 struct size_class *class; ··· 1404 1400 if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE)) 1405 1401 return 0; 1406 1402 1407 - handle = alloc_handle(pool); 1403 + handle = alloc_handle(pool, gfp); 1408 1404 if (!handle) 1409 1405 return 0; 1410 1406 ··· 1417 1413 1418 1414 if (!first_page) { 1419 1415 spin_unlock(&class->lock); 1420 - first_page = alloc_zspage(class, pool->flags); 1416 + first_page = alloc_zspage(class, gfp); 1421 1417 if (unlikely(!first_page)) { 1422 1418 free_handle(pool, handle); 1423 1419 return 0; ··· 1882 1878 * On success, a pointer to the newly created pool is returned, 1883 1879 * otherwise NULL. 1884 1880 */ 1885 - struct zs_pool *zs_create_pool(const char *name, gfp_t flags) 1881 + struct zs_pool *zs_create_pool(const char *name) 1886 1882 { 1887 1883 int i; 1888 1884 struct zs_pool *pool; ··· 1951 1947 1952 1948 prev_class = class; 1953 1949 } 1954 - 1955 - pool->flags = flags; 1956 1950 1957 1951 if (zs_pool_stat_create(pool, name)) 1958 1952 goto err;