Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

zswap: dynamic pool creation

Add dynamic creation of pools. Move the static per-cpu crypto compression
transforms into each pool. Add to each zswap_entry a pointer to the pool
it belongs to.

This is required by the following patch which enables changing the zswap
zpool and compressor params at runtime.

[akpm@linux-foundation.org: fix merge snafus]
Signed-off-by: Dan Streetman <ddstreet@ieee.org>
Acked-by: Seth Jennings <sjennings@variantweb.net>
Cc: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Authored by Dan Streetman; committed by Linus Torvalds.
f1c54846 3f0e1312

+413 -151
mm/zswap.c
··· 99 99 static struct zpool *zswap_pool; 100 100 101 101 /********************************* 102 - * compression functions 103 - **********************************/ 104 - /* per-cpu compression transforms */ 105 - static struct crypto_comp * __percpu *zswap_comp_pcpu_tfms; 106 - 107 - enum comp_op { 108 - ZSWAP_COMPOP_COMPRESS, 109 - ZSWAP_COMPOP_DECOMPRESS 110 - }; 111 - 112 - static int zswap_comp_op(enum comp_op op, const u8 *src, unsigned int slen, 113 - u8 *dst, unsigned int *dlen) 114 - { 115 - struct crypto_comp *tfm; 116 - int ret; 117 - 118 - tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, get_cpu()); 119 - switch (op) { 120 - case ZSWAP_COMPOP_COMPRESS: 121 - ret = crypto_comp_compress(tfm, src, slen, dst, dlen); 122 - break; 123 - case ZSWAP_COMPOP_DECOMPRESS: 124 - ret = crypto_comp_decompress(tfm, src, slen, dst, dlen); 125 - break; 126 - default: 127 - ret = -EINVAL; 128 - } 129 - 130 - put_cpu(); 131 - return ret; 132 - } 133 - 134 - static int __init zswap_comp_init(void) 135 - { 136 - if (!crypto_has_comp(zswap_compressor, 0, 0)) { 137 - pr_info("%s compressor not available\n", zswap_compressor); 138 - /* fall back to default compressor */ 139 - zswap_compressor = ZSWAP_COMPRESSOR_DEFAULT; 140 - if (!crypto_has_comp(zswap_compressor, 0, 0)) 141 - /* can't even load the default compressor */ 142 - return -ENODEV; 143 - } 144 - pr_info("using %s compressor\n", zswap_compressor); 145 - 146 - /* alloc percpu transforms */ 147 - zswap_comp_pcpu_tfms = alloc_percpu(struct crypto_comp *); 148 - if (!zswap_comp_pcpu_tfms) 149 - return -ENOMEM; 150 - return 0; 151 - } 152 - 153 - static void __init zswap_comp_exit(void) 154 - { 155 - /* free percpu transforms */ 156 - free_percpu(zswap_comp_pcpu_tfms); 157 - } 158 - 159 - /********************************* 160 102 * data structures 161 103 **********************************/ 104 + 105 + struct zswap_pool { 106 + struct zpool *zpool; 107 + struct crypto_comp * __percpu *tfm; 108 + struct kref kref; 109 + struct 
list_head list; 110 + struct rcu_head rcu_head; 111 + struct notifier_block notifier; 112 + char tfm_name[CRYPTO_MAX_ALG_NAME]; 113 + }; 114 + 162 115 /* 163 116 * struct zswap_entry 164 117 * ··· 119 166 * page within zswap. 120 167 * 121 168 * rbnode - links the entry into red-black tree for the appropriate swap type 169 + * offset - the swap offset for the entry. Index into the red-black tree. 122 170 * refcount - the number of outstanding reference to the entry. This is needed 123 171 * to protect against premature freeing of the entry by code 124 172 * concurrent calls to load, invalidate, and writeback. The lock 125 173 * for the zswap_tree structure that contains the entry must 126 174 * be held while changing the refcount. Since the lock must 127 175 * be held, there is no reason to also make refcount atomic. 128 - * offset - the swap offset for the entry. Index into the red-black tree. 129 - * handle - zpool allocation handle that stores the compressed page data 130 176 * length - the length in bytes of the compressed page data. 
Needed during 131 177 * decompression 178 + * pool - the zswap_pool the entry's data is in 179 + * handle - zpool allocation handle that stores the compressed page data 132 180 */ 133 181 struct zswap_entry { 134 182 struct rb_node rbnode; 135 183 pgoff_t offset; 136 184 int refcount; 137 185 unsigned int length; 186 + struct zswap_pool *pool; 138 187 unsigned long handle; 139 188 }; 140 189 ··· 155 200 }; 156 201 157 202 static struct zswap_tree *zswap_trees[MAX_SWAPFILES]; 203 + 204 + /* RCU-protected iteration */ 205 + static LIST_HEAD(zswap_pools); 206 + /* protects zswap_pools list modification */ 207 + static DEFINE_SPINLOCK(zswap_pools_lock); 208 + 209 + /********************************* 210 + * helpers and fwd declarations 211 + **********************************/ 212 + 213 + #define zswap_pool_debug(msg, p) \ 214 + pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ 215 + zpool_get_type((p)->zpool)) 216 + 217 + static int zswap_writeback_entry(struct zpool *pool, unsigned long handle); 218 + static int zswap_pool_get(struct zswap_pool *pool); 219 + static void zswap_pool_put(struct zswap_pool *pool); 220 + 221 + static const struct zpool_ops zswap_zpool_ops = { 222 + .evict = zswap_writeback_entry 223 + }; 224 + 225 + static bool zswap_is_full(void) 226 + { 227 + return totalram_pages * zswap_max_pool_percent / 100 < 228 + DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); 229 + } 230 + 231 + static void zswap_update_total_size(void) 232 + { 233 + struct zswap_pool *pool; 234 + u64 total = 0; 235 + 236 + rcu_read_lock(); 237 + 238 + list_for_each_entry_rcu(pool, &zswap_pools, list) 239 + total += zpool_get_total_size(pool->zpool); 240 + 241 + rcu_read_unlock(); 242 + 243 + zswap_pool_total_size = total; 244 + } 158 245 159 246 /********************************* 160 247 * zswap entry functions ··· 291 294 */ 292 295 static void zswap_free_entry(struct zswap_entry *entry) 293 296 { 294 - zpool_free(zswap_pool, entry->handle); 297 + zpool_free(entry->pool->zpool, 
entry->handle); 298 + zswap_pool_put(entry->pool); 295 299 zswap_entry_cache_free(entry); 296 300 atomic_dec(&zswap_stored_pages); 297 - zswap_pool_total_size = zpool_get_total_size(zswap_pool); 301 + zswap_update_total_size(); 298 302 } 299 303 300 304 /* caller must hold the tree lock */ ··· 337 339 **********************************/ 338 340 static DEFINE_PER_CPU(u8 *, zswap_dstmem); 339 341 340 - static int __zswap_cpu_notifier(unsigned long action, unsigned long cpu) 342 + static int __zswap_cpu_dstmem_notifier(unsigned long action, unsigned long cpu) 341 343 { 342 - struct crypto_comp *tfm; 343 344 u8 *dst; 344 345 345 346 switch (action) { 346 347 case CPU_UP_PREPARE: 347 - tfm = crypto_alloc_comp(zswap_compressor, 0, 0); 348 - if (IS_ERR(tfm)) { 349 - pr_err("can't allocate compressor transform\n"); 350 - return NOTIFY_BAD; 351 - } 352 - *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = tfm; 353 348 dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu)); 354 349 if (!dst) { 355 350 pr_err("can't allocate compressor buffer\n"); 356 - crypto_free_comp(tfm); 357 - *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; 358 351 return NOTIFY_BAD; 359 352 } 360 353 per_cpu(zswap_dstmem, cpu) = dst; 361 354 break; 362 355 case CPU_DEAD: 363 356 case CPU_UP_CANCELED: 364 - tfm = *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu); 365 - if (tfm) { 366 - crypto_free_comp(tfm); 367 - *per_cpu_ptr(zswap_comp_pcpu_tfms, cpu) = NULL; 368 - } 369 357 dst = per_cpu(zswap_dstmem, cpu); 370 358 kfree(dst); 371 359 per_cpu(zswap_dstmem, cpu) = NULL; ··· 362 378 return NOTIFY_OK; 363 379 } 364 380 365 - static int zswap_cpu_notifier(struct notifier_block *nb, 366 - unsigned long action, void *pcpu) 381 + static int zswap_cpu_dstmem_notifier(struct notifier_block *nb, 382 + unsigned long action, void *pcpu) 367 383 { 368 - unsigned long cpu = (unsigned long)pcpu; 369 - return __zswap_cpu_notifier(action, cpu); 384 + return __zswap_cpu_dstmem_notifier(action, (unsigned long)pcpu); 370 385 } 371 
386 372 - static struct notifier_block zswap_cpu_notifier_block = { 373 - .notifier_call = zswap_cpu_notifier 387 + static struct notifier_block zswap_dstmem_notifier = { 388 + .notifier_call = zswap_cpu_dstmem_notifier, 374 389 }; 375 390 376 - static int __init zswap_cpu_init(void) 391 + static int __init zswap_cpu_dstmem_init(void) 377 392 { 378 393 unsigned long cpu; 379 394 380 395 cpu_notifier_register_begin(); 381 396 for_each_online_cpu(cpu) 382 - if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK) 397 + if (__zswap_cpu_dstmem_notifier(CPU_UP_PREPARE, cpu) == 398 + NOTIFY_BAD) 383 399 goto cleanup; 384 - __register_cpu_notifier(&zswap_cpu_notifier_block); 400 + __register_cpu_notifier(&zswap_dstmem_notifier); 385 401 cpu_notifier_register_done(); 386 402 return 0; 387 403 388 404 cleanup: 389 405 for_each_online_cpu(cpu) 390 - __zswap_cpu_notifier(CPU_UP_CANCELED, cpu); 406 + __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu); 391 407 cpu_notifier_register_done(); 392 408 return -ENOMEM; 393 409 } 394 410 395 - /********************************* 396 - * helpers 397 - **********************************/ 398 - static bool zswap_is_full(void) 411 + static void zswap_cpu_dstmem_destroy(void) 399 412 { 400 - return totalram_pages * zswap_max_pool_percent / 100 < 401 - DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE); 413 + unsigned long cpu; 414 + 415 + cpu_notifier_register_begin(); 416 + for_each_online_cpu(cpu) 417 + __zswap_cpu_dstmem_notifier(CPU_UP_CANCELED, cpu); 418 + __unregister_cpu_notifier(&zswap_dstmem_notifier); 419 + cpu_notifier_register_done(); 420 + } 421 + 422 + static int __zswap_cpu_comp_notifier(struct zswap_pool *pool, 423 + unsigned long action, unsigned long cpu) 424 + { 425 + struct crypto_comp *tfm; 426 + 427 + switch (action) { 428 + case CPU_UP_PREPARE: 429 + if (WARN_ON(*per_cpu_ptr(pool->tfm, cpu))) 430 + break; 431 + tfm = crypto_alloc_comp(pool->tfm_name, 0, 0); 432 + if (IS_ERR_OR_NULL(tfm)) { 433 + pr_err("could not alloc 
crypto comp %s : %ld\n", 434 + pool->tfm_name, PTR_ERR(tfm)); 435 + return NOTIFY_BAD; 436 + } 437 + *per_cpu_ptr(pool->tfm, cpu) = tfm; 438 + break; 439 + case CPU_DEAD: 440 + case CPU_UP_CANCELED: 441 + tfm = *per_cpu_ptr(pool->tfm, cpu); 442 + if (!IS_ERR_OR_NULL(tfm)) 443 + crypto_free_comp(tfm); 444 + *per_cpu_ptr(pool->tfm, cpu) = NULL; 445 + break; 446 + default: 447 + break; 448 + } 449 + return NOTIFY_OK; 450 + } 451 + 452 + static int zswap_cpu_comp_notifier(struct notifier_block *nb, 453 + unsigned long action, void *pcpu) 454 + { 455 + unsigned long cpu = (unsigned long)pcpu; 456 + struct zswap_pool *pool = container_of(nb, typeof(*pool), notifier); 457 + 458 + return __zswap_cpu_comp_notifier(pool, action, cpu); 459 + } 460 + 461 + static int zswap_cpu_comp_init(struct zswap_pool *pool) 462 + { 463 + unsigned long cpu; 464 + 465 + memset(&pool->notifier, 0, sizeof(pool->notifier)); 466 + pool->notifier.notifier_call = zswap_cpu_comp_notifier; 467 + 468 + cpu_notifier_register_begin(); 469 + for_each_online_cpu(cpu) 470 + if (__zswap_cpu_comp_notifier(pool, CPU_UP_PREPARE, cpu) == 471 + NOTIFY_BAD) 472 + goto cleanup; 473 + __register_cpu_notifier(&pool->notifier); 474 + cpu_notifier_register_done(); 475 + return 0; 476 + 477 + cleanup: 478 + for_each_online_cpu(cpu) 479 + __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu); 480 + cpu_notifier_register_done(); 481 + return -ENOMEM; 482 + } 483 + 484 + static void zswap_cpu_comp_destroy(struct zswap_pool *pool) 485 + { 486 + unsigned long cpu; 487 + 488 + cpu_notifier_register_begin(); 489 + for_each_online_cpu(cpu) 490 + __zswap_cpu_comp_notifier(pool, CPU_UP_CANCELED, cpu); 491 + __unregister_cpu_notifier(&pool->notifier); 492 + cpu_notifier_register_done(); 493 + } 494 + 495 + /********************************* 496 + * pool functions 497 + **********************************/ 498 + 499 + static struct zswap_pool *__zswap_pool_current(void) 500 + { 501 + struct zswap_pool *pool; 502 + 503 + pool = 
list_first_or_null_rcu(&zswap_pools, typeof(*pool), list); 504 + WARN_ON(!pool); 505 + 506 + return pool; 507 + } 508 + 509 + static struct zswap_pool *zswap_pool_current(void) 510 + { 511 + assert_spin_locked(&zswap_pools_lock); 512 + 513 + return __zswap_pool_current(); 514 + } 515 + 516 + static struct zswap_pool *zswap_pool_current_get(void) 517 + { 518 + struct zswap_pool *pool; 519 + 520 + rcu_read_lock(); 521 + 522 + pool = __zswap_pool_current(); 523 + if (!pool || !zswap_pool_get(pool)) 524 + pool = NULL; 525 + 526 + rcu_read_unlock(); 527 + 528 + return pool; 529 + } 530 + 531 + static struct zswap_pool *zswap_pool_last_get(void) 532 + { 533 + struct zswap_pool *pool, *last = NULL; 534 + 535 + rcu_read_lock(); 536 + 537 + list_for_each_entry_rcu(pool, &zswap_pools, list) 538 + last = pool; 539 + if (!WARN_ON(!last) && !zswap_pool_get(last)) 540 + last = NULL; 541 + 542 + rcu_read_unlock(); 543 + 544 + return last; 545 + } 546 + 547 + static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) 548 + { 549 + struct zswap_pool *pool; 550 + 551 + assert_spin_locked(&zswap_pools_lock); 552 + 553 + list_for_each_entry_rcu(pool, &zswap_pools, list) { 554 + if (strncmp(pool->tfm_name, compressor, sizeof(pool->tfm_name))) 555 + continue; 556 + if (strncmp(zpool_get_type(pool->zpool), type, 557 + sizeof(zswap_zpool_type))) 558 + continue; 559 + /* if we can't get it, it's about to be destroyed */ 560 + if (!zswap_pool_get(pool)) 561 + continue; 562 + return pool; 563 + } 564 + 565 + return NULL; 566 + } 567 + 568 + static struct zswap_pool *zswap_pool_create(char *type, char *compressor) 569 + { 570 + struct zswap_pool *pool; 571 + gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN; 572 + 573 + pool = kzalloc(sizeof(*pool), GFP_KERNEL); 574 + if (!pool) { 575 + pr_err("pool alloc failed\n"); 576 + return NULL; 577 + } 578 + 579 + pool->zpool = zpool_create_pool(type, "zswap", gfp, &zswap_zpool_ops); 580 + if (!pool->zpool) { 581 + pr_err("%s zpool not 
available\n", type); 582 + goto error; 583 + } 584 + pr_debug("using %s zpool\n", zpool_get_type(pool->zpool)); 585 + 586 + strlcpy(pool->tfm_name, compressor, sizeof(pool->tfm_name)); 587 + pool->tfm = alloc_percpu(struct crypto_comp *); 588 + if (!pool->tfm) { 589 + pr_err("percpu alloc failed\n"); 590 + goto error; 591 + } 592 + 593 + if (zswap_cpu_comp_init(pool)) 594 + goto error; 595 + pr_debug("using %s compressor\n", pool->tfm_name); 596 + 597 + /* being the current pool takes 1 ref; this func expects the 598 + * caller to always add the new pool as the current pool 599 + */ 600 + kref_init(&pool->kref); 601 + INIT_LIST_HEAD(&pool->list); 602 + 603 + zswap_pool_debug("created", pool); 604 + 605 + return pool; 606 + 607 + error: 608 + free_percpu(pool->tfm); 609 + if (pool->zpool) 610 + zpool_destroy_pool(pool->zpool); 611 + kfree(pool); 612 + return NULL; 613 + } 614 + 615 + static struct zswap_pool *__zswap_pool_create_fallback(void) 616 + { 617 + if (!crypto_has_comp(zswap_compressor, 0, 0)) { 618 + pr_err("compressor %s not available, using default %s\n", 619 + zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT); 620 + strncpy(zswap_compressor, ZSWAP_COMPRESSOR_DEFAULT, 621 + sizeof(zswap_compressor)); 622 + } 623 + if (!zpool_has_pool(zswap_zpool_type)) { 624 + pr_err("zpool %s not available, using default %s\n", 625 + zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT); 626 + strncpy(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT, 627 + sizeof(zswap_zpool_type)); 628 + } 629 + 630 + return zswap_pool_create(zswap_zpool_type, zswap_compressor); 631 + } 632 + 633 + static void zswap_pool_destroy(struct zswap_pool *pool) 634 + { 635 + zswap_pool_debug("destroying", pool); 636 + 637 + zswap_cpu_comp_destroy(pool); 638 + free_percpu(pool->tfm); 639 + zpool_destroy_pool(pool->zpool); 640 + kfree(pool); 641 + } 642 + 643 + static int __must_check zswap_pool_get(struct zswap_pool *pool) 644 + { 645 + return kref_get_unless_zero(&pool->kref); 646 + } 647 + 648 + static void 
__zswap_pool_release(struct rcu_head *head) 649 + { 650 + struct zswap_pool *pool = container_of(head, typeof(*pool), rcu_head); 651 + 652 + /* nobody should have been able to get a kref... */ 653 + WARN_ON(kref_get_unless_zero(&pool->kref)); 654 + 655 + /* pool is now off zswap_pools list and has no references. */ 656 + zswap_pool_destroy(pool); 657 + } 658 + 659 + static void __zswap_pool_empty(struct kref *kref) 660 + { 661 + struct zswap_pool *pool; 662 + 663 + pool = container_of(kref, typeof(*pool), kref); 664 + 665 + spin_lock(&zswap_pools_lock); 666 + 667 + WARN_ON(pool == zswap_pool_current()); 668 + 669 + list_del_rcu(&pool->list); 670 + call_rcu(&pool->rcu_head, __zswap_pool_release); 671 + 672 + spin_unlock(&zswap_pools_lock); 673 + } 674 + 675 + static void zswap_pool_put(struct zswap_pool *pool) 676 + { 677 + kref_put(&pool->kref, __zswap_pool_empty); 402 678 } 403 679 404 680 /********************************* ··· 721 477 pgoff_t offset; 722 478 struct zswap_entry *entry; 723 479 struct page *page; 480 + struct crypto_comp *tfm; 724 481 u8 *src, *dst; 725 482 unsigned int dlen; 726 483 int ret; ··· 762 517 case ZSWAP_SWAPCACHE_NEW: /* page is locked */ 763 518 /* decompress */ 764 519 dlen = PAGE_SIZE; 765 - src = (u8 *)zpool_map_handle(zswap_pool, entry->handle, 520 + src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle, 766 521 ZPOOL_MM_RO) + sizeof(struct zswap_header); 767 522 dst = kmap_atomic(page); 768 - ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, 769 - entry->length, dst, &dlen); 523 + tfm = *get_cpu_ptr(entry->pool->tfm); 524 + ret = crypto_comp_decompress(tfm, src, entry->length, 525 + dst, &dlen); 526 + put_cpu_ptr(entry->pool->tfm); 770 527 kunmap_atomic(dst); 771 - zpool_unmap_handle(zswap_pool, entry->handle); 528 + zpool_unmap_handle(entry->pool->zpool, entry->handle); 772 529 BUG_ON(ret); 773 530 BUG_ON(dlen != PAGE_SIZE); 774 531 ··· 819 572 return ret; 820 573 } 821 574 575 + static int zswap_shrink(void) 576 + { 577 
+ struct zswap_pool *pool; 578 + int ret; 579 + 580 + pool = zswap_pool_last_get(); 581 + if (!pool) 582 + return -ENOENT; 583 + 584 + ret = zpool_shrink(pool->zpool, 1, NULL); 585 + 586 + zswap_pool_put(pool); 587 + 588 + return ret; 589 + } 590 + 822 591 /********************************* 823 592 * frontswap hooks 824 593 **********************************/ ··· 844 581 { 845 582 struct zswap_tree *tree = zswap_trees[type]; 846 583 struct zswap_entry *entry, *dupentry; 584 + struct crypto_comp *tfm; 847 585 int ret; 848 586 unsigned int dlen = PAGE_SIZE, len; 849 587 unsigned long handle; ··· 860 596 /* reclaim space if needed */ 861 597 if (zswap_is_full()) { 862 598 zswap_pool_limit_hit++; 863 - if (zpool_shrink(zswap_pool, 1, NULL)) { 599 + if (zswap_shrink()) { 864 600 zswap_reject_reclaim_fail++; 865 601 ret = -ENOMEM; 866 602 goto reject; ··· 875 611 goto reject; 876 612 } 877 613 878 - /* compress */ 879 - dst = get_cpu_var(zswap_dstmem); 880 - src = kmap_atomic(page); 881 - ret = zswap_comp_op(ZSWAP_COMPOP_COMPRESS, src, PAGE_SIZE, dst, &dlen); 882 - kunmap_atomic(src); 883 - if (ret) { 614 + /* if entry is successfully added, it keeps the reference */ 615 + entry->pool = zswap_pool_current_get(); 616 + if (!entry->pool) { 884 617 ret = -EINVAL; 885 618 goto freepage; 886 619 } 887 620 621 + /* compress */ 622 + dst = get_cpu_var(zswap_dstmem); 623 + tfm = *get_cpu_ptr(entry->pool->tfm); 624 + src = kmap_atomic(page); 625 + ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen); 626 + kunmap_atomic(src); 627 + put_cpu_ptr(entry->pool->tfm); 628 + if (ret) { 629 + ret = -EINVAL; 630 + goto put_dstmem; 631 + } 632 + 888 633 /* store */ 889 634 len = dlen + sizeof(struct zswap_header); 890 - ret = zpool_malloc(zswap_pool, len, __GFP_NORETRY | __GFP_NOWARN, 891 - &handle); 635 + ret = zpool_malloc(entry->pool->zpool, len, 636 + __GFP_NORETRY | __GFP_NOWARN, &handle); 892 637 if (ret == -ENOSPC) { 893 638 zswap_reject_compress_poor++; 894 - goto freepage; 
639 + goto put_dstmem; 895 640 } 896 641 if (ret) { 897 642 zswap_reject_alloc_fail++; 898 - goto freepage; 643 + goto put_dstmem; 899 644 } 900 - zhdr = zpool_map_handle(zswap_pool, handle, ZPOOL_MM_RW); 645 + zhdr = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_RW); 901 646 zhdr->swpentry = swp_entry(type, offset); 902 647 buf = (u8 *)(zhdr + 1); 903 648 memcpy(buf, dst, dlen); 904 - zpool_unmap_handle(zswap_pool, handle); 649 + zpool_unmap_handle(entry->pool->zpool, handle); 905 650 put_cpu_var(zswap_dstmem); 906 651 907 652 /* populate entry */ ··· 933 660 934 661 /* update stats */ 935 662 atomic_inc(&zswap_stored_pages); 936 - zswap_pool_total_size = zpool_get_total_size(zswap_pool); 663 + zswap_update_total_size(); 937 664 938 665 return 0; 939 666 940 - freepage: 667 + put_dstmem: 941 668 put_cpu_var(zswap_dstmem); 669 + zswap_pool_put(entry->pool); 670 + freepage: 942 671 zswap_entry_cache_free(entry); 943 672 reject: 944 673 return ret; ··· 955 680 { 956 681 struct zswap_tree *tree = zswap_trees[type]; 957 682 struct zswap_entry *entry; 683 + struct crypto_comp *tfm; 958 684 u8 *src, *dst; 959 685 unsigned int dlen; 960 686 int ret; ··· 972 696 973 697 /* decompress */ 974 698 dlen = PAGE_SIZE; 975 - src = (u8 *)zpool_map_handle(zswap_pool, entry->handle, 699 + src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle, 976 700 ZPOOL_MM_RO) + sizeof(struct zswap_header); 977 701 dst = kmap_atomic(page); 978 - ret = zswap_comp_op(ZSWAP_COMPOP_DECOMPRESS, src, entry->length, 979 - dst, &dlen); 702 + tfm = *get_cpu_ptr(entry->pool->tfm); 703 + ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen); 704 + put_cpu_ptr(entry->pool->tfm); 980 705 kunmap_atomic(dst); 981 - zpool_unmap_handle(zswap_pool, entry->handle); 706 + zpool_unmap_handle(entry->pool->zpool, entry->handle); 982 707 BUG_ON(ret); 983 708 984 709 spin_lock(&tree->lock); ··· 1031 754 kfree(tree); 1032 755 zswap_trees[type] = NULL; 1033 756 } 1034 - 1035 - static const 
struct zpool_ops zswap_zpool_ops = { 1036 - .evict = zswap_writeback_entry 1037 - }; 1038 757 1039 758 static void zswap_frontswap_init(unsigned type) 1040 759 { ··· 1112 839 **********************************/ 1113 840 static int __init init_zswap(void) 1114 841 { 1115 - gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN; 1116 - 1117 - pr_info("loading zswap\n"); 1118 - 1119 - zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp, 1120 - &zswap_zpool_ops); 1121 - if (!zswap_pool && strcmp(zswap_zpool_type, ZSWAP_ZPOOL_DEFAULT)) { 1122 - pr_info("%s zpool not available\n", zswap_zpool_type); 1123 - zswap_zpool_type = ZSWAP_ZPOOL_DEFAULT; 1124 - zswap_pool = zpool_create_pool(zswap_zpool_type, "zswap", gfp, 1125 - &zswap_zpool_ops); 1126 - } 1127 - if (!zswap_pool) { 1128 - pr_err("%s zpool not available\n", zswap_zpool_type); 1129 - pr_err("zpool creation failed\n"); 1130 - goto error; 1131 - } 1132 - pr_info("using %s pool\n", zswap_zpool_type); 842 + struct zswap_pool *pool; 1133 843 1134 844 if (zswap_entry_cache_create()) { 1135 845 pr_err("entry cache creation failed\n"); 1136 - goto cachefail; 846 + goto cache_fail; 1137 847 } 1138 - if (zswap_comp_init()) { 1139 - pr_err("compressor initialization failed\n"); 1140 - goto compfail; 848 + 849 + if (zswap_cpu_dstmem_init()) { 850 + pr_err("dstmem alloc failed\n"); 851 + goto dstmem_fail; 1141 852 } 1142 - if (zswap_cpu_init()) { 1143 - pr_err("per-cpu initialization failed\n"); 1144 - goto pcpufail; 853 + 854 + pool = __zswap_pool_create_fallback(); 855 + if (!pool) { 856 + pr_err("pool creation failed\n"); 857 + goto pool_fail; 1145 858 } 859 + pr_info("loaded using pool %s/%s\n", pool->tfm_name, 860 + zpool_get_type(pool->zpool)); 861 + 862 + list_add(&pool->list, &zswap_pools); 1146 863 1147 864 frontswap_register_ops(&zswap_frontswap_ops); 1148 865 if (zswap_debugfs_init()) 1149 866 pr_warn("debugfs initialization failed\n"); 1150 867 return 0; 1151 - pcpufail: 1152 - zswap_comp_exit(); 1153 - compfail: 868 + 
869 + pool_fail: 870 + zswap_cpu_dstmem_destroy(); 871 + dstmem_fail: 1154 872 zswap_entry_cache_destroy(); 1155 - cachefail: 1156 - zpool_destroy_pool(zswap_pool); 1157 - error: 873 + cache_fail: 1158 874 return -ENOMEM; 1159 875 } 1160 876 /* must be late so crypto has time to come up */