Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

zsmalloc/zram: introduce zs_pool_stats api

`zs_compact_control' accounts the number of migrated objects but it has
a limited lifespan -- we lose it as soon as zs_compact() returns
to zram. It worked fine, because (a) zram had its own counter of
migrated objects and (b) only zram could trigger compaction. However,
this does not work for automatic pool compaction (not issued by zram).
To account objects migrated during auto-compaction (issued by the
shrinker) we need to store this number in zs_pool.

Define a new `struct zs_pool_stats' structure to keep zs_pool's stats
there. It provides only `num_migrated', as of this writing, but it
surely can be extended.

A new zsmalloc symbol, zs_pool_stats(), exports zs_pool's stats to the
caller.

Use zs_pool_stats() in zram and remove `num_migrated' from zram_stats.

Signed-off-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Suggested-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Sergey Senozhatsky and committed by
Linus Torvalds
7d3f3938 0dc63d48

+30 -21
+9 -6
drivers/block/zram/zram_drv.c
··· 388 388 static ssize_t compact_store(struct device *dev, 389 389 struct device_attribute *attr, const char *buf, size_t len) 390 390 { 391 - unsigned long nr_migrated; 392 391 struct zram *zram = dev_to_zram(dev); 393 392 struct zram_meta *meta; 394 393 ··· 398 399 } 399 400 400 401 meta = zram->meta; 401 - nr_migrated = zs_compact(meta->mem_pool); 402 - atomic64_add(nr_migrated, &zram->stats.num_migrated); 402 + zs_compact(meta->mem_pool); 403 403 up_read(&zram->init_lock); 404 404 405 405 return len; ··· 426 428 struct device_attribute *attr, char *buf) 427 429 { 428 430 struct zram *zram = dev_to_zram(dev); 431 + struct zs_pool_stats pool_stats; 429 432 u64 orig_size, mem_used = 0; 430 433 long max_used; 431 434 ssize_t ret; 432 435 436 + memset(&pool_stats, 0x00, sizeof(struct zs_pool_stats)); 437 + 433 438 down_read(&zram->init_lock); 434 - if (init_done(zram)) 439 + if (init_done(zram)) { 435 440 mem_used = zs_get_total_pages(zram->meta->mem_pool); 441 + zs_pool_stats(zram->meta->mem_pool, &pool_stats); 442 + } 436 443 437 444 orig_size = atomic64_read(&zram->stats.pages_stored); 438 445 max_used = atomic_long_read(&zram->stats.max_used_pages); 439 446 440 447 ret = scnprintf(buf, PAGE_SIZE, 441 - "%8llu %8llu %8llu %8lu %8ld %8llu %8llu\n", 448 + "%8llu %8llu %8llu %8lu %8ld %8llu %8lu\n", 442 449 orig_size << PAGE_SHIFT, 443 450 (u64)atomic64_read(&zram->stats.compr_data_size), 444 451 mem_used << PAGE_SHIFT, 445 452 zram->limit_pages << PAGE_SHIFT, 446 453 max_used << PAGE_SHIFT, 447 454 (u64)atomic64_read(&zram->stats.zero_pages), 448 - (u64)atomic64_read(&zram->stats.num_migrated)); 455 + pool_stats.num_migrated); 449 456 up_read(&zram->init_lock); 450 457 451 458 return ret;
-1
drivers/block/zram/zram_drv.h
··· 78 78 atomic64_t compr_data_size; /* compressed size of pages stored */ 79 79 atomic64_t num_reads; /* failed + successful */ 80 80 atomic64_t num_writes; /* --do-- */ 81 - atomic64_t num_migrated; /* no. of migrated object */ 82 81 atomic64_t failed_reads; /* can happen when memory is too low */ 83 82 atomic64_t failed_writes; /* can happen when memory is too low */ 84 83 atomic64_t invalid_io; /* non-page-aligned I/O requests */
+6
include/linux/zsmalloc.h
··· 34 34 */ 35 35 }; 36 36 37 + struct zs_pool_stats { 38 + /* How many objects were migrated */ 39 + unsigned long num_migrated; 40 + }; 41 + 37 42 struct zs_pool; 38 43 39 44 struct zs_pool *zs_create_pool(char *name, gfp_t flags); ··· 54 49 unsigned long zs_get_total_pages(struct zs_pool *pool); 55 50 unsigned long zs_compact(struct zs_pool *pool); 56 51 52 + void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats); 57 53 #endif
+15 -14
mm/zsmalloc.c
··· 245 245 gfp_t flags; /* allocation flags used when growing pool */ 246 246 atomic_long_t pages_allocated; 247 247 248 + struct zs_pool_stats stats; 248 249 #ifdef CONFIG_ZSMALLOC_STAT 249 250 struct dentry *stat_dentry; 250 251 #endif ··· 1579 1578 /* Starting object index within @s_page which used for live object 1580 1579 * in the subpage. */ 1581 1580 int index; 1582 - /* how many of objects are migrated */ 1581 + /* How many of objects were migrated */ 1583 1582 int nr_migrated; 1584 1583 }; 1585 1584 ··· 1591 1590 struct page *s_page = cc->s_page; 1592 1591 struct page *d_page = cc->d_page; 1593 1592 unsigned long index = cc->index; 1594 - int nr_migrated = 0; 1595 1593 int ret = 0; 1596 1594 1597 1595 while (1) { ··· 1617 1617 record_obj(handle, free_obj); 1618 1618 unpin_tag(handle); 1619 1619 obj_free(pool, class, used_obj); 1620 - nr_migrated++; 1620 + cc->nr_migrated++; 1621 1621 } 1622 1622 1623 1623 /* Remember last position in this iteration */ 1624 1624 cc->s_page = s_page; 1625 1625 cc->index = index; 1626 - cc->nr_migrated = nr_migrated; 1627 1626 1628 1627 return ret; 1629 1628 } ··· 1698 1699 return obj_wasted * get_pages_per_zspage(class->size); 1699 1700 } 1700 1701 1701 - static unsigned long __zs_compact(struct zs_pool *pool, 1702 - struct size_class *class) 1702 + static void __zs_compact(struct zs_pool *pool, struct size_class *class) 1703 1703 { 1704 1704 struct zs_compact_control cc; 1705 1705 struct page *src_page; 1706 1706 struct page *dst_page = NULL; 1707 - unsigned long nr_total_migrated = 0; 1708 1707 1708 + cc.nr_migrated = 0; 1709 1709 spin_lock(&class->lock); 1710 1710 while ((src_page = isolate_source_page(class))) { 1711 1711 ··· 1726 1728 break; 1727 1729 1728 1730 putback_zspage(pool, class, dst_page); 1729 - nr_total_migrated += cc.nr_migrated; 1730 1731 } 1731 1732 1732 1733 /* Stop if we couldn't find slot */ ··· 1735 1738 putback_zspage(pool, class, dst_page); 1736 1739 putback_zspage(pool, class, src_page); 1737 1740 
spin_unlock(&class->lock); 1738 - nr_total_migrated += cc.nr_migrated; 1739 1741 cond_resched(); 1740 1742 spin_lock(&class->lock); 1741 1743 } ··· 1742 1746 if (src_page) 1743 1747 putback_zspage(pool, class, src_page); 1744 1748 1745 - spin_unlock(&class->lock); 1749 + pool->stats.num_migrated += cc.nr_migrated; 1746 1750 1747 - return nr_total_migrated; 1751 + spin_unlock(&class->lock); 1748 1752 } 1749 1753 1750 1754 unsigned long zs_compact(struct zs_pool *pool) 1751 1755 { 1752 1756 int i; 1753 - unsigned long nr_migrated = 0; 1754 1757 struct size_class *class; 1755 1758 1756 1759 for (i = zs_size_classes - 1; i >= 0; i--) { ··· 1758 1763 continue; 1759 1764 if (class->index != i) 1760 1765 continue; 1761 - nr_migrated += __zs_compact(pool, class); 1766 + __zs_compact(pool, class); 1762 1767 } 1763 1768 1764 - return nr_migrated; 1769 + return pool->stats.num_migrated; 1765 1770 } 1766 1771 EXPORT_SYMBOL_GPL(zs_compact); 1772 + 1773 + void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats) 1774 + { 1775 + memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats)); 1776 + } 1777 + EXPORT_SYMBOL_GPL(zs_pool_stats); 1767 1778 1768 1779 /** 1769 1780 * zs_create_pool - Creates an allocation pool to work from.