Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

zsmalloc: move huge compressed obj from page to zspage

The huge-object flag describes the zspage as a whole, not an individual page. Let's move it from struct page to struct zspage.

Link: https://lkml.kernel.org/r/20211115185909.3949505-6-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Mike Galbraith <umgwanakikbuti@gmail.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
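
The diff below replaces the per-page helpers SetPageHugeObject()/ClearPageHugeObject()/PageHugeObject(), which borrowed the PG_owner_priv_1 page flag, with a one-bit field in struct zspage and the helpers SetZsHugePage()/ZsHugePage(). As a minimal standalone sketch of the new pattern (a hypothetical userspace program, with struct zspage reduced to just its bitfields):

#include <stdbool.h>
#include <stdio.h>

#define HUGE_BITS	1
#define FULLNESS_BITS	2

/* Reduced stand-in for the kernel's struct zspage: the huge flag is
 * simply one more bitfield next to the existing per-zspage state, so
 * no bit of struct page has to be borrowed for it. */
struct zspage {
	unsigned int huge:HUGE_BITS;
	unsigned int fullness:FULLNESS_BITS;
};

/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetZsHugePage(struct zspage *zspage)
{
	zspage->huge = 1;
}

static bool ZsHugePage(struct zspage *zspage)
{
	return zspage->huge;
}

int main(void)
{
	struct zspage zspage = { 0 };

	SetZsHugePage(&zspage);
	printf("huge? %d\n", ZsHugePage(&zspage));	/* prints: huge? 1 */
	return 0;
}

Because a huge zspage consists of exactly one page holding exactly one object, the flag is a per-zspage property; keeping it in struct zspage also lets reset_page() drop its explicit ClearPageHugeObject() call, since the flag goes away with the zspage itself.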

Authored by Minchan Kim, committed by Linus Torvalds
a41ec880 3ae92ac2

mm/zsmalloc.c: +26 -24
···
 #define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
 #define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)
 
+#define HUGE_BITS	1
 #define FULLNESS_BITS	2
 #define CLASS_BITS	8
 #define ISOLATED_BITS	3
···
 	struct zs_size_stat stats;
 };
 
-/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
-static void SetPageHugeObject(struct page *page)
-{
-	SetPageOwnerPriv1(page);
-}
-
-static void ClearPageHugeObject(struct page *page)
-{
-	ClearPageOwnerPriv1(page);
-}
-
-static int PageHugeObject(struct page *page)
-{
-	return PageOwnerPriv1(page);
-}
-
 /*
  * Placed within free objects to form a singly linked list.
  * For every zspage, zspage->freeobj gives head of this list.
···
 
 struct zspage {
 	struct {
+		unsigned int huge:HUGE_BITS;
 		unsigned int fullness:FULLNESS_BITS;
 		unsigned int class:CLASS_BITS + 1;
 		unsigned int isolated:ISOLATED_BITS;
···
 	char *vm_addr; /* address of kmap_atomic()'ed pages */
 	enum zs_mapmode vm_mm; /* mapping mode */
 };
+
+/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
+static void SetZsHugePage(struct zspage *zspage)
+{
+	zspage->huge = 1;
+}
+
+static bool ZsHugePage(struct zspage *zspage)
+{
+	return zspage->huge;
+}
 
 #ifdef CONFIG_COMPACTION
 static int zs_register_migration(struct zs_pool *pool);
···
 
 static struct page *get_next_page(struct page *page)
 {
-	if (unlikely(PageHugeObject(page)))
+	struct zspage *zspage = get_zspage(page);
+
+	if (unlikely(ZsHugePage(zspage)))
 		return NULL;
 
 	return (struct page *)page->index;
···
 static bool obj_allocated(struct page *page, void *obj, unsigned long *phandle)
 {
 	unsigned long handle;
+	struct zspage *zspage = get_zspage(page);
 
-	if (unlikely(PageHugeObject(page))) {
+	if (unlikely(ZsHugePage(zspage))) {
 		VM_BUG_ON_PAGE(!is_first_page(page), page);
 		handle = page->index;
 	} else
···
 	ClearPagePrivate(page);
 	set_page_private(page, 0);
 	page_mapcount_reset(page);
-	ClearPageHugeObject(page);
 	page->index = 0;
 }
···
 		SetPagePrivate(page);
 		if (unlikely(class->objs_per_zspage == 1 &&
 				class->pages_per_zspage == 1))
-			SetPageHugeObject(page);
+			SetZsHugePage(zspage);
 	} else {
 		prev_page->index = (unsigned long)page;
 	}
···
 
 	ret = __zs_map_object(area, pages, off, class->size);
 out:
-	if (likely(!PageHugeObject(page)))
+	if (likely(!ZsHugePage(zspage)))
 		ret += ZS_HANDLE_SIZE;
 
 	return ret;
···
 	vaddr = kmap_atomic(m_page);
 	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
 	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
-	if (likely(!PageHugeObject(m_page)))
+	if (likely(!ZsHugePage(zspage)))
 		/* record handle in the header of allocated chunk */
 		link->handle = handle;
 	else
···
 
 	/* Insert this object in containing zspage's freelist */
 	link = (struct link_free *)(vaddr + f_offset);
-	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
+	if (likely(!ZsHugePage(zspage)))
+		link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
+	else
+		f_page->index = 0;
 	kunmap_atomic(vaddr);
 	set_freeobj(zspage, f_objidx);
 	mod_zspage_inuse(zspage, -1);
···
 
 	create_page_chain(class, zspage, pages);
 	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
-	if (unlikely(PageHugeObject(oldpage)))
+	if (unlikely(ZsHugePage(zspage)))
 		newpage->index = oldpage->index;
 	__SetPageMovable(newpage, page_mapping(oldpage));
 }
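
The obj_free() hunk is the subtle one: a huge zspage has no struct link_free header inside the object, because the handle is stored directly in the first page's page->index, so freeing must clear that index instead of threading the slot onto the freelist. A toy model of the two paths (hypothetical toy_* types standing in for the kernel state; OBJ_TAG_BITS is 1 in mainline zsmalloc):

#include <stdbool.h>
#include <stdio.h>

#define OBJ_TAG_BITS	1	/* low bit reserved as the allocated tag */

/* Hypothetical stand-ins for the relevant kernel state. */
struct toy_page { unsigned long index; };
struct toy_zspage { bool huge; unsigned long freeobj; };

/* Placed within free objects to form the freelist, as in zsmalloc. */
struct link_free { unsigned long next; };

/* Mirrors the patched obj_free() logic: a normal zspage threads the
 * freed slot onto the freelist; a huge zspage just clears the handle
 * stored in page->index, because the whole page is the one object. */
static void toy_obj_free(struct toy_zspage *zspage, struct toy_page *page,
			 struct link_free *slot, unsigned long f_objidx)
{
	if (!zspage->huge)
		slot->next = zspage->freeobj << OBJ_TAG_BITS;
	else
		page->index = 0;
	zspage->freeobj = f_objidx;
}

int main(void)
{
	struct toy_zspage normal = { .huge = false, .freeobj = 7 };
	struct link_free slot = { 0 };
	struct toy_page page = { .index = 0xdead };

	toy_obj_free(&normal, &page, &slot, 3);
	printf("normal: slot->next=%lu freeobj=%lu\n", slot.next, normal.freeobj);

	struct toy_zspage huge = { .huge = true, .freeobj = 0 };
	toy_obj_free(&huge, &page, &slot, 0);
	printf("huge: page->index=%lu\n", page.index);
	return 0;
}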