Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: list_lru: rename list_lru_per_memcg to list_lru_memcg

The name list_lru_memcg was previously taken and was freed up by the
last commit. Rename list_lru_per_memcg to list_lru_memcg, since the
shorter name is more concise.

Link: https://lkml.kernel.org/r/20220228122126.37293-16-songmuchun@bytedance.com
Signed-off-by: Muchun Song <songmuchun@bytedance.com>
Cc: Alex Shi <alexs@kernel.org>
Cc: Anna Schumaker <Anna.Schumaker@Netapp.com>
Cc: Chao Yu <chao@kernel.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Fam Zheng <fam.zheng@bytedance.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Kari Argillander <kari.argillander@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeelb@google.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Trond Myklebust <trond.myklebust@hammerspace.com>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Wei Yang <richard.weiyang@gmail.com>
Cc: Xiongchun Duan <duanxiongchun@bytedance.com>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Muchun Song and committed by
Linus Torvalds
d7011070 be740503

+10 -10
+1 -1
include/linux/list_lru.h
··· 32 32 long nr_items; 33 33 }; 34 34 35 - struct list_lru_per_memcg { 35 + struct list_lru_memcg { 36 36 struct rcu_head rcu; 37 37 /* array of per cgroup per node lists, indexed by node id */ 38 38 struct list_lru_one node[];
+9 -9
mm/list_lru.c
··· 53 53 list_lru_from_memcg_idx(struct list_lru *lru, int nid, int idx) 54 54 { 55 55 if (list_lru_memcg_aware(lru) && idx >= 0) { 56 - struct list_lru_per_memcg *mlru = xa_load(&lru->xa, idx); 56 + struct list_lru_memcg *mlru = xa_load(&lru->xa, idx); 57 57 58 58 return mlru ? &mlru->node[nid] : NULL; 59 59 } ··· 306 306 307 307 #ifdef CONFIG_MEMCG_KMEM 308 308 if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) { 309 - struct list_lru_per_memcg *mlru; 309 + struct list_lru_memcg *mlru; 310 310 unsigned long index; 311 311 312 312 xa_for_each(&lru->xa, index, mlru) { ··· 335 335 } 336 336 337 337 #ifdef CONFIG_MEMCG_KMEM 338 - static struct list_lru_per_memcg *memcg_init_list_lru_one(gfp_t gfp) 338 + static struct list_lru_memcg *memcg_init_list_lru_one(gfp_t gfp) 339 339 { 340 340 int nid; 341 - struct list_lru_per_memcg *mlru; 341 + struct list_lru_memcg *mlru; 342 342 343 343 mlru = kmalloc(struct_size(mlru, node, nr_node_ids), gfp); 344 344 if (!mlru) ··· 352 352 353 353 static void memcg_list_lru_free(struct list_lru *lru, int src_idx) 354 354 { 355 - struct list_lru_per_memcg *mlru = xa_erase_irq(&lru->xa, src_idx); 355 + struct list_lru_memcg *mlru = xa_erase_irq(&lru->xa, src_idx); 356 356 357 357 /* 358 358 * The __list_lru_walk_one() can walk the list of this node. 
··· 374 374 static void memcg_destroy_list_lru(struct list_lru *lru) 375 375 { 376 376 XA_STATE(xas, &lru->xa, 0); 377 - struct list_lru_per_memcg *mlru; 377 + struct list_lru_memcg *mlru; 378 378 379 379 if (!list_lru_memcg_aware(lru)) 380 380 return; ··· 475 475 int i; 476 476 unsigned long flags; 477 477 struct list_lru_memcg_table { 478 - struct list_lru_per_memcg *mlru; 478 + struct list_lru_memcg *mlru; 479 479 struct mem_cgroup *memcg; 480 480 } *table; 481 481 XA_STATE(xas, &lru->xa, 0); ··· 491 491 /* 492 492 * Because the list_lru can be reparented to the parent cgroup's 493 493 * list_lru, we should make sure that this cgroup and all its 494 - * ancestors have allocated list_lru_per_memcg. 494 + * ancestors have allocated list_lru_memcg. 495 495 */ 496 496 for (i = 0; memcg; memcg = parent_mem_cgroup(memcg), i++) { 497 497 if (memcg_list_lru_allocated(memcg, lru)) ··· 510 510 xas_lock_irqsave(&xas, flags); 511 511 while (i--) { 512 512 int index = READ_ONCE(table[i].memcg->kmemcg_id); 513 - struct list_lru_per_memcg *mlru = table[i].mlru; 513 + struct list_lru_memcg *mlru = table[i].mlru; 514 514 515 515 xas_set(&xas, index); 516 516 retry: