/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#ifndef _LRU_LIST_H
#define _LRU_LIST_H

#include <linux/list.h>
#include <linux/nodemask.h>
#include <linux/shrinker.h>
#include <linux/xarray.h>

struct mem_cgroup;

/* A list_lru_walk_cb must always return one of these values */
enum lru_status {
	LRU_REMOVED,		/* item removed from list */
	LRU_REMOVED_RETRY,	/* item removed, but lock has been
				   dropped and reacquired */
	LRU_ROTATE,		/* item referenced, give another pass */
	LRU_SKIP,		/* item cannot be locked, skip */
	LRU_RETRY,		/* item not freeable. May drop the lock
				   internally, but has to return locked. */
};

struct list_lru_one {
	struct list_head	list;
	/* may become negative during memcg reparenting */
	long			nr_items;
};

struct list_lru_memcg {
	struct rcu_head		rcu;
	/* array of per cgroup per node lists, indexed by node id */
	struct list_lru_one	node[];
};

struct list_lru_node {
	/* protects all lists on the node, including per cgroup */
	spinlock_t		lock;
	/* global list, used for the root cgroup in cgroup aware lrus */
	struct list_lru_one	lru;
	long			nr_items;
} ____cacheline_aligned_in_smp;

struct list_lru {
	struct list_lru_node	*node;
#ifdef CONFIG_MEMCG_KMEM
	struct list_head	list;
	int			shrinker_id;
	bool			memcg_aware;
	struct xarray		xa;
#endif
};

void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
		    struct lock_class_key *key, struct shrinker *shrinker);

#define list_lru_init(lru)				\
	__list_lru_init((lru), false, NULL, NULL)
#define list_lru_init_key(lru, key)			\
	__list_lru_init((lru), false, (key), NULL)
#define list_lru_init_memcg(lru, shrinker)		\
	__list_lru_init((lru), true, NULL, shrinker)

int memcg_list_lru_alloc(struct mem_cgroup *memcg, struct list_lru *lru,
			 gfp_t gfp);
void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *parent);

/**
 * list_lru_add: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 * @nid: the node id of the sublist to add the item to.
 * @memcg: the cgroup of the sublist to add the item to.
 *
 * If the element is already part of a list, this function returns without
 * doing anything. The caller therefore does not need to track whether the
 * element already belongs to the list and may update it lazily. Note,
 * however, that this holds for *a* list, not *this* list: if the caller
 * organizes its elements so that they can be on more than one type of
 * list, it is up to the caller to fully remove the item from the previous
 * list (with list_lru_del() for instance) before moving it to @lru.
 *
 * Return: true if the list was updated, false otherwise
 */
bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid,
		  struct mem_cgroup *memcg);

/**
 * list_lru_add_obj: add an element to the lru list's tail
 * @lru: the lru pointer
 * @item: the item to be added.
 *
 * This function is similar to list_lru_add(), but the NUMA node and the
 * memcg of the sublist are determined by the @item list_head. This
 * assumption is valid for slab object LRUs such as dentries, inodes, etc.
 *
 * Return: true if the list was updated, false otherwise
 */
bool list_lru_add_obj(struct list_lru *lru, struct list_head *item);
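
/*
 * Example (editorial sketch, not part of the original header): a cache
 * whose idle objects ride a memcg-aware list_lru. Everything named my_*
 * below is hypothetical, my_shrinker is assumed to have been allocated
 * elsewhere, and list_lru_del_obj() is declared further down.
 *
 *	struct my_object {
 *		struct list_head lru;	// linked into my_lru when idle
 *	};
 *
 *	static struct list_lru my_lru;
 *
 *	static int my_cache_init(struct shrinker *my_shrinker)
 *	{
 *		// memcg-aware variant; plain list_lru_init() also works
 *		return list_lru_init_memcg(&my_lru, my_shrinker);
 *	}
 *
 *	static void my_object_set_idle(struct my_object *obj)
 *	{
 *		// nid and memcg are derived from @obj's own memory
 *		list_lru_add_obj(&my_lru, &obj->lru);
 *	}
 *
 *	static void my_object_set_busy(struct my_object *obj)
 *	{
 *		// safe even if the object never made it onto the list
 *		list_lru_del_obj(&my_lru, &obj->lru);
 *	}
 */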

/**
 * list_lru_del: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 * @nid: the node id of the sublist to delete the item from.
 * @memcg: the cgroup of the sublist to delete the item from.
 *
 * This function works analogously to list_lru_add() in terms of list
 * manipulation. The comments about an element already belonging to
 * a list are also valid for list_lru_del().
 *
 * Return: true if the list was updated, false otherwise
 */
bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid,
		  struct mem_cgroup *memcg);

/**
 * list_lru_del_obj: delete an element from the lru list
 * @lru: the lru pointer
 * @item: the item to be deleted.
 *
 * This function is similar to list_lru_del(), but the NUMA node and the
 * memcg of the sublist are determined by the @item list_head. This
 * assumption is valid for slab object LRUs such as dentries, inodes, etc.
 *
 * Return: true if the list was updated, false otherwise.
 */
bool list_lru_del_obj(struct list_lru *lru, struct list_head *item);

/**
 * list_lru_count_one: return the number of objects currently held by @lru
 * @lru: the lru pointer.
 * @nid: the node id to count from.
 * @memcg: the cgroup to count from.
 *
 * There is no guarantee that the list is not updated while the count is being
 * computed. Callers that want such a guarantee need to provide an outer lock.
 *
 * Return: 0 for empty lists, otherwise the number of objects
 * currently held by @lru.
 */
unsigned long list_lru_count_one(struct list_lru *lru,
				 int nid, struct mem_cgroup *memcg);
unsigned long list_lru_count_node(struct list_lru *lru, int nid);

static inline unsigned long list_lru_shrink_count(struct list_lru *lru,
						  struct shrink_control *sc)
{
	return list_lru_count_one(lru, sc->nid, sc->memcg);
}

static inline unsigned long list_lru_count(struct list_lru *lru)
{
	long count = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY)
		count += list_lru_count_node(lru, nid);

	return count;
}

void list_lru_isolate(struct list_lru_one *list, struct list_head *item);
void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
			   struct list_head *head);
/**
 * list_lru_putback: undo list_lru_isolate
 * @lru: the lru pointer.
 * @item: the item to put back.
 * @nid: the node id of the sublist to put the item back to.
 * @memcg: the cgroup of the sublist to put the item back to.
 *
 * Put back an isolated item into its original LRU. Note that unlike
 * list_lru_add(), this does not increment the node LRU count (as
 * list_lru_isolate() does not originally decrement this count).
 *
 * Since we might have dropped the LRU lock in between, recompute list_lru_one
 * from the node's id and memcg.
 */
void list_lru_putback(struct list_lru *lru, struct list_head *item, int nid,
		      struct mem_cgroup *memcg);

typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
		struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
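
/*
 * Example (editorial sketch): a list_lru_walk_cb that reclaims
 * unreferenced objects. my_object and the my_object_*() helpers are
 * hypothetical. The callback runs with the LRU spinlock held and must
 * return with it held, per the walk contract documented below.
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  struct list_lru_one *list,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct my_object *obj =
 *			container_of(item, struct my_object, lru);
 *		struct list_head *dispose = cb_arg;
 *
 *		if (!my_object_trylock(obj))
 *			return LRU_SKIP;	// contended, move on
 *
 *		if (my_object_referenced(obj)) {
 *			my_object_unlock(obj);
 *			return LRU_ROTATE;	// recently used, rotate it
 *		}
 *
 *		// unlink from the LRU (adjusting nr_items) and collect on
 *		// a private dispose list that the caller frees afterwards
 *		list_lru_isolate_move(list, item, dispose);
 *		my_object_unlock(obj);
 *		return LRU_REMOVED;
 *	}
 */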

/**
 * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * This function will scan all elements in a particular @lru, calling the
 * @isolate callback for each of those items, along with the current list
 * spinlock and a caller-provided opaque argument. The @isolate callback can
 * choose to drop the lock internally, but *must* return with the lock held.
 * The callback will return an enum lru_status telling the @lru infrastructure
 * what to do with the object being scanned.
 *
 * Please note that @nr_to_walk does not mean how many objects will be freed,
 * just how many objects will be scanned.
 *
 * Return: the number of objects effectively removed from the LRU.
 */
unsigned long list_lru_walk_one(struct list_lru *lru,
				int nid, struct mem_cgroup *memcg,
				list_lru_walk_cb isolate, void *cb_arg,
				unsigned long *nr_to_walk);
/**
 * list_lru_walk_one_irq: walk a @lru, isolating and disposing freeable items.
 * @lru: the lru pointer.
 * @nid: the node id to scan from.
 * @memcg: the cgroup to scan from.
 * @isolate: callback function that is responsible for deciding what to do with
 *  the item currently being scanned
 * @cb_arg: opaque argument that will be passed to @isolate
 * @nr_to_walk: how many items to scan.
 *
 * Same as list_lru_walk_one() except that the spinlock is acquired with
 * spin_lock_irq().
 */
unsigned long list_lru_walk_one_irq(struct list_lru *lru,
				    int nid, struct mem_cgroup *memcg,
				    list_lru_walk_cb isolate, void *cb_arg,
				    unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
				 list_lru_walk_cb isolate, void *cb_arg,
				 unsigned long *nr_to_walk);

static inline unsigned long
list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
		     list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg,
				 &sc->nr_to_scan);
}

static inline unsigned long
list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
			 list_lru_walk_cb isolate, void *cb_arg)
{
	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
				     &sc->nr_to_scan);
}

static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
	      void *cb_arg, unsigned long nr_to_walk)
{
	long isolated = 0;
	int nid;

	for_each_node_state(nid, N_NORMAL_MEMORY) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		/* nr_to_walk is unsigned, so test for zero rather than <= 0 */
		if (!nr_to_walk)
			break;
	}
	return isolated;
}
#endif /* _LRU_LIST_H */
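
/*
 * Example (editorial sketch): wiring a list_lru into a shrinker using the
 * v6.8 shrinker API (shrinker_alloc()/shrinker_register()). my_lru,
 * my_isolate() (a list_lru_walk_cb as sketched above) and
 * my_dispose_list() are hypothetical.
 *
 *	static unsigned long my_count(struct shrinker *s,
 *				      struct shrink_control *sc)
 *	{
 *		return list_lru_shrink_count(&my_lru, sc);
 *	}
 *
 *	static unsigned long my_scan(struct shrinker *s,
 *				     struct shrink_control *sc)
 *	{
 *		LIST_HEAD(dispose);
 *		unsigned long freed;
 *
 *		freed = list_lru_shrink_walk(&my_lru, sc, my_isolate,
 *					     &dispose);
 *		my_dispose_list(&dispose);	// actually free the objects
 *		return freed;
 *	}
 *
 *	// registration: memcg- and NUMA-aware, so sc->memcg and sc->nid
 *	// select the right sublist on each invocation
 *	struct shrinker *s = shrinker_alloc(SHRINKER_MEMCG_AWARE |
 *					    SHRINKER_NUMA_AWARE, "my-cache");
 *	if (s) {
 *		s->count_objects = my_count;
 *		s->scan_objects = my_scan;
 *		list_lru_init_memcg(&my_lru, s);
 *		shrinker_register(s);
 *	}
 */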