Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm/list_lru: introduce list_lru_shrink_walk_irq()

Provide list_lru_shrink_walk_irq() and let it behave like
list_lru_walk_one() except that it locks the spinlock with
spin_lock_irq(). This is used by scan_shadow_nodes() because its lock
nests within the i_pages lock which is acquired with IRQs disabled. This change
allows the use of proper locking primitives instead of the hand-crafted
local_irq_disable() plus spin_lock().

There is no EXPORT_SYMBOL provided because the current user is in-kernel
only.

Add list_lru_shrink_walk_irq() which acquires the spinlock with the
proper locking primitives.

Link: http://lkml.kernel.org/r/20180716111921.5365-5-bigeasy@linutronix.de
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

authored by

Sebastian Andrzej Siewior and committed by
Linus Torvalds
6b51e881 6e018968

+42 -6
+25
include/linux/list_lru.h
··· 166 166 int nid, struct mem_cgroup *memcg, 167 167 list_lru_walk_cb isolate, void *cb_arg, 168 168 unsigned long *nr_to_walk); 169 + /** 170 + * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items. 171 + * @lru: the lru pointer. 172 + * @nid: the node id to scan from. 173 + * @memcg: the cgroup to scan from. 174 + * @isolate: callback function that is responsible for deciding what to do with 175 + * the item currently being scanned 176 + * @cb_arg: opaque type that will be passed to @isolate 177 + * @nr_to_walk: how many items to scan. 178 + * 179 + * Same as @list_lru_walk_one except that the spinlock is acquired with 180 + * spin_lock_irq(). 181 + */ 182 + unsigned long list_lru_walk_one_irq(struct list_lru *lru, 183 + int nid, struct mem_cgroup *memcg, 184 + list_lru_walk_cb isolate, void *cb_arg, 185 + unsigned long *nr_to_walk); 169 186 unsigned long list_lru_walk_node(struct list_lru *lru, int nid, 170 187 list_lru_walk_cb isolate, void *cb_arg, 171 188 unsigned long *nr_to_walk); ··· 193 176 { 194 177 return list_lru_walk_one(lru, sc->nid, sc->memcg, isolate, cb_arg, 195 178 &sc->nr_to_scan); 179 + } 180 + 181 + static inline unsigned long 182 + list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc, 183 + list_lru_walk_cb isolate, void *cb_arg) 184 + { 185 + return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg, 186 + &sc->nr_to_scan); 196 187 } 197 188 198 189 static inline unsigned long
+15
mm/list_lru.c
··· 282 282 } 283 283 EXPORT_SYMBOL_GPL(list_lru_walk_one); 284 284 285 + unsigned long 286 + list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg, 287 + list_lru_walk_cb isolate, void *cb_arg, 288 + unsigned long *nr_to_walk) 289 + { 290 + struct list_lru_node *nlru = &lru->node[nid]; 291 + unsigned long ret; 292 + 293 + spin_lock_irq(&nlru->lock); 294 + ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg, 295 + nr_to_walk); 296 + spin_unlock_irq(&nlru->lock); 297 + return ret; 298 + } 299 + 285 300 unsigned long list_lru_walk_node(struct list_lru *lru, int nid, 286 301 list_lru_walk_cb isolate, void *cb_arg, 287 302 unsigned long *nr_to_walk)
+2 -6
mm/workingset.c
··· 483 483 static unsigned long scan_shadow_nodes(struct shrinker *shrinker, 484 484 struct shrink_control *sc) 485 485 { 486 - unsigned long ret; 487 - 488 486 /* list_lru lock nests inside the IRQ-safe i_pages lock */ 489 - local_irq_disable(); 490 - ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL); 491 - local_irq_enable(); 492 - return ret; 487 + return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate, 488 + NULL); 493 489 } 494 490 495 491 static struct shrinker workingset_shadow_shrinker = {