Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

memcg: no irq disable for memcg stock lock

There is no need to disable irqs to use the memcg per-cpu stock, so let's just
not do that. One consequence of this change is that if the kernel, while in
task context, is holding the memcg stock lock and that cpu gets interrupted,
then memcg charges on that cpu in the irq context will take the slow path of
memcg charging. However, that should be super rare and should be fine in
general.

Link: https://lkml.kernel.org/r/20250506225533.2580386-5-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

authored by

Shakeel Butt and committed by
Andrew Morton
9e619cd4 c80509ef

+6 -9
+6 -9
mm/memcontrol.c
··· 1829 1829 { 1830 1830 struct memcg_stock_pcp *stock; 1831 1831 uint8_t stock_pages; 1832 - unsigned long flags; 1833 1832 bool ret = false; 1834 1833 int i; 1835 1834 1836 1835 if (nr_pages > MEMCG_CHARGE_BATCH || 1837 - !local_trylock_irqsave(&memcg_stock.lock, flags)) 1836 + !local_trylock(&memcg_stock.lock)) 1838 1837 return ret; 1839 1838 1840 1839 stock = this_cpu_ptr(&memcg_stock); ··· 1850 1851 break; 1851 1852 } 1852 1853 1853 - local_unlock_irqrestore(&memcg_stock.lock, flags); 1854 + local_unlock(&memcg_stock.lock); 1854 1855 1855 1856 return ret; 1856 1857 } ··· 1894 1895 static void drain_local_memcg_stock(struct work_struct *dummy) 1895 1896 { 1896 1897 struct memcg_stock_pcp *stock; 1897 - unsigned long flags; 1898 1898 1899 1899 if (WARN_ONCE(!in_task(), "drain in non-task context")) 1900 1900 return; 1901 1901 1902 - local_lock_irqsave(&memcg_stock.lock, flags); 1902 + local_lock(&memcg_stock.lock); 1903 1903 1904 1904 stock = this_cpu_ptr(&memcg_stock); 1905 1905 drain_stock_fully(stock); 1906 1906 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); 1907 1907 1908 - local_unlock_irqrestore(&memcg_stock.lock, flags); 1908 + local_unlock(&memcg_stock.lock); 1909 1909 } 1910 1910 1911 1911 static void drain_local_obj_stock(struct work_struct *dummy) ··· 1929 1931 struct memcg_stock_pcp *stock; 1930 1932 struct mem_cgroup *cached; 1931 1933 uint8_t stock_pages; 1932 - unsigned long flags; 1933 1934 bool success = false; 1934 1935 int empty_slot = -1; 1935 1936 int i; ··· 1943 1946 VM_WARN_ON_ONCE(mem_cgroup_is_root(memcg)); 1944 1947 1945 1948 if (nr_pages > MEMCG_CHARGE_BATCH || 1946 - !local_trylock_irqsave(&memcg_stock.lock, flags)) { 1949 + !local_trylock(&memcg_stock.lock)) { 1947 1950 /* 1948 1951 * In case of larger than batch refill or unlikely failure to 1949 1952 * lock the percpu memcg_stock.lock, uncharge memcg directly. 
··· 1978 1981 WRITE_ONCE(stock->nr_pages[i], nr_pages); 1979 1982 } 1980 1983 1981 - local_unlock_irqrestore(&memcg_stock.lock, flags); 1984 + local_unlock(&memcg_stock.lock); 1982 1985 } 1983 1986 1984 1987 static bool is_memcg_drain_needed(struct memcg_stock_pcp *stock,