/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;
/*
 * The corresponding mem_cgroup_stat_names array is defined in mm/memcontrol.c;
 * the two lists must be kept in sync with each other.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};
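
/*
 * For reference, the matching array of user-visible names in mm/memcontrol.c
 * has roughly this shape (sketch only; the authoritative copy lives there and
 * must stay index-for-index in sync with the enum above):
 *
 *	static const char * const mem_cgroup_stat_names[] = {
 *		"cache", "rss", "rss_huge", "mapped_file", "writeback", "swap",
 *	};
 */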

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
			bool lrucare);
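
/*
 * Usage sketch (illustrative only, not part of this header): a caller that
 * charges a new page is expected to follow a try/commit/cancel sequence.
 * The local names below are hypothetical.
 *
 *	struct mem_cgroup *memcg;
 *
 *	if (mem_cgroup_try_charge(page, mm, GFP_KERNEL, &memcg))
 *		return -ENOMEM;
 *	// ... map the page or add it to the page cache ...
 *	if (inserted_ok)
 *		mem_cgroup_commit_charge(page, memcg, false);
 *	else
 *		mem_cgroup_cancel_charge(page, memcg);
 */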

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
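
/*
 * Usage sketch (illustrative only): the hierarchy iterator is normally driven
 * from a do/while loop, with mem_cgroup_iter_break() used on early exit so the
 * reference held on the current position is dropped.
 *
 *	struct mem_cgroup_reclaim_cookie reclaim = { .zone = zone, .priority = prio };
 *	struct mem_cgroup *memcg;
 *
 *	memcg = mem_cgroup_iter(root, NULL, &reclaim);
 *	do {
 *		// ... reclaim from this memcg ...
 *		if (done) {
 *			mem_cgroup_iter_break(root, memcg);
 *			break;
 *		}
 *	} while ((memcg = mem_cgroup_iter(root, memcg, &reclaim)));
 */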

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);
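
/*
 * Usage sketch (illustrative only): a user-space fault is bracketed by
 * oom_enable/oom_disable, and a task that was marked as being in a memcg OOM
 * situation finishes the OOM handling afterwards, roughly:
 *
 *	mem_cgroup_oom_enable();
 *	ret = do_the_fault(...);		// hypothetical fault handler
 *	mem_cgroup_oom_disable();
 *	if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
 *		mem_cgroup_oom_synchronize(false);
 */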

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (memory_cgrp_subsys.disabled)
		return true;
	return false;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
					 unsigned long *flags);

extern atomic_t memcg_moving;

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	rcu_read_lock();
	*locked = false;
	if (atomic_read(&memcg_moving))
		__mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
				       unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	if (mem_cgroup_disabled())
		return;
	if (*locked)
		__mem_cgroup_end_update_page_stat(page, flags);
	rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
				 enum mem_cgroup_stat_index idx,
				 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(page, idx, -1);
}
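
/*
 * Usage sketch (illustrative only): page statistics updates are bracketed by
 * the begin/end helpers so they stay consistent with charge moving, e.g. when
 * a page becomes file-mapped:
 *
 *	bool locked;
 *	unsigned long flags;
 *
 *	mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *	// ... update the mapcount or other page state ...
 *	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */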

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *oldpage,
				      struct page *newpage,
				      bool lrucare)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
		*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check that the cache is valid (it is either valid or NULL).
 * slab_mutex must be held while looping over these caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)

static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power to not incur any overhead
 * for non-memcg users of the kmem functions -- not even a function call, if
 * we can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case we
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
				 int order);
void __memcg_kmem_commit_charge(struct page *page,
				struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);

int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s,
			     struct kmem_cache *root_cache);
void memcg_free_cache_params(struct kmem_cache *s);

int memcg_update_cache_size(struct kmem_cache *s, int num_groups);
void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);

int __memcg_cleanup_cache_params(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg to which the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and have this allocation
	 * unaccounted. We could in theory charge it with
	 * res_counter_charge_nofail, but we hope those allocations are rare,
	 * and won't be worth the trouble.
	 */
	if (gfp & __GFP_NOFAIL)
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the task is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success or
 * failure of the allocation. If @page is NULL, this function will revert the
 * charges. Otherwise, it will commit @memcg to the corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}
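
/*
 * Usage sketch (illustrative only): a kmem page allocation that should be
 * accounted is bracketed by newpage_charge and commit_charge, and the charge
 * is dropped again with uncharge_pages when the pages are freed:
 *
 *	struct mem_cgroup *memcg;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp, order);
 *	memcg_kmem_commit_charge(page, memcg, order);	// reverts if page == NULL
 *	...
 *	memcg_kmem_uncharge_pages(page, order);		// at free time
 */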

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * All memory allocated from a per-memcg cache is charged to the owner memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
}
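
/*
 * Usage sketch (illustrative only): a slab allocation path is expected to
 * reroute the request through the current task's per-memcg cache before
 * allocating, roughly:
 *
 *	cachep = memcg_kmem_get_cache(cachep, flags);
 *	obj = do_slab_alloc(cachep, flags);	// hypothetical allocator entry
 */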
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg,
		struct kmem_cache *s, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void memcg_free_cache_params(struct kmem_cache *s)
{
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */