Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author: Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

/* Stats that can be updated by the kernel. */
enum mem_cgroup_page_stat_item {
        MEMCG_NR_FILE_MAPPED, /* # of pages charged as file rss */
};

struct mem_cgroup_reclaim_cookie {
        struct zone *zone;
        int priority;
        unsigned int generation;
};

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions taking a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself; it only reclaims memory from all available
 * zones. The "where do I want memory from" bits of gfp_mask therefore have
 * no meaning, and any bits of that field could be used, but following a
 * rule avoids ambiguity: a charge function's gfp_mask should be set to
 * GFP_KERNEL or (gfp_mask & GFP_RECLAIM_MASK).
 * (Of course, if memcg does allocate memory in the future, GFP_KERNEL is
 * the sane choice.)
 */

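/*
 * Illustrative sketch only (not a declaration from this header): a
 * page-cache charge site follows the rule above by passing the caller's
 * mask filtered down to its reclaim bits:
 *
 *        error = mem_cgroup_cache_charge(page, current->mm,
 *                                        gfp_mask & GFP_RECLAIM_MASK);
 *        if (error)
 *                return error;
 */
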
extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask);
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t mask, struct mem_cgroup **memcgp);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *memcg);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg);

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                        gfp_t gfp_mask);

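/*
 * Illustrative sketch of the swap-in charge protocol as a fault handler
 * such as do_swap_page() would use it (error handling trimmed):
 *
 *        struct mem_cgroup *memcg;
 *
 *        if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &memcg))
 *                return VM_FAULT_OOM;
 *        ... install the pte ...
 *        mem_cgroup_commit_charge_swapin(page, memcg);
 *
 * and on any failure after a successful try_charge:
 *
 *        mem_cgroup_cancel_charge_swapin(memcg);
 */
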
struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

/* For coalescing uncharges, to reduce memcg overhead */
extern void mem_cgroup_uncharge_start(void);
extern void mem_cgroup_uncharge_end(void);

extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);

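/*
 * Illustrative sketch only: a caller that uncharges many pages back to
 * back, e.g. while truncating a mapping, brackets the loop so memcg can
 * batch the res_counter updates instead of touching them once per page:
 *
 *        mem_cgroup_uncharge_start();
 *        for each page in the range
 *                mem_cgroup_uncharge_cache_page(page);
 *        mem_cgroup_uncharge_end();
 */
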
extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                        int order);
bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
                                  struct mem_cgroup *memcg);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont);

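/*
 * Hierarchy-aware match: returns non-zero when the task owning @mm is in
 * @cgroup itself or in one of @cgroup's descendants.
 */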
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
        struct mem_cgroup *memcg;
        int match;

        rcu_read_lock();
        memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
        match = __mem_cgroup_same_or_subtree(cgroup, memcg);
        rcu_read_unlock();
        return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

extern int
mem_cgroup_prepare_migration(struct page *page,
        struct page *newpage, struct mem_cgroup **memcgp, gfp_t gfp_mask);
extern void mem_cgroup_end_migration(struct mem_cgroup *memcg,
        struct page *oldpage, struct page *newpage, bool migration_ok);
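
/*
 * Illustrative sketch of the migration protocol (error paths trimmed):
 * the charge is prepared against the new page before the copy and either
 * committed or rolled back by mem_cgroup_end_migration() afterwards:
 *
 *        struct mem_cgroup *memcg;
 *
 *        if (mem_cgroup_prepare_migration(page, newpage, &memcg, GFP_KERNEL))
 *                goto out;
 *        ... copy contents and remap ...
 *        mem_cgroup_end_migration(memcg, page, newpage, migration_ok);
 */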

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
                                   struct mem_cgroup *,
                                   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);

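/*
 * Illustrative sketch: the canonical hierarchy walk. With a NULL cookie
 * the loop visits every memcg under @root exactly once:
 *
 *        struct mem_cgroup *iter;
 *
 *        for (iter = mem_cgroup_iter(root, NULL, NULL);
 *             iter != NULL;
 *             iter = mem_cgroup_iter(root, iter, NULL))
 *                do_something(iter);
 *
 * Breaking out of the walk early requires mem_cgroup_iter_break(root, iter)
 * so the reference held on the current position is not leaked.
 */
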
/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
                                        struct task_struct *p);
extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

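/*
 * True when the controller is compiled in but switched off on the kernel
 * command line (cgroup_disable=memory); hot paths test this first so the
 * disabled case only costs a branch.
 */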
static inline bool mem_cgroup_disabled(void)
{
        return mem_cgroup_subsys.disabled;
}

void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked,
                                         unsigned long *flags);

extern atomic_t memcg_moving;

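/*
 * Page-stat updates can race with charges being moved between cgroups.
 * While a move is in flight, memcg_moving is non-zero and updaters must
 * take the heavier lock via __mem_cgroup_begin_update_page_stat(); in the
 * common case RCU protection alone is enough.
 */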
static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        rcu_read_lock();
        *locked = false;
        if (atomic_read(&memcg_moving))
                __mem_cgroup_begin_update_page_stat(page, locked, flags);
}

void __mem_cgroup_end_update_page_stat(struct page *page,
                                unsigned long *flags);
static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
        if (mem_cgroup_disabled())
                return;
        if (*locked)
                __mem_cgroup_end_update_page_stat(page, flags);
        rcu_read_unlock();
}

void mem_cgroup_update_page_stat(struct page *page,
                                 enum mem_cgroup_page_stat_item idx,
                                 int val);

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
        mem_cgroup_update_page_stat(page, idx, -1);
}

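/*
 * Illustrative sketch: a stat-update site such as the file-rmap accounting
 * brackets the update so it cannot race with charge moving:
 *
 *        bool locked;
 *        unsigned long flags;
 *
 *        mem_cgroup_begin_update_page_stat(page, &locked, &flags);
 *        mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 *        mem_cgroup_end_update_page_stat(page, &locked, &flags);
 */
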
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned);
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg);

void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
                                        struct mm_struct *mm, gfp_t gfp_mask)
{
        return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
                struct page *page, gfp_t gfp_mask, struct mem_cgroup **memcgp)
{
        return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
                                        struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge_start(void)
{
}

static inline void mem_cgroup_uncharge_end(void)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
                                                    struct mem_cgroup *memcg)
{
        return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
                                                    struct zone *zone)
{
        return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
        return NULL;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
{
        return NULL;
}

static inline int mm_match_cgroup(struct mm_struct *mm,
                struct mem_cgroup *memcg)
{
        return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
                                     const struct mem_cgroup *memcg)
{
        return 1;
}

static inline struct cgroup_subsys_state
                *mem_cgroup_css(struct mem_cgroup *memcg)
{
        return NULL;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct page *newpage,
                struct mem_cgroup **memcgp, gfp_t gfp_mask)
{
        return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *memcg,
                struct page *oldpage, struct page *newpage, bool migration_ok)
{
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
                struct mem_cgroup *prev,
                struct mem_cgroup_reclaim_cookie *reclaim)
{
        return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
                                         struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
        return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline int
mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
{
        return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
        return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
                           int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline void mem_cgroup_begin_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_end_update_page_stat(struct page *page,
                                        bool *locked, unsigned long *flags)
{
}

static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
                                            gfp_t gfp_mask,
                                            unsigned long *total_scanned)
{
        return 0;
}

static inline
u64 mem_cgroup_get_limit(struct mem_cgroup *memcg)
{
        return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}

static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                struct page *newpage)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
        return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
        UNDER_LIMIT,
        SOFT_LIMIT,
        OVER_LIMIT,
};

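/*
 * Socket memory accounting for the kmem TCP controller; a sketch of the
 * intent, based on the callers in net/core/sock.c: sock_update_memcg()
 * tags a freshly created socket with the current task's memcg so its
 * buffer usage can be charged there, and sock_release_memcg() drops that
 * reference when the socket is destroyed.
 */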
struct sock;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
#endif /* _LINUX_MEMCONTROL_H */