/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/*
 * All "charge" functions with a gfp_mask should use GFP_KERNEL or
 * (gfp_mask & GFP_RECLAIM_MASK). In the current implementation, memcg does
 * not allocate memory itself but reclaims memory from all available zones,
 * so the "where do I want memory from" bits of gfp_mask have no meaning.
 * Any bits of that field could therefore be used, but having a rule avoids
 * ambiguity: a charge function's gfp_mask should be set to either GFP_KERNEL
 * or gfp_mask & GFP_RECLAIM_MASK.
 * (Of course, should memcg allocate memory in the future, GFP_KERNEL is sane.)
 */

extern int mem_cgroup_newpage_charge(struct page *page, struct mm_struct *mm,
				gfp_t gfp_mask);
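/*
 * Illustrative sketch (not part of the original header): a caller charging a
 * newly faulted anonymous page would follow the rule above and simply pass
 * GFP_KERNEL, with no placement bits such as __GFP_HIGHMEM:
 *
 *	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))
 *		return VM_FAULT_OOM;
 *
 * The exact error handling is caller-specific.
 */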
/* for swap handling */
extern int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t mask, struct mem_cgroup **ptr);
extern void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr);
extern void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr);
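/*
 * Illustrative sketch (an assumed caller-side pattern, not part of the
 * original header) of the two-phase swap-in charge protocol: the charge is
 * reserved first, then committed once the page is actually mapped, or
 * cancelled if the fault fails in between.
 *
 *	struct mem_cgroup *ptr;
 *
 *	if (mem_cgroup_try_charge_swapin(mm, page, GFP_KERNEL, &ptr))
 *		return -ENOMEM;
 *	... map the page; on failure call mem_cgroup_cancel_charge_swapin(ptr) ...
 *	mem_cgroup_commit_charge_swapin(page, ptr);
 */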

extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask);
extern void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru);
extern void mem_cgroup_del_lru(struct page *page);
extern void mem_cgroup_move_lists(struct page *page,
				  enum lru_list from, enum lru_list to);
extern void mem_cgroup_uncharge_page(struct page *page);
extern void mem_cgroup_uncharge_cache_page(struct page *page);
extern int mem_cgroup_shrink_usage(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask);

extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
					struct list_head *dst,
					unsigned long *scanned, int order,
					int mode, struct zone *z,
					struct mem_cgroup *mem_cont,
					int active, int file);
extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask);
int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);

extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

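/* Does the mm's owner task currently belong to the given memory cgroup? */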
static inline
int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
{
	struct mem_cgroup *mem;

	rcu_read_lock();
	mem = mem_cgroup_from_task(mm->owner);
	rcu_read_unlock();
	return cgroup == mem;
}

extern int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr);
extern void mem_cgroup_end_migration(struct mem_cgroup *mem,
	struct page *oldpage, struct page *newpage);
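/*
 * Illustrative sketch (an assumed caller-side pattern, not part of the
 * original header): the memcg charge is prepared before a page is migrated
 * and settled once migration has either succeeded or failed.
 *
 *	struct mem_cgroup *mem = NULL;
 *
 *	if (mem_cgroup_prepare_migration(page, &mem))
 *		return -ENOMEM;
 *	... copy page to newpage, remap ...
 *	mem_cgroup_end_migration(mem, page, newpage);
 */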

/*
 * For memory reclaim.
 */
extern int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem);
extern long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem);

extern int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem);
extern void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
							int priority);
extern void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
							int priority);
int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg);
unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg,
				       struct zone *zone,
				       enum lru_list lru);
struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
						      struct zone *zone);
struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (mem_cgroup_subsys.disabled)
		return true;
	return false;
}

extern bool mem_cgroup_oom_called(struct task_struct *task);

#else /* CONFIG_CGROUP_MEM_RES_CTLR */
struct mem_cgroup;

static inline int mem_cgroup_newpage_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_cache_charge(struct page *page,
					struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
		struct page *page, gfp_t gfp_mask, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_commit_charge_swapin(struct page *page,
					struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *ptr)
{
}

static inline void mem_cgroup_uncharge_page(struct page *page)
{
}

static inline void mem_cgroup_uncharge_cache_page(struct page *page)
{
}

static inline int mem_cgroup_shrink_usage(struct page *page,
			struct mm_struct *mm, gfp_t gfp_mask)
{
	return 0;
}

static inline void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
{
}

static inline void mem_cgroup_del_lru(struct page *page)
{
}

static inline void
mem_cgroup_move_lists(struct page *page, enum lru_list from, enum lru_list to)
{
}

static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
{
	return 1;
}

static inline int task_in_mem_cgroup(struct task_struct *task,
				     const struct mem_cgroup *mem)
{
	return 1;
}

static inline int
mem_cgroup_prepare_migration(struct page *page, struct mem_cgroup **ptr)
{
	return 0;
}

static inline void mem_cgroup_end_migration(struct mem_cgroup *mem,
					struct page *oldpage,
					struct page *newpage)
{
}

static inline int mem_cgroup_calc_mapped_ratio(struct mem_cgroup *mem)
{
	return 0;
}
static inline long mem_cgroup_reclaim_imbalance(struct mem_cgroup *mem)
{
	return 0;
}

static inline int mem_cgroup_get_reclaim_priority(struct mem_cgroup *mem)
{
	return 0;
}

static inline void mem_cgroup_note_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline void mem_cgroup_record_reclaim_priority(struct mem_cgroup *mem,
						int priority)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline bool mem_cgroup_oom_called(struct task_struct *task)
{
	return false;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg)
{
	return 1;
}

static inline unsigned long
mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone,
			 enum lru_list lru)
{
	return 0;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
	return NULL;
}

static inline struct zone_reclaim_stat*
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	return NULL;
}

#endif /* CONFIG_CGROUP_MEM_RES_CTLR */

#endif /* _LINUX_MEMCONTROL_H */