/* memcontrol.h - Memory Controller
 *
 * Copyright IBM Corporation, 2007
 * Author Balbir Singh <balbir@linux.vnet.ibm.com>
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 * Author: Pavel Emelianov <xemul@openvz.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef _LINUX_MEMCONTROL_H
#define _LINUX_MEMCONTROL_H
#include <linux/cgroup.h>
#include <linux/vm_event_item.h>
#include <linux/hardirq.h>
#include <linux/jump_label.h>

struct mem_cgroup;
struct page_cgroup;
struct page;
struct mm_struct;
struct kmem_cache;

/*
 * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c;
 * the two lists must be kept in sync.
 */
enum mem_cgroup_stat_index {
	/*
	 * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
	 */
	MEM_CGROUP_STAT_CACHE,		/* # of pages charged as cache */
	MEM_CGROUP_STAT_RSS,		/* # of pages charged as anon rss */
	MEM_CGROUP_STAT_RSS_HUGE,	/* # of pages charged as anon huge */
	MEM_CGROUP_STAT_FILE_MAPPED,	/* # of pages charged as file rss */
	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
	MEM_CGROUP_STAT_NSTATS,
};

struct mem_cgroup_reclaim_cookie {
	struct zone *zone;
	int priority;
	unsigned int generation;
};

#ifdef CONFIG_MEMCG
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

void mem_cgroup_migrate(struct page *oldpage, struct page *newpage,
			bool lrucare);

struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);

bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
				  struct mem_cgroup *memcg);
bool task_in_mem_cgroup(struct task_struct *task,
			const struct mem_cgroup *memcg);

extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);

extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg);
extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css);

static inline
bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg)
{
	struct mem_cgroup *task_memcg;
	bool match;

	rcu_read_lock();
	task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
	match = __mem_cgroup_same_or_subtree(memcg, task_memcg);
	rcu_read_unlock();
	return match;
}

extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);

struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *,
				   struct mem_cgroup *,
				   struct mem_cgroup_reclaim_cookie *);
void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
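
/*
 * Example (an illustrative sketch, not part of this header): the usual
 * pattern for walking a memcg hierarchy with mem_cgroup_iter(). A caller
 * that stops early must call mem_cgroup_iter_break() so the reference
 * held on the current position is dropped; should_stop() is a
 * hypothetical predicate standing in for the caller's exit condition.
 *
 *	struct mem_cgroup *iter;
 *
 *	for (iter = mem_cgroup_iter(root, NULL, NULL);
 *	     iter != NULL;
 *	     iter = mem_cgroup_iter(root, iter, NULL)) {
 *		if (should_stop(iter)) {
 *			mem_cgroup_iter_break(root, iter);
 *			break;
 *		}
 *	}
 */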

/*
 * For memory reclaim.
 */
int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
				      struct task_struct *p);

static inline void mem_cgroup_oom_enable(void)
{
	WARN_ON(current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 1;
}

static inline void mem_cgroup_oom_disable(void)
{
	WARN_ON(!current->memcg_oom.may_oom);
	current->memcg_oom.may_oom = 0;
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return p->memcg_oom.memcg;
}

bool mem_cgroup_oom_synchronize(bool wait);

#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
#endif

static inline bool mem_cgroup_disabled(void)
{
	if (memory_cgrp_subsys.disabled)
		return true;
	return false;
}

struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
					      unsigned long *flags);
void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
			      unsigned long flags);
void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
				 enum mem_cgroup_stat_index idx, int val);

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(memcg, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
	mem_cgroup_update_page_stat(memcg, idx, -1);
}

unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned);

void __mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx);
static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
					     enum vm_event_item idx)
{
	if (mem_cgroup_disabled())
		return;
	__mem_cgroup_count_vm_event(mm, idx);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void mem_cgroup_split_huge_fixup(struct page *head);
#endif

#ifdef CONFIG_DEBUG_VM
bool mem_cgroup_bad_page_check(struct page *page);
void mem_cgroup_print_bad_page(struct page *page);
#endif
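
/*
 * Example (a sketch modeled on the file-mapped accounting in mm/rmap.c,
 * not part of this header): mem_cgroup_begin_page_stat() pins the page's
 * memcg, taking its move lock if charges are being moved, so that the
 * statistics update below cannot race with the page changing memcgs:
 *
 *	struct mem_cgroup *memcg;
 *	unsigned long flags;
 *	bool locked;
 *
 *	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
 *	if (atomic_inc_and_test(&page->_mapcount))
 *		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
 *	mem_cgroup_end_page_stat(memcg, locked, flags);
 */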
#else /* CONFIG_MEMCG */
struct mem_cgroup;

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	return 0;
}

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg)
{
}

static inline void mem_cgroup_uncharge(struct page *page)
{
}

static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}

static inline void mem_cgroup_migrate(struct page *oldpage,
				      struct page *newpage,
				      bool lrucare)
{
}

static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
						    struct mem_cgroup *memcg)
{
	return &zone->lruvec;
}

static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
						    struct zone *zone)
{
	return &zone->lruvec;
}

static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
{
	return NULL;
}

static inline bool mm_match_cgroup(struct mm_struct *mm,
				   struct mem_cgroup *memcg)
{
	return true;
}

static inline bool task_in_mem_cgroup(struct task_struct *task,
				      const struct mem_cgroup *memcg)
{
	return true;
}

static inline struct cgroup_subsys_state
	*mem_cgroup_css(struct mem_cgroup *memcg)
{
	return NULL;
}

static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
		struct mem_cgroup *prev,
		struct mem_cgroup_reclaim_cookie *reclaim)
{
	return NULL;
}

static inline void mem_cgroup_iter_break(struct mem_cgroup *root,
					 struct mem_cgroup *prev)
{
}

static inline bool mem_cgroup_disabled(void)
{
	return true;
}

static inline int
mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
	return 1;
}

static inline unsigned long
mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
	return 0;
}

static inline void
mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
			   int increment)
{
}

static inline void
mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
{
}

static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
					bool *locked, unsigned long *flags)
{
	return NULL;
}

static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
					bool locked, unsigned long flags)
{
}

static inline void mem_cgroup_oom_enable(void)
{
}

static inline void mem_cgroup_oom_disable(void)
{
}

static inline bool task_in_memcg_oom(struct task_struct *p)
{
	return false;
}

static inline bool mem_cgroup_oom_synchronize(bool wait)
{
	return false;
}

static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
}

static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
					    enum mem_cgroup_stat_index idx)
{
}

static inline
unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
					    gfp_t gfp_mask,
					    unsigned long *total_scanned)
{
	return 0;
}

static inline void mem_cgroup_split_huge_fixup(struct page *head)
{
}

static inline
void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx)
{
}
#endif /* CONFIG_MEMCG */

#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM)
static inline bool
mem_cgroup_bad_page_check(struct page *page)
{
	return false;
}

static inline void
mem_cgroup_print_bad_page(struct page *page)
{
}
#endif

enum {
	UNDER_LIMIT,
	SOFT_LIMIT,
	OVER_LIMIT,
};

struct sock;
#if defined(CONFIG_INET) && defined(CONFIG_MEMCG_KMEM)
void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
#else
static inline void sock_update_memcg(struct sock *sk)
{
}
static inline void sock_release_memcg(struct sock *sk)
{
}
#endif /* CONFIG_INET && CONFIG_MEMCG_KMEM */

#ifdef CONFIG_MEMCG_KMEM
extern struct static_key memcg_kmem_enabled_key;

extern int memcg_limited_groups_array_size;

/*
 * Helper macro to loop through all memcg-specific caches. Callers must still
 * check if the cache is valid (it is either valid or NULL); the slab_mutex
 * must be held while looping through these caches.
 */
#define for_each_memcg_cache_index(_idx)	\
	for ((_idx) = 0; (_idx) < memcg_limited_groups_array_size; (_idx)++)
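
/*
 * Example (an illustrative sketch, not part of this header): iterating the
 * per-memcg children of a root cache @s. cache_from_memcg_idx() is assumed
 * to be the index-based lookup helper from mm/slab.h; a slot may
 * legitimately be NULL, and process_child_cache() is a hypothetical
 * callback.
 *
 *	struct kmem_cache *c;
 *	int i;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache_index(i) {
 *		c = cache_from_memcg_idx(s, i);
 *		if (!c)
 *			continue;
 *		process_child_cache(c);
 *	}
 *	mutex_unlock(&slab_mutex);
 */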

static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

/*
 * In general, we'll do everything in our power to avoid incurring any
 * overhead for non-memcg users of the kmem functions. Not even a function
 * call, if we can avoid it.
 *
 * Therefore, we'll inline all those functions so that in the best case, we'll
 * see that kmemcg is off for everybody and proceed quickly. If it is on,
 * we'll still do most of the flag checking inline. We check a lot of
 * conditions, but because they are pretty simple, they are expected to be
 * fast.
 */
bool __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg,
				 int order);
void __memcg_kmem_commit_charge(struct page *page,
				struct mem_cgroup *memcg, int order);
void __memcg_kmem_uncharge_pages(struct page *page, int order);

int memcg_cache_id(struct mem_cgroup *memcg);

void memcg_update_array_size(int num_groups);

struct kmem_cache *
__memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp);

int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order);
void __memcg_uncharge_slab(struct kmem_cache *cachep, int order);

int __memcg_cleanup_cache_params(struct kmem_cache *s);

/**
 * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed.
 * @gfp: the gfp allocation flags.
 * @memcg: a pointer to the memcg this was charged against.
 * @order: allocation order.
 *
 * Returns true if the memcg to which the current task belongs can hold this
 * allocation.
 *
 * We return true automatically if this allocation is not to be accounted to
 * any memcg.
 */
static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	if (!memcg_kmem_enabled())
		return true;

	/*
	 * __GFP_NOFAIL allocations will move on even if charging is not
	 * possible. Therefore we don't even try, and have this allocation
	 * unaccounted. We could in theory charge it with
	 * res_counter_charge_nofail, but we hope those allocations are rare,
	 * and won't be worth the trouble.
	 */
	if (gfp & __GFP_NOFAIL)
		return true;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return true;

	/* If the task is dying, just let it go. */
	if (unlikely(fatal_signal_pending(current)))
		return true;

	return __memcg_kmem_newpage_charge(gfp, memcg, order);
}

/**
 * memcg_kmem_uncharge_pages: uncharge pages from memcg
 * @page: pointer to struct page being freed
 * @order: allocation order.
 *
 * There is no need to specify the memcg here, since it is embedded in the
 * page_cgroup.
 */
static inline void
memcg_kmem_uncharge_pages(struct page *page, int order)
{
	if (memcg_kmem_enabled())
		__memcg_kmem_uncharge_pages(page, order);
}

/**
 * memcg_kmem_commit_charge: embeds correct memcg in a page
 * @page: pointer to struct page recently allocated
 * @memcg: the memcg structure we charged against
 * @order: allocation order.
 *
 * Needs to be called after memcg_kmem_newpage_charge, regardless of success
 * or failure of the allocation. If @page is NULL, this function will revert
 * the charges. Otherwise, it will commit the memcg given by @memcg to the
 * corresponding page_cgroup.
 */
static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
	if (memcg_kmem_enabled() && memcg)
		__memcg_kmem_commit_charge(page, memcg, order);
}
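
/*
 * Example (a sketch modeled on the kmem page allocation path in
 * mm/page_alloc.c, not part of this header): charge first, then allocate,
 * then commit. Passing a NULL @page to memcg_kmem_commit_charge() reverts
 * the charge when the allocation itself failed; the charge is returned at
 * free time with memcg_kmem_uncharge_pages(page, order).
 *
 *	struct mem_cgroup *memcg = NULL;
 *	struct page *page;
 *
 *	if (!memcg_kmem_newpage_charge(gfp_mask, &memcg, order))
 *		return NULL;
 *	page = alloc_pages(gfp_mask, order);
 *	memcg_kmem_commit_charge(page, memcg, order);
 *	return page;
 */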

/**
 * memcg_kmem_get_cache: selects the correct per-memcg cache for allocation
 * @cachep: the original global kmem cache
 * @gfp: allocation flags.
 *
 * All memory allocated from a per-memcg cache is charged to the owner memcg.
 */
static __always_inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	if (!memcg_kmem_enabled())
		return cachep;
	if (gfp & __GFP_NOFAIL)
		return cachep;
	if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
		return cachep;
	if (unlikely(fatal_signal_pending(current)))
		return cachep;

	return __memcg_kmem_get_cache(cachep, gfp);
}
#else
#define for_each_memcg_cache_index(_idx)	\
	for (; NULL; )

static inline bool memcg_kmem_enabled(void)
{
	return false;
}

static inline bool
memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
{
	return true;
}

static inline void memcg_kmem_uncharge_pages(struct page *page, int order)
{
}

static inline void
memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order)
{
}

static inline int memcg_cache_id(struct mem_cgroup *memcg)
{
	return -1;
}

static inline struct kmem_cache *
memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
{
	return cachep;
}
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
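
/*
 * Example (a paraphrase of how the slab allocators use the hooks above,
 * not the exact mm/slub.c code): the allocation fast path remaps a root
 * cache to the current task's per-memcg clone, so every object allocated
 * from it is accounted to that memcg; allocate_object_from() is a
 * hypothetical stand-in for the rest of the fast path.
 *
 *	static void *slab_alloc(struct kmem_cache *s, gfp_t gfpflags)
 *	{
 *		s = memcg_kmem_get_cache(s, gfpflags);
 *		return allocate_object_from(s, gfpflags);
 *	}
 */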