/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>
#include <linux/cgroup_namespace.h>

struct kernel_clone_args;

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000

#ifdef CONFIG_CGROUPS

enum css_task_iter_flags {
	CSS_TASK_ITER_PROCS    = (1U << 0),  /* walk only threadgroup leaders */
	CSS_TASK_ITER_THREADED = (1U << 1),  /* walk all threaded css_sets in the domain */
	CSS_TASK_ITER_SKIPPED  = (1U << 16), /* internal flags */
};

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

enum cgroup_lifetime_events {
	CGROUP_LIFETIME_ONLINE,
	CGROUP_LIFETIME_OFFLINE,
};

extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern spinlock_t css_set_lock;
extern struct blocking_notifier_head cgroup_lifetime_notifier;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
struct cgroup *cgroup_v1v2_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);

int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
			   struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
			       struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
			     struct kernel_clone_args *kargs);
void cgroup_task_exit(struct task_struct *p);
void cgroup_task_dead(struct task_struct *p);
void cgroup_task_release(struct task_struct *p);
void cgroup_task_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
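 *
 * A minimal usage sketch, assuming the caller owns a css pointer my_css
 * (my_css and nr_online are hypothetical, illustrative names):
 *
 *	struct cgroup_subsys_state *child;
 *	int nr_online = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, my_css)
 *		if (css_is_online(child))
 *			nr_online++;
 *	rcu_read_unlock();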
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
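 *
 * Because every child is visited before its parent, post-order is the
 * natural order for tearing state down from the leaves up. A minimal
 * sketch, assuming a hypothetical root css my_root and a hypothetical
 * per-css cleanup helper my_reclaim_state():
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, my_root)
 *		my_reclaim_state(pos);
 *	rcu_read_unlock();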
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

#ifdef CONFIG_DEBUG_CGROUP_REF
void css_get(struct cgroup_subsys_state *css);
void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
bool css_tryget(struct cgroup_subsys_state *css);
bool css_tryget_online(struct cgroup_subsys_state *css);
void css_put(struct cgroup_subsys_state *css);
void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
#else
#define CGROUP_REF_FN_ATTRS	static inline
#define CGROUP_REF_EXPORT(fn)
#include <linux/cgroup_refcnt.h>
#endif

static inline u64 cgroup_id(const struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
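 *
 * A minimal sketch of that pattern, assuming a hypothetical subsystem lock
 * my_lock which ->css_offline() also takes, and a hypothetical
 * my_attach_state() helper:
 *
 *	spin_lock(&my_lock);
 *	if (!css_is_dying(css))
 *		my_attach_state(css);
 *	spin_unlock(&my_lock);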
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return css->flags & CSS_DYING;
}

static inline bool css_is_online(struct cgroup_subsys_state *css)
{
	return css->flags & CSS_ONLINE;
}

static inline bool css_is_self(struct cgroup_subsys_state *css)
{
	if (css == &css->cgroup->self) {
		/* cgroup::self should not have subsystem association */
		WARN_ON(css->ss != NULL);
		return true;
	}

	return false;
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

extern struct mutex cgroup_mutex;

static inline void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}

static inline void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      rcu_read_lock_sched_held() ||		\
			      lockdep_is_held(&cgroup_mutex) ||		\
			      lockdep_is_held(&css_set_lock) ||		\
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css. The returned css may already have been offlined.
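 *
 * The reference obtained here must eventually be dropped with css_put().
 * A minimal sketch (memory_cgrp_id is used purely as an example subsystem
 * ID; it exists only when the matching controller is built in):
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = task_get_css(current, memory_cgrp_id);
 *	... use css ...
 *	css_put(css);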
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here. A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestors[ancestor->level] == ancestor;
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (ancestor_level < 0 || ancestor_level > cgrp->level)
		return NULL;
	return cgrp->ancestors[ancestor_level];
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
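 *
 * A minimal sketch, assuming the caller already holds a reference on a
 * hypothetical cgroup pointer my_cgrp:
 *
 *	bool in_subtree;
 *
 *	rcu_read_lock();
 *	in_subtree = task_under_cgroup_hierarchy(current, my_cgrp);
 *	rcu_read_unlock();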
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

bool cgroup_psi_enabled(void);

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization. The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup *__cgroup_get_from_id(u64 id);
struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
				  struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
				      struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
				    struct kernel_clone_args *kargs) {}
static inline void cgroup_task_exit(struct task_struct *p) {}
static inline void cgroup_task_dead(struct task_struct *p) {}
static inline void cgroup_task_release(struct task_struct *p) {}
static inline void cgroup_task_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool cgroup_psi_enabled(void)
{
	return false;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
void css_rstat_flush(struct cgroup_subsys_state *css);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
	return skcd->cgroup;
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);

struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);

#endif /* _LINUX_CGROUP_H */