/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/nodemask.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

struct kernel_clone_args;

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN        1
#define CGROUP_WEIGHT_DFL        100
#define CGROUP_WEIGHT_MAX        10000
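
/*
 * For example, CGROUP_WEIGHT_DFL is the geometric mean of the two bounds
 * (sqrt(1 * 10000) == 100), so a knob such as cpu.weight can express up to
 * a 100x boost (10000) or a 100x reduction (1) relative to a sibling left
 * at the default of 100.
 */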

#ifdef CONFIG_CGROUPS

enum css_task_iter_flags {
        CSS_TASK_ITER_PROCS    = (1U << 0),  /* walk only threadgroup leaders */
        CSS_TASK_ITER_THREADED = (1U << 1),  /* walk all threaded css_sets in the domain */
        CSS_TASK_ITER_SKIPPED  = (1U << 16), /* internal flags */
};

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
        struct cgroup_subsys    *ss;
        unsigned int            flags;

        struct list_head        *cset_pos;
        struct list_head        *cset_head;

        struct list_head        *tcset_pos;
        struct list_head        *tcset_head;

        struct list_head        *task_pos;

        struct list_head        *cur_tasks_head;
        struct css_set          *cur_cset;
        struct css_set          *cur_dcset;
        struct task_struct      *cur_task;
        struct list_head        iters_node;     /* css_set->task_iters */
};

enum cgroup_lifetime_events {
        CGROUP_LIFETIME_ONLINE,
        CGROUP_LIFETIME_OFFLINE,
};

extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
extern spinlock_t css_set_lock;
extern struct blocking_notifier_head cgroup_lifetime_notifier;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)                                                      \
        extern struct static_key_true _x ## _cgrp_subsys_enabled_key;  \
        extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)                                       \
        static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)                                        \
        static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
                                         struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
                                             struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
                                                       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
struct cgroup *cgroup_v1v2_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);

int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
                           struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
                               struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
                             struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
                                           struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
                                                    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
                                                     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
                                         struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
                                        struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
                         struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
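
/*
 * The iterator above is used as a start/next/end loop. A minimal sketch,
 * assuming the caller already holds a reference on @css and wants to act
 * on every threadgroup leader attached to it:
 *
 *        struct css_task_iter it;
 *        struct task_struct *task;
 *
 *        css_task_iter_start(css, CSS_TASK_ITER_PROCS, &it);
 *        while ((task = css_task_iter_next(&it)))
 *                Act on @task;
 *        css_task_iter_end(&it);
 *
 * The iterator pins the task it last returned, so @task stays accessible
 * until the following css_task_iter_next() or css_task_iter_end() call.
 */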

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)                                 \
        for ((pos) = css_next_child(NULL, (parent)); (pos);            \
             (pos) = css_next_child((pos), (parent)))
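
/*
 * A minimal usage sketch of the child iterator, assuming @css is the parent
 * being examined; css_has_online_children() works essentially this way:
 *
 *        struct cgroup_subsys_state *child;
 *        bool online = false;
 *
 *        rcu_read_lock();
 *        css_for_each_child(child, css) {
 *                if (child->flags & CSS_ONLINE) {
 *                        online = true;
 *                        break;
 *                }
 *        }
 *        rcu_read_unlock();
 */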

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *        Lock @css's parent and @css;
 *        Inherit state from the parent;
 *        Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *        css_for_each_descendant_pre(@pos, @css) {
 *                Lock @pos;
 *                if (@pos == @css)
 *                        Update @css's state;
 *                else
 *                        Verify @pos is alive and inherit state from its parent;
 *                Unlock @pos;
 *        }
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)                           \
        for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);      \
             (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply the same to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)                          \
        for ((pos) = css_next_descendant_post(NULL, (css)); (pos);     \
             (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)                    \
        for ((task) = cgroup_taskset_first((tset), &(dst_css));        \
             (task);                                                    \
             (task) = cgroup_taskset_next((tset), &(dst_css)))
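
/*
 * The taskset iterators are meant for cgroup_subsys callbacks such as
 * ->attach(). A minimal sketch, with my_attach() as an illustrative
 * callback name:
 *
 *        static void my_attach(struct cgroup_taskset *tset)
 *        {
 *                struct task_struct *task;
 *                struct cgroup_subsys_state *css;
 *
 *                cgroup_taskset_for_each(task, css, tset) {
 *                        Apply the destination @css's state to @task;
 *                }
 *        }
 */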

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)           \
        for ((leader) = cgroup_taskset_first((tset), &(dst_css));      \
             (leader);                                                  \
             (leader) = cgroup_taskset_next((tset), &(dst_css)))       \
                if ((leader) != (leader)->group_leader)                 \
                        ;                                               \
                else

/*
 * Inline functions.
 */

#ifdef CONFIG_DEBUG_CGROUP_REF
void css_get(struct cgroup_subsys_state *css);
void css_get_many(struct cgroup_subsys_state *css, unsigned int n);
bool css_tryget(struct cgroup_subsys_state *css);
bool css_tryget_online(struct cgroup_subsys_state *css);
void css_put(struct cgroup_subsys_state *css);
void css_put_many(struct cgroup_subsys_state *css, unsigned int n);
#else
#define CGROUP_REF_FN_ATTRS     static inline
#define CGROUP_REF_EXPORT(fn)
#include <linux/cgroup_refcnt.h>
#endif

static inline u64 cgroup_id(const struct cgroup *cgrp)
{
        return cgrp->kn->id;
}

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
        return css->flags & CSS_DYING;
}
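
/*
 * css_is_dying() is typically used to fail an operation deterministically
 * once removal has been scheduled, instead of acting on a css that only
 * appears alive. A sketch, with my_write() as an illustrative cftype write
 * handler:
 *
 *        static ssize_t my_write(struct kernfs_open_file *of, char *buf,
 *                                size_t nbytes, loff_t off)
 *        {
 *                struct cgroup_subsys_state *css = of_css(of);
 *
 *                if (css_is_dying(css))
 *                        return -ENODEV;
 *                Apply the new configuration;
 *        }
 */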

static inline bool css_is_self(struct cgroup_subsys_state *css)
{
        if (css == &css->cgroup->self) {
                /* cgroup::self should not have subsystem association */
                WARN_ON(css->ss != NULL);
                return true;
        }

        return false;
}

static inline void cgroup_get(struct cgroup *cgrp)
{
        css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
        return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
        css_put(&cgrp->self);
}

extern struct mutex cgroup_mutex;

static inline void cgroup_lock(void)
{
        mutex_lock(&cgroup_mutex);
}

static inline void cgroup_unlock(void)
{
        mutex_unlock(&cgroup_mutex);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
#define task_css_set_check(task, __c)                                   \
        rcu_dereference_check((task)->cgroups,                          \
                rcu_read_lock_sched_held() ||                           \
                lockdep_is_held(&cgroup_mutex) ||                       \
                lockdep_is_held(&css_set_lock) ||                       \
                ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)                                   \
        rcu_dereference((task)->cgroups)
#endif
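
/*
 * The @__c argument lets a subsystem teach RCU lockdep about its own
 * serialization. A sketch, assuming a subsystem-private my_lock that is
 * also taken in its ->attach() path:
 *
 *        mutex_lock(&my_lock);
 *        cset = task_css_set_check(task, lockdep_is_held(&my_lock));
 *        Use @cset under my_lock;
 *        mutex_unlock(&my_lock);
 */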

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)                            \
        task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
        return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
                                                   int subsys_id)
{
        return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css. The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();
        while (true) {
                css = task_css(task, subsys_id);
                /*
                 * Can't use css_tryget_online() here. A task which has
                 * PF_EXITING set may stay associated with an offline css.
                 * If such task calls this function, css_tryget_online()
                 * will keep failing.
                 */
                if (likely(css_tryget(css)))
                        break;
                cpu_relax();
        }
        rcu_read_unlock();
        return css;
}
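
/*
 * task_get_css() is the form to use when the css must remain valid after
 * the current RCU read section, e.g. when it is stashed in another object.
 * A minimal sketch (any *_cgrp_id enum value works the same way):
 *
 *        struct cgroup_subsys_state *css;
 *
 *        css = task_get_css(current, cpu_cgrp_id);
 *        Use @css; the held reference keeps it valid even if the task
 *        exits or migrates in the meantime;
 *        css_put(css);
 */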

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
        return task_css_check(task, subsys_id, true) ==
                init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
{
        return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
        return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
        struct cgroup_subsys_state *parent_css = cgrp->self.parent;

        if (parent_css)
                return container_of(parent_css, struct cgroup, self);
        return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
                                        struct cgroup *ancestor)
{
        if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
                return false;
        return cgrp->ancestors[ancestor->level] == ancestor;
}
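
/*
 * The check above is O(1) because every cgroup caches its ancestry indexed
 * by level, including itself. For example, with root (level 0) -> A (level
 * 1) -> B (level 2), B->ancestors[] is { root, A, B }, so
 * cgroup_is_descendant(B, A) reduces to B->ancestors[1] == A (true), while
 * cgroup_is_descendant(A, B) already fails the level comparison.
 */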

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
                                             int ancestor_level)
{
        if (ancestor_level < 0 || ancestor_level > cgrp->level)
                return NULL;
        return cgrp->ancestors[ancestor_level];
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        struct css_set *cset = task_css_set(task);

        return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
        return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
                cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
        return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
        return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
        return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
        return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_path(cgrp->kn, buf, buflen);
}
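
/*
 * A minimal sketch of the path helpers, e.g. for a debug printout of the
 * current task's default-hierarchy cgroup (the fixed-size buffer is purely
 * illustrative):
 *
 *        char buf[256];
 *        int len;
 *
 *        rcu_read_lock();
 *        len = cgroup_path(task_dfl_cgroup(current), buf, sizeof(buf));
 *        if (len >= 0 && len < sizeof(buf))
 *                pr_info("running in %s\n", buf);
 *        rcu_read_unlock();
 */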

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
        pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
        pr_cont_kernfs_path(cgrp->kn);
}

bool cgroup_psi_enabled(void);

static inline void cgroup_init_kthreadd(void)
{
        /*
         * kthreadd is inherited by all kthreads, keep it in the root so
         * that the new kthreads are guaranteed to stay in the root until
         * initialization is finished.
         */
        current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
        /*
         * This kthread finished initialization. The creator should have
         * set PF_NO_SETAFFINITY if this kthread should stay in the root.
         */
        current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
                                         struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
                                    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
                                  struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
                                      struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
                                    struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
        return NULL;
}

static inline bool cgroup_psi_enabled(void)
{
        return false;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void css_rstat_updated(struct cgroup_subsys_state *css, int cpu);
void css_rstat_flush(struct cgroup_subsys_state *css);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
                                         u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
                                    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
                                          u64 delta_exec)
{
        struct cgroup *cgrp;

        cpuacct_charge(task, delta_exec);

        cgrp = task_dfl_cgroup(task);
        if (cgroup_parent(cgrp))
                __cgroup_account_cputime(cgrp, delta_exec);
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
                                                enum cpu_usage_stat index,
                                                u64 delta_exec)
{
        struct cgroup *cgrp;

        cpuacct_account_field(task, index, delta_exec);

        cgrp = task_dfl_cgroup(task);
        if (cgroup_parent(cgrp))
                __cgroup_account_cputime_field(cgrp, index, delta_exec);
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
                                          u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
                                                enum cpu_usage_stat index,
                                                u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
        return skcd->cgroup;
}

#else /* !CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
        struct ns_common        ns;
        struct user_namespace   *user_ns;
        struct ucounts          *ucounts;
        struct css_set          *root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
                                        struct user_namespace *user_ns,
                                        struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
                   struct cgroup_namespace *ns);

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
        refcount_inc(&ns->ns.count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
        if (refcount_dec_and_test(&ns->ns.count))
                free_cgroup_ns(ns);
}

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
               struct cgroup_namespace *old_ns)
{
        return old_ns;
}

static inline void get_cgroup_ns(struct cgroup_namespace *ns) { }
static inline void put_cgroup_ns(struct cgroup_namespace *ns) { }

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
                                 struct cgroup *dst);

static inline bool cgroup_task_frozen(struct task_struct *task)
{
        return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_frozen(struct task_struct *task)
{
        return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
        percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
        percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id);

struct cgroup_of_peak *of_peak(struct kernfs_open_file *of);

#endif /* _LINUX_CGROUP_H */