/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

struct kernel_clone_args;

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000

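/*
 * Example (illustrative sketch, not taken from an in-tree controller): a
 * weight knob handler would clamp user input to this range.  Note that
 * CGROUP_WEIGHT_DFL is the geometric mean of MIN and MAX (100 * 100 ==
 * 10000), which is what allows 100x in both directions.
 * my_weight_write_u64() and my_css() below are hypothetical.
 *
 *	static int my_weight_write_u64(struct cgroup_subsys_state *css,
 *				       struct cftype *cft, u64 weight)
 *	{
 *		if (weight < CGROUP_WEIGHT_MIN || weight > CGROUP_WEIGHT_MAX)
 *			return -ERANGE;
 *		my_css(css)->weight = weight;
 *		return 0;
 *	}
 */
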
/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;

	struct list_head		*cur_tasks_head;
	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);
struct cgroup *cgroup_v1v2_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);
void cgroup_file_show(struct cgroup_file *cfile, bool show);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p,
			   struct kernel_clone_args *kargs);
extern void cgroup_cancel_fork(struct task_struct *p,
			       struct kernel_clone_args *kargs);
extern void cgroup_post_fork(struct task_struct *p,
			     struct kernel_clone_args *kargs);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);

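/*
 * Example (illustrative): cgroup_parse_float() parses a decimal string and
 * stores the value with the decimal point shifted right by @dec_shift
 * digits, so a control file accepting a percentage with two decimal places
 * could do the following ("12.34" yields 1234):
 *
 *	s64 pct;
 *
 *	if (cgroup_parse_float(buf, 2, &pct))
 *		return -EINVAL;
 */
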
/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);

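/*
 * Example (illustrative): walking every task associated with a css with the
 * iterator above.  Passing CSS_TASK_ITER_PROCS instead of 0 would visit
 * only threadgroup leaders.  do_something() is a hypothetical helper.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		do_something(task);
 *	css_task_iter_end(&it);
 */
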
/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))

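/*
 * Example (illustrative): counting the children of @parent.  The walk must
 * be under rcu_read_lock() and is subject to the on/offlining caveats
 * described above.
 *
 *	struct cgroup_subsys_state *child;
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent)
 *		nr++;
 *	rcu_read_unlock();
 */
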
/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))

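/*
 * Example (illustrative): a pre-order walk which temporarily drops the RCU
 * read lock, as permitted above, by pinning @pos with a reference first.
 * do_sleeping_work() is a hypothetical helper that may block.
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, root) {
 *		if (!css_tryget_online(pos))
 *			continue;
 *		rcu_read_unlock();
 *
 *		do_sleeping_work(pos);
 *
 *		rcu_read_lock();
 *		css_put(pos);
 *	}
 *	rcu_read_unlock();
 */
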
/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the visibility guarantee example described for the pre-order
 * walk does not apply in the same way to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))

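/*
 * Example (illustrative): a cgroup_subsys ->attach() callback applying a
 * per-controller setting to every migrating task.  my_attach() and
 * apply_my_policy() are hypothetical.
 *
 *	static void my_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			apply_my_policy(task, css);
 *	}
 */
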
/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

static inline u64 cgroup_id(const struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released. This function doesn't care whether @css is on or
 * offline. The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function. Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online. The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}

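/*
 * Example (illustrative): pinning a css found under RCU.  The RCU read
 * section only keeps the pointer valid while it is held;
 * css_tryget_online() converts that into a reference which remains valid
 * after rcu_read_unlock() and must eventually be dropped with css_put().
 *
 *	rcu_read_lock();
 *	css = task_css(task, subsys_id);
 *	if (!css_tryget_online(css))
 *		css = NULL;
 *	rcu_read_unlock();
 *
 *	if (css) {
 *		... use css ...
 *		css_put(css);
 *	}
 */
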
/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

extern struct mutex cgroup_mutex;

static inline void cgroup_lock(void)
{
	mutex_lock(&cgroup_mutex);
}

static inline void cgroup_unlock(void)
{
	mutex_unlock(&cgroup_mutex);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
		rcu_read_lock_sched_held() ||				\
		lockdep_is_held(&cgroup_mutex) ||			\
		lockdep_is_held(&css_set_lock) ||			\
		((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css. The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here. A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestors[ancestor->level] == ancestor;
}

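/*
 * Example (illustrative): testing whether @task currently lives somewhere
 * under @parent_cgrp on the default hierarchy (see also
 * task_under_cgroup_hierarchy() below).  The test itself is O(1): a cgroup
 * records its whole ancestry in ->ancestors[] indexed by level, so
 * descendancy reduces to one array lookup and a level comparison.
 *
 *	bool under;
 *
 *	rcu_read_lock();
 *	under = cgroup_is_descendant(task_dfl_cgroup(task), parent_cgrp);
 *	rcu_read_unlock();
 */
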
/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find ancestor of cgroup at specified level starting from root if it exists
 * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
 * @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (ancestor_level < 0 || ancestor_level > cgrp->level)
		return NULL;
	return cgrp->ancestors[ancestor_level];
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called under any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}

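/*
 * Example (illustrative): emitting a cgroup's path in a log message.  The
 * pr_cont_*() variants below avoid the need for a caller-provided buffer:
 *
 *	pr_warn("limit exceeded in ");
 *	pr_cont_cgroup_path(cgrp);
 *	pr_cont("\n");
 */
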
static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

bool cgroup_psi_enabled(void);

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization. The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
struct cgroup *cgroup_get_from_id(u64 id);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(const struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline void cgroup_lock(void) {}
static inline void cgroup_unlock(void) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p,
				  struct kernel_clone_args *kargs) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p,
				      struct kernel_clone_args *kargs) {}
static inline void cgroup_post_fork(struct task_struct *p,
				    struct kernel_clone_args *kargs) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool cgroup_psi_enabled(void)
{
	return false;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

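/*
 * Example (illustrative): a controller reporting a counter aggregated via
 * rstat.  Hot paths call cgroup_rstat_updated() whenever a per-cpu counter
 * changes; readers flush before reporting.  my_stat_show() and
 * my_read_counter() are hypothetical.
 *
 *	static int my_stat_show(struct seq_file *seq, void *v)
 *	{
 *		struct cgroup *cgrp = seq_css(seq)->cgroup;
 *
 *		cgroup_rstat_flush(cgrp);
 *		seq_printf(seq, "%llu\n", my_read_counter(cgrp));
 *		return 0;
 *	}
 */
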
/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_clone(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
	return skcd->cgroup;
}

#else /* !CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_clone(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->ns.count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->ns.count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */