/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>
#include <linux/kernel_stat.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values. The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN		1
#define CGROUP_WEIGHT_DFL		100
#define CGROUP_WEIGHT_MAX		10000
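
/*
 * Illustrative note (not in the original header): on the default
 * hierarchy a weight knob such as cpu.weight distributes the resource
 * among siblings in proportion to their weights; e.g. siblings weighted
 * 100 and 200 receive roughly 1/3 and 2/3 of contended CPU time.
 * CGROUP_WEIGHT_DFL is the logarithmic center of [MIN, MAX] because
 * DFL/MIN == MAX/DFL == 100, i.e. 100x in either direction.
 */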

/* walk only threadgroup leaders */
#define CSS_TASK_ITER_PROCS		(1U << 0)
/* walk all threaded css_sets in the domain */
#define CSS_TASK_ITER_THREADED		(1U << 1)

/* internal flags */
#define CSS_TASK_ITER_SKIPPED		(1U << 16)

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
	struct cgroup_subsys		*ss;
	unsigned int			flags;

	struct list_head		*cset_pos;
	struct list_head		*cset_head;

	struct list_head		*tcset_pos;
	struct list_head		*tcset_head;

	struct list_head		*task_pos;
	struct list_head		*tasks_head;
	struct list_head		*mg_tasks_head;
	struct list_head		*dying_tasks_head;

	struct css_set			*cur_cset;
	struct css_set			*cur_dcset;
	struct task_struct		*cur_task;
	struct list_head		iters_node;	/* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)							\
	extern struct static_key_true _x ## _cgrp_subsys_enabled_key;	\
	extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)					\
	static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)					\
	static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgroup,
					 struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
					     struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
						       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
		     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
int cgroup_can_fork(struct task_struct *p);
void cgroup_cancel_fork(struct task_struct *p);
void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_release(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);

int cgroup_parse_float(const char *input, unsigned dec_shift, s64 *v);
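
/*
 * Illustrative sketch (not in the original header): cgroup_parse_float()
 * parses a decimal string into a fixed-point s64 scaled by 10^@dec_shift
 * and returns 0 on success. Assuming that contract:
 *
 *	s64 v;
 *
 *	if (!cgroup_parse_float("12.34", 2, &v))
 *		pr_info("parsed %lld\n", v);	// v == 1234, i.e. 12.34 * 100
 */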

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
					   struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
						    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
						     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
					 struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
					struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
			 struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);
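
/*
 * Illustrative sketch (not in the original header): walking every task
 * attached to @css with the opaque iterator above.
 *
 *	struct css_task_iter it;
 *	struct task_struct *task;
 *
 *	css_task_iter_start(css, 0, &it);
 *	while ((task = css_task_iter_next(&it)))
 *		pr_info("%s\n", task->comm);	// valid only until the next call
 *	css_task_iter_end(&it);
 *
 * Passing CSS_TASK_ITER_PROCS as @flags would visit threadgroup leaders
 * only.
 */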

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)					\
	for ((pos) = css_next_child(NULL, (parent)); (pos);		\
	     (pos) = css_next_child((pos), (parent)))
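
/*
 * Illustrative sketch (not in the original header): counting @parent's
 * online children under the required RCU read lock.
 *
 *	struct cgroup_subsys_state *child;
 *	int nr = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_child(child, parent)
 *		if (child->flags & CSS_ONLINE)
 *			nr++;
 *	rcu_read_unlock();
 */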

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @root: css whose descendants to walk
 *
 * Walk @root's descendants. @root is included in the iteration and the
 * first node to be visited. Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *	Lock @css's parent and @css;
 *	Inherit state from the parent;
 *	Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *	css_for_each_descendant_pre(@pos, @css) {
 *		Lock @pos;
 *		if (@pos == @css)
 *			Update @css's state;
 *		else
 *			Verify @pos is alive and inherit state from its parent;
 *		Unlock @pos;
 *	}
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting. The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state. It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration. The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)				\
	for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_pre((pos), (css)))
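
/*
 * Illustrative sketch (hypothetical "struct foo" controller, not in the
 * original header): a concrete rendering of the my_update_state() pattern
 * above, where foo_parent() is an assumed helper returning the parent foo.
 *
 *	struct cgroup_subsys_state *pos;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_pre(pos, &root->css) {
 *		struct foo *f = container_of(pos, struct foo, css);
 *
 *		spin_lock(&f->lock);
 *		if (pos == &root->css)
 *			f->limit = new_limit;
 *		else if (pos->flags & CSS_ONLINE)	// verify @pos is alive
 *			f->limit = foo_parent(f)->limit;
 *		spin_unlock(&f->lock);
 *	}
 *	rcu_read_unlock();
 */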

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead. @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal. It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in the
 * pre-order walk doesn't apply to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)				\
	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
	     (pos) = css_next_descendant_post((pos), (css)))
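
/*
 * Illustrative sketch (hypothetical "struct foo" counter, not in the
 * original header): post-order visits children before their parents,
 * which suits bottom-up aggregation.
 *
 *	struct cgroup_subsys_state *pos;
 *	u64 total = 0;
 *
 *	rcu_read_lock();
 *	css_for_each_descendant_post(pos, css)
 *		total += container_of(pos, struct foo, css)->count;
 *	rcu_read_unlock();
 */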

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)			\
	for ((task) = cgroup_taskset_first((tset), &(dst_css));	\
	     (task);							\
	     (task) = cgroup_taskset_next((tset), &(dst_css)))
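
/*
 * Illustrative sketch (hypothetical ->attach() callback, not in the
 * original header):
 *
 *	static void foo_attach(struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *		struct cgroup_subsys_state *css;
 *
 *		cgroup_taskset_for_each(task, css, tset)
 *			pr_info("moving pid %d to cgroup %llu\n",
 *				task->pid, cgroup_id(css->cgroup));
 *	}
 */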

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset. For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)		\
	for ((leader) = cgroup_taskset_first((tset), &(dst_css));	\
	     (leader);							\
	     (leader) = cgroup_taskset_next((tset), &(dst_css)))	\
		if ((leader) != (leader)->group_leader)			\
			;						\
		else

/*
 * Inline functions.
 */

static inline u64 cgroup_id(struct cgroup *cgrp)
{
	return cgrp->kn->id;
}

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released. This function doesn't care whether @css is on or
 * offline. The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function. Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget(&css->refcnt);
	return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online. The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function. Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		return percpu_ref_tryget_live(&css->refcnt);
	return true;
}
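
/*
 * Illustrative sketch (not in the original header): the usual pattern is
 * to locate a css under RCU, pin it with css_tryget_online(), and drop
 * the reference with css_put() when done.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = task_css(task, cpu_cgrp_id);
 *	if (!css_tryget_online(css))
 *		css = NULL;			// raced with offlining
 *	rcu_read_unlock();
 *
 *	if (css) {
 *		// ... use css ...
 *		css_put(css);
 *	}
 */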

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline. In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal. cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed. If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
	return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
	if (!(css->flags & CSS_NO_REF))
		percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_get(struct cgroup *cgrp)
{
	css_get(&cgrp->self);
}

static inline bool cgroup_tryget(struct cgroup *cgrp)
{
	return css_tryget(&cgrp->self);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
	css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive. This macro verifies that the
 * caller is inside a proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)					\
	rcu_dereference_check((task)->cgroups,				\
			      lockdep_is_held(&cgroup_mutex) ||		\
			      lockdep_is_held(&css_set_lock) ||		\
			      ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)					\
	rcu_dereference((task)->cgroups)
#endif
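
/*
 * Illustrative sketch (hypothetical "foo_lock", not in the original
 * header): a caller holding its own lock across attach can satisfy the
 * lockdep check through @__c instead of taking rcu_read_lock().
 *
 *	struct css_set *cset;
 *
 *	lockdep_assert_held(&foo_lock);
 *	cset = task_css_set_check(task, lockdep_is_held(&foo_lock));
 */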

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair. The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)				\
	task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
	return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
						   int subsys_id)
{
	return task_css_check(task, subsys_id, false);
}

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on and return it. This function is guaranteed to return a
 * valid css. The returned css may already have been offlined.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
	struct cgroup_subsys_state *css;

	rcu_read_lock();
	while (true) {
		css = task_css(task, subsys_id);
		/*
		 * Can't use css_tryget_online() here. A task which has
		 * PF_EXITING set may stay associated with an offline css.
		 * If such task calls this function, css_tryget_online()
		 * will keep failing.
		 */
		if (likely(css_tryget(css)))
			break;
		cpu_relax();
	}
	rcu_read_unlock();
	return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
	return task_css_check(task, subsys_id, true) ==
		init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
					 int subsys_id)
{
	return task_css(task, subsys_id)->cgroup;
}

static inline struct cgroup *task_dfl_cgroup(struct task_struct *task)
{
	return task_css_set(task)->dfl_cgrp;
}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	struct cgroup_subsys_state *parent_css = cgrp->self.parent;

	if (parent_css)
		return container_of(parent_css, struct cgroup, self);
	return NULL;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor. It also returns %true
 * if @cgrp == @ancestor. This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
					struct cgroup *ancestor)
{
	if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
		return false;
	return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
}

/**
 * cgroup_ancestor - find ancestor of cgroup
 * @cgrp: cgroup to find ancestor of
 * @ancestor_level: level of ancestor to find starting from root
 *
 * Find the ancestor of @cgrp at @ancestor_level, counting levels from the
 * root, and return a pointer to it. Return NULL if @cgrp doesn't have an
 * ancestor at @ancestor_level.
 *
 * This function is safe to call as long as @cgrp is accessible.
 */
static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
					     int ancestor_level)
{
	if (cgrp->level < ancestor_level)
		return NULL;
	while (cgrp && cgrp->level > ancestor_level)
		cgrp = cgroup_parent(cgrp);
	return cgrp;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's default cgroup hierarchy is a descendant of @ancestor.
 * It follows all the same rules as cgroup_is_descendant, and only applies
 * to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	struct css_set *cset = task_css_set(task);

	return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
	return cgrp->nr_populated_csets + cgrp->nr_populated_domain_children +
		cgrp->nr_populated_threaded_children;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
	return kernfs_ino(cgrp->kn);
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
	return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
	return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
	return of_css(seq->private);
}

/*
 * Name / path handling functions. All are thin wrappers around the kernfs
 * counterparts and can be called in any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
	return kernfs_path(cgrp->kn, buf, buflen);
}
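
/*
 * Illustrative sketch (not in the original header): formatting a cgroup's
 * path for logging. kernfs_path() returns the length of the full path,
 * which may equal or exceed @buflen on truncation, or a negative errno.
 *
 *	char buf[PATH_MAX];
 *	int len;
 *
 *	len = cgroup_path(cgrp, buf, sizeof(buf));
 *	if (len >= 0 && len < sizeof(buf))
 *		pr_info("cgroup: %s\n", buf);
 */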

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
	pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
	pr_cont_kernfs_path(cgrp->kn);
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return &cgrp->psi;
}

static inline void cgroup_init_kthreadd(void)
{
	/*
	 * kthreadd is inherited by all kthreads, keep it in the root so
	 * that the new kthreads are guaranteed to stay in the root until
	 * initialization is finished.
	 */
	current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
	/*
	 * This kthread finished initialization. The creator should have
	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
	 */
	current->no_cgroup_migration = 0;
}

void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
					 struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
				    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_release(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
	return NULL;
}

static inline struct psi_group *cgroup_psi(struct cgroup *cgrp)
{
	return NULL;
}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
					       struct cgroup *ancestor)
{
	return true;
}

static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
{}
#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUPS
/*
 * cgroup scalable recursive statistics.
 */
void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
void cgroup_rstat_flush(struct cgroup *cgrp);
void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp);
void cgroup_rstat_flush_hold(struct cgroup *cgrp);
void cgroup_rstat_flush_release(void);

/*
 * Basic resource stats.
 */
#ifdef CONFIG_CGROUP_CPUACCT
void cpuacct_charge(struct task_struct *tsk, u64 cputime);
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val);
#else
static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
static inline void cpuacct_account_field(struct task_struct *tsk, int index,
					 u64 val) {}
#endif

void __cgroup_account_cputime(struct cgroup *cgrp, u64 delta_exec);
void __cgroup_account_cputime_field(struct cgroup *cgrp,
				    enum cpu_usage_stat index, u64 delta_exec);

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_charge(task, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime(cgrp, delta_exec);
	rcu_read_unlock();
}

static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec)
{
	struct cgroup *cgrp;

	cpuacct_account_field(task, index, delta_exec);

	rcu_read_lock();
	cgrp = task_dfl_cgroup(task);
	if (cgroup_parent(cgrp))
		__cgroup_account_cputime_field(cgrp, index, delta_exec);
	rcu_read_unlock();
}

#else /* CONFIG_CGROUPS */

static inline void cgroup_account_cputime(struct task_struct *task,
					  u64 delta_exec) {}
static inline void cgroup_account_cputime_field(struct task_struct *task,
						enum cpu_usage_stat index,
						u64 delta_exec) {}

#endif /* CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling. For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
	unsigned long v;

	/*
	 * @skcd->val is 64bit but the following is safe on 32bit too as we
	 * just need the lower ulong to be written and read atomically.
	 */
	v = READ_ONCE(skcd->val);

	if (v & 1)
		return &cgrp_dfl_root.cgrp;

	return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
	return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
	refcount_t		count;
	struct ns_common	ns;
	struct user_namespace	*user_ns;
	struct ucounts		*ucounts;
	struct css_set		*root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
					struct user_namespace *user_ns,
					struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
		   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
	       struct cgroup_namespace *old_ns)
{
	return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns)
		refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
	if (ns && refcount_dec_and_test(&ns->count))
		free_cgroup_ns(ns);
}

#ifdef CONFIG_CGROUPS

void cgroup_enter_frozen(void);
void cgroup_leave_frozen(bool always_leave);
void cgroup_update_frozen(struct cgroup *cgrp);
void cgroup_freeze(struct cgroup *cgrp, bool freeze);
void cgroup_freezer_migrate_task(struct task_struct *task, struct cgroup *src,
				 struct cgroup *dst);

static inline bool cgroup_task_freeze(struct task_struct *task)
{
	bool ret;

	if (task->flags & PF_KTHREAD)
		return false;

	rcu_read_lock();
	ret = test_bit(CGRP_FREEZE, &task_dfl_cgroup(task)->flags);
	rcu_read_unlock();

	return ret;
}

static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return task->frozen;
}

#else /* !CONFIG_CGROUPS */

static inline void cgroup_enter_frozen(void) { }
static inline void cgroup_leave_frozen(bool always_leave) { }
static inline bool cgroup_task_freeze(struct task_struct *task)
{
	return false;
}
static inline bool cgroup_task_frozen(struct task_struct *task)
{
	return false;
}

#endif /* !CONFIG_CGROUPS */

#ifdef CONFIG_CGROUP_BPF
static inline void cgroup_bpf_get(struct cgroup *cgrp)
{
	percpu_ref_get(&cgrp->bpf.refcnt);
}

static inline void cgroup_bpf_put(struct cgroup *cgrp)
{
	percpu_ref_put(&cgrp->bpf.refcnt);
}

#else /* CONFIG_CGROUP_BPF */

static inline void cgroup_bpf_get(struct cgroup *cgrp) {}
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}

#endif /* CONFIG_CGROUP_BPF */

#endif /* _LINUX_CGROUP_H */