#ifndef _LINUX_CGROUP_H
#define _LINUX_CGROUP_H
/*
 * cgroup interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/rculist.h>
#include <linux/cgroupstats.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
#include <linux/refcount.h>

#include <linux/cgroup-defs.h>

#ifdef CONFIG_CGROUPS

/*
 * All weight knobs on the default hierarchy should use the following min,
 * default and max values.  The default value is the logarithmic center of
 * MIN and MAX and allows 100x to be expressed in both directions.
 */
#define CGROUP_WEIGHT_MIN       1
#define CGROUP_WEIGHT_DFL       100
#define CGROUP_WEIGHT_MAX       10000

/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
        struct cgroup_subsys    *ss;

        struct list_head        *cset_pos;
        struct list_head        *cset_head;

        struct list_head        *task_pos;
        struct list_head        *tasks_head;
        struct list_head        *mg_tasks_head;

        struct css_set          *cur_cset;
        struct task_struct      *cur_task;
        struct list_head        iters_node;     /* css_set->task_iters */
};

extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;

#define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

#define SUBSYS(_x)                                                      \
        extern struct static_key_true _x ## _cgrp_subsys_enabled_key;  \
        extern struct static_key_true _x ## _cgrp_subsys_on_dfl_key;
#include <linux/cgroup_subsys.h>
#undef SUBSYS

/**
 * cgroup_subsys_enabled - fast test on whether a subsys is enabled
 * @ss: subsystem in question
 */
#define cgroup_subsys_enabled(ss)                                       \
        static_branch_likely(&ss ## _enabled_key)

/**
 * cgroup_subsys_on_dfl - fast test on whether a subsys is on default hierarchy
 * @ss: subsystem in question
 */
#define cgroup_subsys_on_dfl(ss)                                        \
        static_branch_likely(&ss ## _on_dfl_key)

bool css_has_online_children(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss);
struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
                                             struct cgroup_subsys *ss);
struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
                                                       struct cgroup_subsys *ss);

struct cgroup *cgroup_get_from_path(const char *path);
struct cgroup *cgroup_get_from_fd(int fd);

int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);

int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
int cgroup_rm_cftypes(struct cftype *cfts);
void cgroup_file_notify(struct cgroup_file *cfile);

int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry);
int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
                     struct pid *pid, struct task_struct *tsk);

void cgroup_fork(struct task_struct *p);
extern int cgroup_can_fork(struct task_struct *p);
extern void cgroup_cancel_fork(struct task_struct *p);
extern void cgroup_post_fork(struct task_struct *p);
void cgroup_exit(struct task_struct *p);
void cgroup_free(struct task_struct *p);

int cgroup_init_early(void);
int cgroup_init(void);
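
/*
 * Usage sketch (illustrative, not a definition): the four fork hooks
 * above pair up roughly as in copy_process() in kernel/fork.c, and
 * cgroup_cancel_fork() is only used to undo a successful
 * cgroup_can_fork():
 *
 *      cgroup_fork(child);             (early: inherit css_set pointer)
 *      ...
 *      if (cgroup_can_fork(child))     (returns -errno on failure)
 *              goto fork_out;
 *      ...
 *      if (some_later_step_failed) {
 *              cgroup_cancel_fork(child);
 *              goto fork_out;
 *      }
 *      ...
 *      cgroup_post_fork(child);        (commit: child becomes visible)
 */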

/*
 * Iteration helpers and macros.
 */

struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
                                           struct cgroup_subsys_state *parent);
struct cgroup_subsys_state *css_next_descendant_pre(struct cgroup_subsys_state *pos,
                                                    struct cgroup_subsys_state *css);
struct cgroup_subsys_state *css_rightmost_descendant(struct cgroup_subsys_state *pos);
struct cgroup_subsys_state *css_next_descendant_post(struct cgroup_subsys_state *pos,
                                                     struct cgroup_subsys_state *css);

struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
                                         struct cgroup_subsys_state **dst_cssp);
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
                                        struct cgroup_subsys_state **dst_cssp);

void css_task_iter_start(struct cgroup_subsys_state *css,
                         struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
void css_task_iter_end(struct css_task_iter *it);

/**
 * css_for_each_child - iterate through children of a css
 * @pos: the css * to use as the loop cursor
 * @parent: css whose children to walk
 *
 * Walk @parent's children.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_child(pos, parent)                                 \
        for ((pos) = css_next_child(NULL, (parent)); (pos);             \
             (pos) = css_next_child((pos), (parent)))
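
/*
 * Usage sketch (illustrative; the "foo" subsystem is made up): walk the
 * children of @parent under rcu_read_lock() and count the tasks attached
 * to each child with the css_task_iter API declared above:
 *
 *      static int foo_count_child_tasks(struct cgroup_subsys_state *parent)
 *      {
 *              struct cgroup_subsys_state *pos;
 *              struct css_task_iter it;
 *              int nr = 0;
 *
 *              rcu_read_lock();
 *              css_for_each_child(pos, parent) {
 *                      css_task_iter_start(pos, &it);
 *                      while (css_task_iter_next(&it))
 *                              nr++;
 *                      css_task_iter_end(&it);
 *              }
 *              rcu_read_unlock();
 *              return nr;
 *      }
 */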

/**
 * css_for_each_descendant_pre - pre-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Walk @css's descendants.  @css is included in the iteration and the
 * first node to be visited.  Must be called under rcu_read_lock().
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * For example, the following guarantees that a descendant can't escape
 * state updates of its ancestors.
 *
 * my_online(@css)
 * {
 *      Lock @css's parent and @css;
 *      Inherit state from the parent;
 *      Unlock both.
 * }
 *
 * my_update_state(@css)
 * {
 *      css_for_each_descendant_pre(@pos, @css) {
 *              Lock @pos;
 *              if (@pos == @css)
 *                      Update @css's state;
 *              else
 *                      Verify @pos is alive and inherit state from its parent;
 *              Unlock @pos;
 *      }
 * }
 *
 * As long as the inheriting step, including checking the parent state, is
 * enclosed inside @pos locking, double-locking the parent isn't necessary
 * while inheriting.  The state update to the parent is guaranteed to be
 * visible by walking order and, as long as inheriting operations to the
 * same @pos are atomic to each other, multiple updates racing each other
 * still result in the correct state.  It's guaranteed that at least one
 * inheritance happens for any css after the latest update to its parent.
 *
 * If checking parent's state requires locking the parent, each inheriting
 * iteration should lock and unlock both @pos->parent and @pos.
 *
 * Alternatively, a subsystem may choose to use a single global lock to
 * synchronize ->css_online() and ->css_offline() against tree-walking
 * operations.
 *
 * It is allowed to temporarily drop RCU read lock during iteration.  The
 * caller is responsible for ensuring that @pos remains accessible until
 * the start of the next iteration by, for example, bumping the css refcnt.
 */
#define css_for_each_descendant_pre(pos, css)                           \
        for ((pos) = css_next_descendant_pre(NULL, (css)); (pos);       \
             (pos) = css_next_descendant_pre((pos), (css)))

/**
 * css_for_each_descendant_post - post-order walk of a css's descendants
 * @pos: the css * to use as the loop cursor
 * @css: css whose descendants to walk
 *
 * Similar to css_for_each_descendant_pre() but performs post-order
 * traversal instead.  @css is included in the iteration and the last
 * node to be visited.
 *
 * If a subsystem synchronizes ->css_online() and the start of iteration, a
 * css which finished ->css_online() is guaranteed to be visible in the
 * future iterations and will stay visible until the last reference is put.
 * A css which hasn't finished ->css_online() or already finished
 * ->css_offline() may show up during traversal.  It's each subsystem's
 * responsibility to synchronize against on/offlining.
 *
 * Note that the walk visibility guarantee example described in pre-order
 * walk doesn't apply in the same way to post-order walks.
 */
#define css_for_each_descendant_post(pos, css)                          \
        for ((pos) = css_next_descendant_post(NULL, (css)); (pos);      \
             (pos) = css_next_descendant_post((pos), (css)))
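
/*
 * Usage sketch (illustrative): a post-order walk visits every css after
 * all of its children and visits the root last, which suits bottom-up
 * aggregation or teardown:
 *
 *      static int foo_count_subtree(struct cgroup_subsys_state *root)
 *      {
 *              struct cgroup_subsys_state *pos;
 *              int n = 0;
 *
 *              rcu_read_lock();
 *              css_for_each_descendant_post(pos, root)
 *                      n++;    (children are counted before their parent)
 *              rcu_read_unlock();
 *              return n;
 *      }
 */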

/**
 * cgroup_taskset_for_each - iterate cgroup_taskset
 * @task: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * @tset may contain multiple tasks and they may belong to multiple
 * processes.
 *
 * On the v2 hierarchy, there may be tasks from multiple processes and they
 * may not share the source or destination csses.
 *
 * On traditional hierarchies, when there are multiple tasks in @tset, if a
 * task of a process is in @tset, all tasks of the process are in @tset.
 * Also, all are guaranteed to share the same source and destination csses.
 *
 * Iteration is not in any specific order.
 */
#define cgroup_taskset_for_each(task, dst_css, tset)                    \
        for ((task) = cgroup_taskset_first((tset), &(dst_css));         \
             (task);                                                    \
             (task) = cgroup_taskset_next((tset), &(dst_css)))

/**
 * cgroup_taskset_for_each_leader - iterate group leaders in a cgroup_taskset
 * @leader: the loop cursor
 * @dst_css: the destination css
 * @tset: taskset to iterate
 *
 * Iterate threadgroup leaders of @tset.  For single-task migrations, @tset
 * may not contain any.
 */
#define cgroup_taskset_for_each_leader(leader, dst_css, tset)           \
        for ((leader) = cgroup_taskset_first((tset), &(dst_css));       \
             (leader);                                                  \
             (leader) = cgroup_taskset_next((tset), &(dst_css)))        \
                if ((leader) != (leader)->group_leader)                 \
                        ;                                               \
                else

/*
 * Inline functions.
 */

/**
 * css_get - obtain a reference on the specified css
 * @css: target css
 *
 * The caller must already have a reference.
 */
static inline void css_get(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_get(&css->refcnt);
}

/**
 * css_get_many - obtain references on the specified css
 * @css: target css
 * @n: number of references to get
 *
 * The caller must already have a reference.
 */
static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_get_many(&css->refcnt, n);
}

/**
 * css_tryget - try to obtain a reference on the specified css
 * @css: target css
 *
 * Obtain a reference on @css unless it already has reached zero and is
 * being released.  This function doesn't care whether @css is online or
 * offline.  The caller naturally needs to ensure that @css is accessible
 * but doesn't have to be holding a reference on it - IOW, RCU protected
 * access is good enough for this function.  Returns %true if a reference
 * count was successfully obtained; %false otherwise.
 */
static inline bool css_tryget(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                return percpu_ref_tryget(&css->refcnt);
        return true;
}

/**
 * css_tryget_online - try to obtain a reference on the specified css if online
 * @css: target css
 *
 * Obtain a reference on @css if it's online.  The caller naturally needs
 * to ensure that @css is accessible but doesn't have to be holding a
 * reference on it - IOW, RCU protected access is good enough for this
 * function.  Returns %true if a reference count was successfully obtained;
 * %false otherwise.
 */
static inline bool css_tryget_online(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                return percpu_ref_tryget_live(&css->refcnt);
        return true;
}
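
/*
 * Usage sketch (illustrative): the typical lookup pattern resolves an
 * RCU-protected css pointer and pins it with css_tryget_online() before
 * leaving the read-side critical section; css_put() is defined below:
 *
 *      struct cgroup_subsys_state *css;
 *
 *      rcu_read_lock();
 *      css = <RCU-protected lookup>;
 *      if (css && !css_tryget_online(css))
 *              css = NULL;     (lost the race against offlining/release)
 *      rcu_read_unlock();
 *
 *      if (css) {
 *              <use css>;
 *              css_put(css);
 *      }
 */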

/**
 * css_is_dying - test whether the specified css is dying
 * @css: target css
 *
 * Test whether @css is in the process of offlining or already offline.  In
 * most cases, ->css_online() and ->css_offline() callbacks should be
 * enough; however, the actual offline operations are RCU delayed and this
 * test returns %true also when @css is scheduled to be offlined.
 *
 * This is useful, for example, when the use case requires synchronous
 * behavior with respect to cgroup removal.  cgroup removal schedules css
 * offlining but the css can seem alive while the operation is being
 * delayed.  If the delay affects user visible semantics, this test can be
 * used to resolve the situation.
 */
static inline bool css_is_dying(struct cgroup_subsys_state *css)
{
        return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
}

/**
 * css_put - put a css reference
 * @css: target css
 *
 * Put a reference obtained via css_get() and css_tryget_online().
 */
static inline void css_put(struct cgroup_subsys_state *css)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_put(&css->refcnt);
}

/**
 * css_put_many - put css references
 * @css: target css
 * @n: number of references to put
 *
 * Put references obtained via css_get() and css_tryget_online().
 */
static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n)
{
        if (!(css->flags & CSS_NO_REF))
                percpu_ref_put_many(&css->refcnt, n);
}

static inline void cgroup_put(struct cgroup *cgrp)
{
        css_put(&cgrp->self);
}

/**
 * task_css_set_check - obtain a task's css_set with extra access conditions
 * @task: the task to obtain css_set for
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * A task's css_set is RCU protected, initialized and exited while holding
 * task_lock(), and can only be modified while holding both cgroup_mutex
 * and task_lock() while the task is alive.  This macro verifies that the
 * caller is inside a proper critical section and returns @task's css_set.
 *
 * The caller can also specify additional allowed conditions via @__c, such
 * as locks used during the cgroup_subsys::attach() methods.
 */
#ifdef CONFIG_PROVE_RCU
extern struct mutex cgroup_mutex;
extern spinlock_t css_set_lock;
#define task_css_set_check(task, __c)                                   \
        rcu_dereference_check((task)->cgroups,                          \
                lockdep_is_held(&cgroup_mutex) ||                       \
                lockdep_is_held(&css_set_lock) ||                       \
                ((task)->flags & PF_EXITING) || (__c))
#else
#define task_css_set_check(task, __c)                                   \
        rcu_dereference((task)->cgroups)
#endif

/**
 * task_css_check - obtain css for (task, subsys) w/ extra access conds
 * @task: the target task
 * @subsys_id: the target subsystem ID
 * @__c: extra condition expression to be passed to rcu_dereference_check()
 *
 * Return the cgroup_subsys_state for the (@task, @subsys_id) pair.  The
 * synchronization rules are the same as task_css_set_check().
 */
#define task_css_check(task, subsys_id, __c)                            \
        task_css_set_check((task), (__c))->subsys[(subsys_id)]

/**
 * task_css_set - obtain a task's css_set
 * @task: the task to obtain css_set for
 *
 * See task_css_set_check().
 */
static inline struct css_set *task_css_set(struct task_struct *task)
{
        return task_css_set_check(task, false);
}

/**
 * task_css - obtain css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * See task_css_check().
 */
static inline struct cgroup_subsys_state *task_css(struct task_struct *task,
                                                   int subsys_id)
{
        return task_css_check(task, subsys_id, false);
}
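
/*
 * Usage sketch (illustrative; the subsystem id is an example): task_css()
 * is good for a reference-free peek as long as the result isn't used
 * outside the RCU read section:
 *
 *      rcu_read_lock();
 *      css = task_css(current, freezer_cgrp_id);
 *      <inspect css or css->cgroup>;
 *      rcu_read_unlock();
 *
 * To keep the css after rcu_read_unlock(), take a reference instead,
 * e.g. via task_get_css() below.
 */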

/**
 * task_get_css - find and get the css for (task, subsys)
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Find the css for the (@task, @subsys_id) combination, increment a
 * reference on it and return it.  This function is guaranteed to return a
 * valid css.
 */
static inline struct cgroup_subsys_state *
task_get_css(struct task_struct *task, int subsys_id)
{
        struct cgroup_subsys_state *css;

        rcu_read_lock();
        while (true) {
                css = task_css(task, subsys_id);
                if (likely(css_tryget_online(css)))
                        break;
                cpu_relax();
        }
        rcu_read_unlock();
        return css;
}

/**
 * task_css_is_root - test whether a task belongs to the root css
 * @task: the target task
 * @subsys_id: the target subsystem ID
 *
 * Test whether @task belongs to the root css on the specified subsystem.
 * May be invoked in any context.
 */
static inline bool task_css_is_root(struct task_struct *task, int subsys_id)
{
        return task_css_check(task, subsys_id, true) ==
                init_css_set.subsys[subsys_id];
}

static inline struct cgroup *task_cgroup(struct task_struct *task,
                                         int subsys_id)
{
        return task_css(task, subsys_id)->cgroup;
}

/**
 * cgroup_is_descendant - test ancestry
 * @cgrp: the cgroup to be tested
 * @ancestor: possible ancestor of @cgrp
 *
 * Test whether @cgrp is a descendant of @ancestor.  It also returns %true
 * if @cgrp == @ancestor.  This function is safe to call as long as @cgrp
 * and @ancestor are accessible.
 */
static inline bool cgroup_is_descendant(struct cgroup *cgrp,
                                        struct cgroup *ancestor)
{
        if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
                return false;
        return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
}

/**
 * task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
 * @task: the task to be tested
 * @ancestor: possible ancestor of @task's cgroup
 *
 * Tests whether @task's cgroup on the default hierarchy is a descendant of
 * @ancestor.  It follows all the same rules as cgroup_is_descendant, and
 * only applies to the default hierarchy.
 */
static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        struct css_set *cset = task_css_set(task);

        return cgroup_is_descendant(cset->dfl_cgrp, ancestor);
}

/* no synchronization, the result can only be used as a hint */
static inline bool cgroup_is_populated(struct cgroup *cgrp)
{
        return cgrp->populated_cnt;
}

/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
        return cgrp->kn->ino;
}

/* cft/css accessors for cftype->write() operation */
static inline struct cftype *of_cft(struct kernfs_open_file *of)
{
        return of->kn->priv;
}

struct cgroup_subsys_state *of_css(struct kernfs_open_file *of);

/* cft/css accessors for cftype->seq_*() operations */
static inline struct cftype *seq_cft(struct seq_file *seq)
{
        return of_cft(seq->private);
}

static inline struct cgroup_subsys_state *seq_css(struct seq_file *seq)
{
        return of_css(seq->private);
}
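
/*
 * Usage sketch (illustrative; "foo" and foo_css_to_val() are made up): a
 * cftype seq_show handler recovers its css with seq_css():
 *
 *      static int foo_current_show(struct seq_file *seq, void *v)
 *      {
 *              struct cgroup_subsys_state *css = seq_css(seq);
 *
 *              seq_printf(seq, "%llu\n", foo_css_to_val(css));
 *              return 0;
 *      }
 *
 * hooked up through the subsystem's cftype table:
 *
 *      { .name = "current", .seq_show = foo_current_show },
 */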

/*
 * Name / path handling functions.  All are thin wrappers around the kernfs
 * counterparts and can be called from any context.
 */

static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_name(cgrp->kn, buf, buflen);
}

static inline int cgroup_path(struct cgroup *cgrp, char *buf, size_t buflen)
{
        return kernfs_path(cgrp->kn, buf, buflen);
}

static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
{
        pr_cont_kernfs_name(cgrp->kn);
}

static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
{
        pr_cont_kernfs_path(cgrp->kn);
}
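
/*
 * Usage sketch (illustrative): like the kernfs helpers they wrap,
 * cgroup_name() and cgroup_path() return the would-be length of the
 * result (or a negative error), so a return value >= buflen indicates
 * truncation:
 *
 *      char buf[PATH_MAX];
 *      int len = cgroup_path(cgrp, buf, sizeof(buf));
 *
 *      if (len >= 0 && len < sizeof(buf))
 *              pr_info("cgroup path: %s\n", buf);
 */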

static inline void cgroup_init_kthreadd(void)
{
        /*
         * kthreadd is inherited by all kthreads, keep it in the root so
         * that the new kthreads are guaranteed to stay in the root until
         * initialization is finished.
         */
        current->no_cgroup_migration = 1;
}

static inline void cgroup_kthread_ready(void)
{
        /*
         * This kthread finished initialization.  The creator should have
         * set PF_NO_SETAFFINITY if this kthread should stay in the root.
         */
        current->no_cgroup_migration = 0;
}

#else /* !CONFIG_CGROUPS */

struct cgroup_subsys_state;
struct cgroup;

static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
                                         struct task_struct *t) { return 0; }
static inline int cgroupstats_build(struct cgroupstats *stats,
                                    struct dentry *dentry) { return -EINVAL; }

static inline void cgroup_fork(struct task_struct *p) {}
static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
static inline void cgroup_cancel_fork(struct task_struct *p) {}
static inline void cgroup_post_fork(struct task_struct *p) {}
static inline void cgroup_exit(struct task_struct *p) {}
static inline void cgroup_free(struct task_struct *p) {}

static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}

static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
                                               struct cgroup *ancestor)
{
        return true;
}
#endif /* !CONFIG_CGROUPS */

/*
 * sock->sk_cgrp_data handling.  For more info, see sock_cgroup_data
 * definition in cgroup-defs.h.
 */
#ifdef CONFIG_SOCK_CGROUP_DATA

#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
extern spinlock_t cgroup_sk_update_lock;
#endif

void cgroup_sk_alloc_disable(void);
void cgroup_sk_alloc(struct sock_cgroup_data *skcd);
void cgroup_sk_free(struct sock_cgroup_data *skcd);

static inline struct cgroup *sock_cgroup_ptr(struct sock_cgroup_data *skcd)
{
#if defined(CONFIG_CGROUP_NET_PRIO) || defined(CONFIG_CGROUP_NET_CLASSID)
        unsigned long v;

        /*
         * @skcd->val is 64bit but the following is safe on 32bit too as we
         * just need the lower ulong to be written and read atomically.
         */
        v = READ_ONCE(skcd->val);

        /* low bit set: @skcd carries prioidx/classid, not a cgroup pointer */
        if (v & 1)
                return &cgrp_dfl_root.cgrp;

        return (struct cgroup *)(unsigned long)v ?: &cgrp_dfl_root.cgrp;
#else
        return (struct cgroup *)(unsigned long)skcd->val;
#endif
}

#else /* !CONFIG_SOCK_CGROUP_DATA */

static inline void cgroup_sk_alloc(struct sock_cgroup_data *skcd) {}
static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}

#endif /* !CONFIG_SOCK_CGROUP_DATA */

struct cgroup_namespace {
        refcount_t              count;
        struct ns_common        ns;
        struct user_namespace   *user_ns;
        struct ucounts          *ucounts;
        struct css_set          *root_cset;
};

extern struct cgroup_namespace init_cgroup_ns;

#ifdef CONFIG_CGROUPS

void free_cgroup_ns(struct cgroup_namespace *ns);

struct cgroup_namespace *copy_cgroup_ns(unsigned long flags,
                                        struct user_namespace *user_ns,
                                        struct cgroup_namespace *old_ns);

int cgroup_path_ns(struct cgroup *cgrp, char *buf, size_t buflen,
                   struct cgroup_namespace *ns);

#else /* !CONFIG_CGROUPS */

static inline void free_cgroup_ns(struct cgroup_namespace *ns) { }
static inline struct cgroup_namespace *
copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
               struct cgroup_namespace *old_ns)
{
        return old_ns;
}

#endif /* !CONFIG_CGROUPS */

static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
        if (ns)
                refcount_inc(&ns->count);
}

static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
        if (ns && refcount_dec_and_test(&ns->count))
                free_cgroup_ns(ns);
}

#endif /* _LINUX_CGROUP_H */