/* SPDX-License-Identifier: GPL-2.0 */
/*
 * linux/cgroup-defs.h - basic definitions for cgroup
 *
 * This file provides basic types and interfaces. Include this file directly
 * only if necessary to avoid cyclic dependencies.
 */
#ifndef _LINUX_CGROUP_DEFS_H
#define _LINUX_CGROUP_DEFS_H

#include <linux/limits.h>
#include <linux/list.h>
#include <linux/idr.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/u64_stats_sync.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/psi_types.h>

#ifdef CONFIG_CGROUPS

struct cgroup;
struct cgroup_root;
struct cgroup_subsys;
struct cgroup_taskset;
struct kernfs_node;
struct kernfs_ops;
struct kernfs_open_file;
struct seq_file;
struct poll_table_struct;

#define MAX_CGROUP_TYPE_NAMELEN 32
#define MAX_CGROUP_ROOT_NAMELEN 64
#define MAX_CFTYPE_NAME 64

/* define the enumeration of all cgroup subsystems */
#define SUBSYS(_x) _x ## _cgrp_id,
enum cgroup_subsys_id {
#include <linux/cgroup_subsys.h>
	CGROUP_SUBSYS_COUNT,
};
#undef SUBSYS
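
/*
 * Editor's illustrative sketch, not part of the original header: with a
 * kernel configured so that <linux/cgroup_subsys.h> contains, e.g.,
 *
 *	SUBSYS(cpu)
 *	SUBSYS(memory)
 *
 * the x-macro above expands the enum to:
 *
 *	enum cgroup_subsys_id {
 *		cpu_cgrp_id,
 *		memory_cgrp_id,
 *		CGROUP_SUBSYS_COUNT,
 *	};
 *
 * so each enabled controller gets a compile-time id and
 * CGROUP_SUBSYS_COUNT tracks the total automatically.
 */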

/* bits in struct cgroup_subsys_state flags field */
enum {
	CSS_NO_REF	= (1 << 0), /* no reference counting for this css */
	CSS_ONLINE	= (1 << 1), /* between ->css_online() and ->css_offline() */
	CSS_RELEASED	= (1 << 2), /* refcnt reached zero, released */
	CSS_VISIBLE	= (1 << 3), /* css is visible to userland */
	CSS_DYING	= (1 << 4), /* css is dying */
};

/* bits in struct cgroup flags field */
enum {
	/* Control Group requires release notifications to userspace */
	CGRP_NOTIFY_ON_RELEASE,

	/*
	 * Clone the parent's configuration when creating a new child
	 * cpuset cgroup. For historical reasons, this option can be
	 * specified at mount time and thus is implemented here.
	 */
	CGRP_CPUSET_CLONE_CHILDREN,

	/* Control group has to be frozen. */
	CGRP_FREEZE,

	/* Cgroup is frozen. */
	CGRP_FROZEN,
};

/* cgroup_root->flags */
enum {
	CGRP_ROOT_NOPREFIX	= (1 << 1), /* mounted subsystems have no named prefix */
	CGRP_ROOT_XATTR		= (1 << 2), /* supports extended attributes */

	/*
	 * Consider namespaces as delegation boundaries. If this flag is
	 * set, controller specific interface files in a namespace root
	 * aren't writeable from inside the namespace.
	 */
	CGRP_ROOT_NS_DELEGATE	= (1 << 3),

	/*
	 * Reduce latencies on dynamic cgroup modifications such as task
	 * migrations and controller on/offs by disabling percpu operation on
	 * cgroup_threadgroup_rwsem. This forces hot path operations such as
	 * forks and exits into the slow path, making them more expensive.
	 *
	 * Alleviate the contention between fork, exec, exit operations and
	 * writing to cgroup.procs by taking a per threadgroup rwsem instead of
	 * the global cgroup_threadgroup_rwsem. Fork and other operations
	 * from threads in different thread groups no longer contend with
	 * writing to cgroup.procs.
	 *
	 * The static usage pattern of creating a cgroup, enabling controllers,
	 * and then seeding it with CLONE_INTO_CGROUP doesn't require write
	 * locking cgroup_threadgroup_rwsem and thus doesn't benefit from
	 * favordynmod.
	 */
	CGRP_ROOT_FAVOR_DYNMODS = (1 << 4),

	/*
	 * Enable cpuset controller in v1 cgroup to use v2 behavior.
	 */
	CGRP_ROOT_CPUSET_V2_MODE = (1 << 16),

	/*
	 * Enable legacy local memory.events.
	 */
	CGRP_ROOT_MEMORY_LOCAL_EVENTS = (1 << 17),

	/*
	 * Enable recursive subtree protection.
	 */
	CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),

	/*
	 * Enable hugetlb accounting for the memory controller.
	 */
	CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),

	/*
	 * Enable legacy local pids.events.
	 */
	CGRP_ROOT_PIDS_LOCAL_EVENTS = (1 << 20),
};

/* cftype->flags */
enum {
	CFTYPE_ONLY_ON_ROOT	= (1 << 0), /* only create on root cgrp */
	CFTYPE_NOT_ON_ROOT	= (1 << 1), /* don't create on root cgrp */
	CFTYPE_NS_DELEGATABLE	= (1 << 2), /* writeable beyond delegation boundaries */

	CFTYPE_NO_PREFIX	= (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */
	CFTYPE_WORLD_WRITABLE	= (1 << 4), /* (DON'T USE FOR NEW FILES) S_IWUGO */
	CFTYPE_DEBUG		= (1 << 5), /* create when cgroup_debug */

	/* internal flags, do not use outside cgroup core proper */
	__CFTYPE_ONLY_ON_DFL	= (1 << 16), /* only on default hierarchy */
	__CFTYPE_NOT_ON_DFL	= (1 << 17), /* not on default hierarchy */
	__CFTYPE_ADDED		= (1 << 18),
};

enum cgroup_attach_lock_mode {
	/* Default */
	CGRP_ATTACH_LOCK_GLOBAL,

	/* When pid=0 && threadgroup=false, see comments in cgroup_procs_write_start */
	CGRP_ATTACH_LOCK_NONE,

	/* When favordynmods is on, see comments above CGRP_ROOT_FAVOR_DYNMODS */
	CGRP_ATTACH_LOCK_PER_THREADGROUP,
};

/*
 * cgroup_file is the handle for a file instance created in a cgroup which
 * is used, for example, to generate file changed notifications. This can
 * be obtained by setting cftype->file_offset.
 */
struct cgroup_file {
	/* do not access any fields from outside cgroup core */
	struct kernfs_node *kn;
	unsigned long notified_at;
	struct timer_list notify_timer;
};
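
/*
 * Editor's illustrative sketch, not part of the original header: a
 * controller obtains a cgroup_file handle by pointing ->file_offset at a
 * field embedded in its own state (the offset is taken from the start of
 * the css, so this works when the css is the first member), then uses
 * cgroup_file_notify() from <linux/cgroup.h> to generate notifications on
 * that file. The foo_* names below are made up for illustration:
 *
 *	struct foo_cgroup_state {
 *		struct cgroup_subsys_state css;
 *		struct cgroup_file events_file;
 *	};
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "events",
 *			.file_offset = offsetof(struct foo_cgroup_state,
 *						events_file),
 *			.seq_show = foo_events_show,
 *		},
 *		{ }	// zero-length name terminates the array
 *	};
 *
 *	// later, when an event fires:
 *	cgroup_file_notify(&state->events_file);
 */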

/*
 * Per-subsystem/per-cgroup state maintained by the system. This is the
 * fundamental structural building block that controllers deal with.
 *
 * Fields marked with "PI:" are public and immutable and may be accessed
 * directly without synchronization.
 */
struct cgroup_subsys_state {
	/* PI: the cgroup that this css is attached to */
	struct cgroup *cgroup;

	/* PI: the cgroup subsystem that this css is attached to */
	struct cgroup_subsys *ss;

	/* reference count - access via css_[try]get() and css_put() */
	struct percpu_ref refcnt;

	/*
	 * Depending on the context, this field is initialized
	 * via css_rstat_init() at different places:
	 *
	 * when css is associated with cgroup::self
	 *   when css->cgroup is the root cgroup
	 *     performed in cgroup_init()
	 *   when css->cgroup is not the root cgroup
	 *     performed in cgroup_create()
	 * when css is associated with a subsystem
	 *   when css->cgroup is the root cgroup
	 *     performed in cgroup_init_subsys() in the non-early path
	 *   when css->cgroup is not the root cgroup
	 *     performed in css_create()
	 */
	struct css_rstat_cpu __percpu *rstat_cpu;

	/*
	 * siblings list anchored at the parent's ->children
	 *
	 * linkage is protected by cgroup_mutex or RCU
	 */
	struct list_head sibling;
	struct list_head children;

	/*
	 * PI: Subsys-unique ID. 0 is unused and root is always 1. The
	 * matching css can be looked up using css_from_id().
	 */
	int id;

	unsigned int flags;

	/*
	 * Monotonically increasing unique serial number which defines a
	 * uniform order among all csses. It's guaranteed that all
	 * ->children lists are in the ascending order of ->serial_nr,
	 * which is used to allow interrupting and resuming iterations.
	 */
	u64 serial_nr;

	/*
	 * Incremented by online self and children. Used to guarantee that
	 * parents are not offlined before their children.
	 */
	atomic_t online_cnt;

	/* percpu_ref killing and RCU release */
	struct work_struct destroy_work;
	struct rcu_work destroy_rwork;

	/*
	 * PI: the parent css. Placed here for cache proximity to following
	 * fields of the containing structure.
	 */
	struct cgroup_subsys_state *parent;

	/*
	 * Keep track of total numbers of visible descendant CSSes.
	 * The total number of dying CSSes is tracked in
	 * css->cgroup->nr_dying_subsys[ssid].
	 * Protected by cgroup_mutex.
	 */
	int nr_descendants;

	/*
	 * A singly-linked list of css structures to be rstat flushed.
	 * This is a scratch field to be used exclusively by
	 * css_rstat_flush().
	 *
	 * Protected by rstat_base_lock when css is cgroup::self.
	 * Protected by css->ss->rstat_ss_lock otherwise.
	 */
	struct cgroup_subsys_state *rstat_flush_next;
};
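
/*
 * Editor's illustrative sketch, not part of the original header: typical
 * css reference counting as used by controllers (the helpers are declared
 * in <linux/cgroup.h>; they are no-ops for CSS_NO_REF csses):
 *
 *	struct cgroup_subsys_state *css = ...;
 *
 *	if (css_tryget_online(css)) {
 *		// css was online at the time of the tryget and is
 *		// guaranteed to stay allocated until the matching put
 *		...
 *		css_put(css);
 *	}
 */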

/*
 * A css_set is a structure holding pointers to a set of
 * cgroup_subsys_state objects. This saves space in the task struct
 * object and speeds up fork()/exit(), since a single inc/dec and a
 * list_add()/del() can bump the reference count on the entire cgroup
 * set for a task.
 */
struct css_set {
	/*
	 * Set of subsystem states, one for each subsystem. This array is
	 * immutable after creation apart from the init_css_set during
	 * subsystem registration (at boot time).
	 */
	struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];

	/* reference count */
	refcount_t refcount;

	/*
	 * For a domain cgroup, the following points to self. If threaded,
	 * to the matching cset of the nearest domain ancestor. The
	 * dom_cset provides access to the domain cgroup and its csses to
	 * which domain level resource consumptions should be charged.
	 */
	struct css_set *dom_cset;

	/* the default cgroup associated with this css_set */
	struct cgroup *dfl_cgrp;

	/* internal task count, protected by css_set_lock */
	int nr_tasks;

	/*
	 * Lists running through all tasks using this css_set.
	 * mg_tasks lists tasks which belong to this cset but are in the
	 * process of being migrated out or in. Protected by
	 * css_set_lock, but, during migration, once tasks are moved to
	 * mg_tasks, it can be read safely while holding cgroup_mutex.
	 */
	struct list_head tasks;
	struct list_head mg_tasks;
	struct list_head dying_tasks;

	/* all css_task_iters currently walking this cset */
	struct list_head task_iters;

	/*
	 * On the default hierarchy, ->subsys[ssid] may point to a css
	 * attached to an ancestor instead of the cgroup this css_set is
	 * associated with. The following node is anchored at
	 * ->subsys[ssid]->cgroup->e_csets[ssid] and provides a way to
	 * iterate through all css's attached to a given cgroup.
	 */
	struct list_head e_cset_node[CGROUP_SUBSYS_COUNT];

	/* all threaded csets whose ->dom_cset points to this cset */
	struct list_head threaded_csets;
	struct list_head threaded_csets_node;

	/*
	 * List running through all css_sets in the same hash
	 * slot. Protected by css_set_lock.
	 */
	struct hlist_node hlist;

	/*
	 * List of cgrp_cset_links pointing at cgroups referenced from this
	 * css_set. Protected by css_set_lock.
	 */
	struct list_head cgrp_links;

	/*
	 * List of csets participating in the on-going migration either as
	 * source or destination. Protected by cgroup_mutex.
	 */
	struct list_head mg_src_preload_node;
	struct list_head mg_dst_preload_node;
	struct list_head mg_node;

	/*
	 * If this cset is acting as the source of migration the following
	 * fields are set. mg_src_cgrp and mg_dst_cgrp are respectively
	 * the source and destination cgroups of the on-going migration.
	 * mg_dst_cset is the destination cset the target tasks on this
	 * cset should be migrated to. Protected by cgroup_mutex.
	 */
	struct cgroup *mg_src_cgrp;
	struct cgroup *mg_dst_cgrp;
	struct css_set *mg_dst_cset;

	/* dead and being drained, ignore for migration */
	bool dead;

	/* For RCU-protected deletion */
	struct rcu_head rcu_head;
};
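
/*
 * Editor's illustrative sketch, not part of the original header: a task
 * reaches its per-controller state through its css_set. The helpers are
 * declared in <linux/cgroup.h>; task_css() indexes the task's css_set
 * ->subsys[] array for the given controller id, e.g. the memory
 * controller:
 *
 *	rcu_read_lock();
 *	struct cgroup_subsys_state *css = task_css(current, memory_cgrp_id);
 *	// use css while under rcu_read_lock(), or take a reference
 *	rcu_read_unlock();
 */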

struct cgroup_base_stat {
	struct task_cputime cputime;

#ifdef CONFIG_SCHED_CORE
	u64 forceidle_sum;
#endif
	u64 ntime;
};

/*
 * rstat - cgroup scalable recursive statistics. Accounting is done
 * per-cpu in css_rstat_cpu which is then lazily propagated up the
 * hierarchy on reads.
 *
 * When a stat gets updated, the css_rstat_cpu and its ancestors are
 * linked into the updated tree. On the following read, propagation only
 * considers and consumes the updated tree. This makes reading O(the
 * number of descendants which have been active since last read) instead of
 * O(the total number of descendants).
 *
 * This is important because there can be a lot of (draining) cgroups which
 * aren't active and stat may be read frequently. The combination can
 * become very expensive. By propagating selectively, increasing reading
 * frequency decreases the cost of each read.
 *
 * This struct hosts both the fields which implement the above -
 * updated_children and updated_next.
 */
struct css_rstat_cpu {
	/*
	 * Child cgroups with stat updates on this cpu since the last read
	 * are linked on the parent's ->updated_children through
	 * ->updated_next. updated_children is terminated by its container css.
	 */
	struct cgroup_subsys_state *updated_children;
	struct cgroup_subsys_state *updated_next;	/* NULL if not on the list */

	struct llist_node lnode;			/* lockless list for update */
	struct cgroup_subsys_state *owner;		/* back pointer */
};
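
/*
 * Editor's illustrative sketch, not part of the original header: the
 * update/flush split as seen from a controller, assuming the
 * css_rstat_updated()/css_rstat_flush() interface that this header's
 * comments refer to:
 *
 *	// hot path: account locally, then mark this css updated on this
 *	// cpu; this only links the css into the per-cpu updated tree
 *	this_cpu_add(state->stat, delta);
 *	css_rstat_updated(css, smp_processor_id());
 *
 *	// read path: propagate pending per-cpu deltas up the hierarchy;
 *	// cost is proportional to the number of updated csses only, and
 *	// the subsystem's ->css_rstat_flush() is invoked per updated cpu
 *	css_rstat_flush(css);
 */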

/*
 * This struct hosts the fields which track basic resource statistics on
 * top of it - bsync, bstat and last_bstat.
 */
struct cgroup_rstat_base_cpu {
	/*
	 * ->bsync protects ->bstat. These are the only fields which get
	 * updated in the hot path.
	 */
	struct u64_stats_sync bsync;
	struct cgroup_base_stat bstat;

	/*
	 * Snapshots at the last reading. These are used to calculate the
	 * deltas to propagate to the global counters.
	 */
	struct cgroup_base_stat last_bstat;

	/*
	 * This field is used to record the cumulative per-cpu time of
	 * the cgroup and its descendants. Currently it can be read via
	 * eBPF/drgn etc, and we are still trying to determine how to
	 * expose it in the cgroupfs interface.
	 */
	struct cgroup_base_stat subtree_bstat;

	/*
	 * Snapshots at the last reading. These are used to calculate the
	 * deltas to propagate to the per-cpu subtree_bstat.
	 */
	struct cgroup_base_stat last_subtree_bstat;
};

struct cgroup_freezer_state {
	/* Should the cgroup and its descendants be frozen? */
	bool freeze;

	/* Should the cgroup actually be frozen? */
	bool e_freeze;

	/* Fields below are protected by css_set_lock */

	/* Number of frozen descendant cgroups */
	int nr_frozen_descendants;

	/*
	 * Number of tasks which are counted as frozen:
	 * frozen, SIGSTOPped, and PTRACEd.
	 */
	int nr_frozen_tasks;

	/* Freeze time data consistency protection */
	seqcount_spinlock_t freeze_seq;

	/*
	 * Most recent time the cgroup was requested to freeze.
	 * Accesses guarded by freeze_seq counter. Writes serialized
	 * by css_set_lock.
	 */
	u64 freeze_start_nsec;

	/*
	 * Total duration the cgroup has spent freezing.
	 * Accesses guarded by freeze_seq counter. Writes serialized
	 * by css_set_lock.
	 */
	u64 frozen_nsec;
};

struct cgroup {
	/* self css with NULL ->ss, points back to this cgroup */
	struct cgroup_subsys_state self;

	unsigned long flags;	/* "unsigned long" so bitops work */

	/*
	 * The depth this cgroup is at. The root is at depth zero and each
	 * step down the hierarchy increments the level. This along with
	 * ancestors[] can determine whether a given cgroup is a
	 * descendant of another without traversing the hierarchy.
	 */
	int level;

	/* Maximum allowed descendant tree depth */
	int max_depth;

	/*
	 * Keep track of total numbers of visible and dying descendant cgroups.
	 * Dying cgroups are cgroups which were deleted by a user,
	 * but are still existing because someone else is holding a reference.
	 * max_descendants is a maximum allowed number of descendant cgroups.
	 *
	 * nr_descendants and nr_dying_descendants are protected
	 * by cgroup_mutex and css_set_lock. It's fine to read them holding
	 * any of cgroup_mutex and css_set_lock; for writing both locks
	 * should be held.
	 */
	int nr_descendants;
	int nr_dying_descendants;
	int max_descendants;

	/*
	 * Each non-empty css_set associated with this cgroup contributes
	 * one to nr_populated_csets. The counter is zero iff this cgroup
	 * doesn't have any tasks.
	 *
	 * All children which have non-zero nr_populated_csets and/or
	 * nr_populated_children of their own contribute one to either
	 * nr_populated_domain_children or nr_populated_threaded_children
	 * depending on their type. Each counter is zero iff all cgroups
	 * of the type in the subtree proper don't have any tasks.
	 */
	int nr_populated_csets;
	int nr_populated_domain_children;
	int nr_populated_threaded_children;

	int nr_threaded_children;	/* # of live threaded child cgroups */

	/* sequence number for cgroup.kill, serialized by css_set_lock */
	unsigned int kill_seq;

	struct kernfs_node *kn;		/* cgroup kernfs entry */
	struct cgroup_file procs_file;	/* handle for "cgroup.procs" */
	struct cgroup_file events_file;	/* handle for "cgroup.events" */

	/* handles for "{cpu,memory,io,irq}.pressure" */
	struct cgroup_file psi_files[NR_PSI_RESOURCES];

	/*
	 * The bitmask of subsystems enabled on the child cgroups.
	 * ->subtree_control is the one configured through
	 * "cgroup.subtree_control" while ->subtree_ss_mask is the effective
	 * one which may have more subsystems enabled. Controller knobs
	 * are made available iff it's enabled in ->subtree_control.
	 */
	u16 subtree_control;
	u16 subtree_ss_mask;
	u16 old_subtree_control;
	u16 old_subtree_ss_mask;

	/* Private pointers for each registered subsystem */
	struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];

	/*
	 * Keep track of total number of dying CSSes at and below this cgroup.
	 * Protected by cgroup_mutex.
	 */
	int nr_dying_subsys[CGROUP_SUBSYS_COUNT];

	struct cgroup_root *root;

	/*
	 * List of cgrp_cset_links pointing at css_sets with tasks in this
	 * cgroup. Protected by css_set_lock.
	 */
	struct list_head cset_links;

	/*
	 * On the default hierarchy, a css_set for a cgroup with some
	 * subsystems disabled will point to css's which are associated with
	 * the closest ancestor which has the subsys enabled. The
	 * following lists all css_sets which point to this cgroup's css
	 * for the given subsystem.
	 */
	struct list_head e_csets[CGROUP_SUBSYS_COUNT];

	/*
	 * If !threaded, self. If threaded, it points to the nearest
	 * domain ancestor. Inside a threaded subtree, cgroups are exempt
	 * from process granularity and no-internal-task constraint.
	 * Domain level resource consumptions which aren't tied to a
	 * specific task are charged to the dom_cgrp.
	 */
	struct cgroup *dom_cgrp;
	struct cgroup *old_dom_cgrp;	/* used while enabling threaded */

	/*
	 * Depending on the context, this field is initialized via
	 * css_rstat_init() at different places:
	 *
	 * when cgroup is the root cgroup
	 *   performed in cgroup_setup_root()
	 * otherwise
	 *   performed in cgroup_create()
	 */
	struct cgroup_rstat_base_cpu __percpu *rstat_base_cpu;

	/*
	 * Add padding to keep the read mostly rstat per-cpu pointer on a
	 * different cacheline than the following *bstat fields which can have
	 * frequent updates.
	 */
	CACHELINE_PADDING(_pad_);

	/* cgroup basic resource statistics */
	struct cgroup_base_stat last_bstat;
	struct cgroup_base_stat bstat;
	struct prev_cputime prev_cputime;	/* for printing out cputime */

	/*
	 * list of pidlists, up to two for each namespace (one for procs, one
	 * for tasks); created on demand.
	 */
	struct list_head pidlists;
	struct mutex pidlist_mutex;

	/* used to wait for offlining of csses */
	wait_queue_head_t offline_waitq;

	/* used to schedule release agent */
	struct work_struct release_agent_work;

	/* used to track pressure stalls */
	struct psi_group *psi;

	/* used to store eBPF programs */
	struct cgroup_bpf bpf;

	/* Used to store internal freezer state */
	struct cgroup_freezer_state freezer;

#ifdef CONFIG_BPF_SYSCALL
	struct bpf_local_storage __rcu *bpf_cgrp_storage;
#endif

	/* All ancestors including self */
	union {
		DECLARE_FLEX_ARRAY(struct cgroup *, ancestors);
		struct {
			struct cgroup *_root_ancestor;
			DECLARE_FLEX_ARRAY(struct cgroup *, _low_ancestors);
		};
	};
};
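
/*
 * Editor's illustrative sketch, not part of the original header: because
 * ->ancestors[] stores every ancestor including self, indexed by level,
 * descendancy checks are O(1). For two cgroups on the same hierarchy:
 *
 *	// is "descendant" inside the subtree rooted at "root"?
 *	bool is_desc = descendant->level >= root->level &&
 *		       descendant->ancestors[root->level] == root;
 *
 * cgroup_ancestor() in <linux/cgroup.h> is built on the same layout.
 */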

/*
 * A cgroup_root represents the root of a cgroup hierarchy, and may be
 * associated with a kernfs_root to form an active hierarchy. This is
 * internal to cgroup core. Don't access directly from controllers.
 */
struct cgroup_root {
	struct kernfs_root *kf_root;

	/* The bitmask of subsystems attached to this hierarchy */
	unsigned int subsys_mask;

	/* Unique id for this hierarchy. */
	int hierarchy_id;

	/* A list running through the active hierarchies */
	struct list_head root_list;
	struct rcu_head rcu;	/* Must be near the top */

	/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
	atomic_t nr_cgrps;

	/* Hierarchy-specific flags */
	unsigned int flags;

	/* The path to use for release notifications. */
	char release_agent_path[PATH_MAX];

	/* The name for this hierarchy - may be empty */
	char name[MAX_CGROUP_ROOT_NAMELEN];

	/*
	 * The root cgroup. The containing cgroup_root will be destroyed on its
	 * release. This must be embedded last due to flexible array at the end
	 * of struct cgroup.
	 */
	struct cgroup cgrp;
};

/*
 * struct cftype: handler definitions for cgroup control files
 *
 * When reading/writing to a file:
 *	- the cgroup to use is file->f_path.dentry->d_parent->d_fsdata
 *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
 */
struct cftype {
	/*
	 * Name of the subsystem is prepended in cgroup_file_name().
	 * Zero length string indicates end of cftype array.
	 */
	char name[MAX_CFTYPE_NAME];
	unsigned long private;

	/*
	 * The maximum length of string, excluding trailing nul, that can
	 * be passed to write. If < PAGE_SIZE-1, PAGE_SIZE-1 is assumed.
	 */
	size_t max_write_len;

	/* CFTYPE_* flags */
	unsigned int flags;

	/*
	 * If non-zero, should contain the offset from the start of css to
	 * a struct cgroup_file field. cgroup will record the handle of
	 * the created file into it. The recorded handle can be used as
	 * long as the containing css remains accessible.
	 */
	unsigned int file_offset;

	/*
	 * Fields used for internal bookkeeping. Initialized automatically
	 * during registration.
	 */
	struct cgroup_subsys *ss;	/* NULL for cgroup core files */
	struct list_head node;		/* anchored at ss->cfts */
	struct kernfs_ops *kf_ops;

	int (*open)(struct kernfs_open_file *of);
	void (*release)(struct kernfs_open_file *of);

	/*
	 * read_u64() is a shortcut for the common case of returning a
	 * single integer. Use it in place of read().
	 */
	u64 (*read_u64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/*
	 * read_s64() is a signed version of read_u64()
	 */
	s64 (*read_s64)(struct cgroup_subsys_state *css, struct cftype *cft);

	/* generic seq_file read interface */
	int (*seq_show)(struct seq_file *sf, void *v);

	/* optional ops, implement all or none */
	void *(*seq_start)(struct seq_file *sf, loff_t *ppos);
	void *(*seq_next)(struct seq_file *sf, void *v, loff_t *ppos);
	void (*seq_stop)(struct seq_file *sf, void *v);

	/*
	 * write_u64() is a shortcut for the common case of accepting
	 * a single integer (as parsed by simple_strtoull) from
	 * userspace. Use in place of write(); return 0 or error.
	 */
	int (*write_u64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 u64 val);

	/*
	 * write_s64() is a signed version of write_u64()
	 */
	int (*write_s64)(struct cgroup_subsys_state *css, struct cftype *cft,
			 s64 val);

	/*
	 * write() is the generic write callback which maps directly to
	 * kernfs write operation and overrides all other operations.
	 * Maximum write size is determined by ->max_write_len. Use
	 * of_css/cft() to access the associated css and cft.
	 */
	ssize_t (*write)(struct kernfs_open_file *of,
			 char *buf, size_t nbytes, loff_t off);

	__poll_t (*poll)(struct kernfs_open_file *of,
			 struct poll_table_struct *pt);

	struct lock_class_key lockdep_key;
};
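
/*
 * Editor's illustrative sketch, not part of the original header: a simple
 * integer knob wired up through the read_u64()/write_u64() shortcuts. The
 * foo_* names are made up for illustration:
 *
 *	static u64 foo_weight_read(struct cgroup_subsys_state *css,
 *				   struct cftype *cft)
 *	{
 *		return css_to_foo(css)->weight;
 *	}
 *
 *	static int foo_weight_write(struct cgroup_subsys_state *css,
 *				    struct cftype *cft, u64 val)
 *	{
 *		if (val < 1 || val > 10000)
 *			return -ERANGE;
 *		css_to_foo(css)->weight = val;
 *		return 0;
 *	}
 *
 *	static struct cftype foo_files[] = {
 *		{
 *			.name = "weight",	// shows up as "foo.weight"
 *			.flags = CFTYPE_NOT_ON_ROOT,
 *			.read_u64 = foo_weight_read,
 *			.write_u64 = foo_weight_write,
 *		},
 *		{ }	// zero-length name terminates the array
 *	};
 */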

/*
 * Control Group subsystem type.
 * See Documentation/admin-guide/cgroup-v1/cgroups.rst for details
 */
struct cgroup_subsys {
	struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
	int (*css_online)(struct cgroup_subsys_state *css);
	void (*css_offline)(struct cgroup_subsys_state *css);
	void (*css_released)(struct cgroup_subsys_state *css);
	void (*css_free)(struct cgroup_subsys_state *css);
	void (*css_reset)(struct cgroup_subsys_state *css);
	void (*css_killed)(struct cgroup_subsys_state *css);
	void (*css_rstat_flush)(struct cgroup_subsys_state *css, int cpu);
	int (*css_extra_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);
	int (*css_local_stat_show)(struct seq_file *seq,
				   struct cgroup_subsys_state *css);

	int (*can_attach)(struct cgroup_taskset *tset);
	void (*cancel_attach)(struct cgroup_taskset *tset);
	void (*attach)(struct cgroup_taskset *tset);
	int (*can_fork)(struct task_struct *task,
			struct css_set *cset);
	void (*cancel_fork)(struct task_struct *task, struct css_set *cset);
	void (*fork)(struct task_struct *task);
	void (*exit)(struct task_struct *task);
	void (*release)(struct task_struct *task);
	void (*bind)(struct cgroup_subsys_state *root_css);

	bool early_init:1;

	/*
	 * If %true, the controller, on the default hierarchy, doesn't show
	 * up in "cgroup.controllers" or "cgroup.subtree_control", is
	 * implicitly enabled on all cgroups on the default hierarchy, and
	 * bypasses the "no internal process" constraint. This is for
	 * utility type controllers which are transparent to userland.
	 *
	 * An implicit controller can be stolen from the default hierarchy
	 * anytime and thus must be okay with offline csses from previous
	 * hierarchies coexisting with csses for the current one.
	 */
	bool implicit_on_dfl:1;

	/*
	 * If %true, the controller supports threaded mode on the default
	 * hierarchy. In a threaded subtree, both process granularity and
	 * the no-internal-process constraint are ignored and threaded
	 * controllers should be able to handle that.
	 *
	 * Note that as an implicit controller is automatically enabled on
	 * all cgroups on the default hierarchy, it should also be
	 * threaded. implicit && !threaded is not supported.
	 */
	bool threaded:1;

	/* the following two fields are initialized automatically during boot */
	int id;
	const char *name;

	/* optional, initialized automatically during boot if not set */
	const char *legacy_name;

	/* link to parent, protected by cgroup_lock() */
	struct cgroup_root *root;

	/* idr for css->id */
	struct idr css_idr;

	/*
	 * List of cftypes. Each entry is the first entry of an array
	 * terminated by zero length name.
	 */
	struct list_head cfts;

	/*
	 * Base cftypes which are automatically registered. The two can
	 * point to the same array.
	 */
	struct cftype *dfl_cftypes;	/* for the default hierarchy */
	struct cftype *legacy_cftypes;	/* for the legacy hierarchies */

	/*
	 * A subsystem may depend on other subsystems. When such subsystem
	 * is enabled on a cgroup, the depended-upon subsystems are enabled
	 * together if available. Subsystems enabled due to dependency are
	 * not visible to userland until explicitly enabled. The following
	 * specifies the mask of subsystems that this one depends on.
	 */
	unsigned int depends_on;

	spinlock_t rstat_ss_lock;
	struct llist_head __percpu *lhead;	/* lockless update list head */
};
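
/*
 * Editor's illustrative sketch, not part of the original header: the
 * minimal shape of a controller definition. Each controller provides a
 * <name>_cgrp_subsys instance matching its SUBSYS() entry in
 * <linux/cgroup_subsys.h>; the foo_* names are made up:
 *
 *	static struct cgroup_subsys_state *
 *	foo_css_alloc(struct cgroup_subsys_state *parent_css)
 *	{
 *		struct foo_cgroup_state *state;
 *
 *		state = kzalloc(sizeof(*state), GFP_KERNEL);
 *		if (!state)
 *			return ERR_PTR(-ENOMEM);
 *		return &state->css;
 *	}
 *
 *	static void foo_css_free(struct cgroup_subsys_state *css)
 *	{
 *		kfree(container_of(css, struct foo_cgroup_state, css));
 *	}
 *
 *	struct cgroup_subsys foo_cgrp_subsys = {
 *		.css_alloc	= foo_css_alloc,
 *		.css_free	= foo_css_free,
 *		.dfl_cftypes	= foo_files,
 *	};
 */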

extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
extern bool cgroup_enable_per_threadgroup_rwsem;

struct cgroup_of_peak {
	unsigned long value;
	struct list_head list;
};

/**
 * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Allows cgroup operations to synchronize against threadgroup changes
 * using a global percpu_rw_semaphore and a per threadgroup rw_semaphore when
 * favordynmods is on. See the comment above the CGRP_ROOT_FAVOR_DYNMODS
 * definition.
 */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
	if (cgroup_enable_per_threadgroup_rwsem)
		down_read(&tsk->signal->cgroup_threadgroup_rwsem);
}

/**
 * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
 * @tsk: target task
 *
 * Counterpart of cgroup_threadgroup_change_begin().
 */
static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	if (cgroup_enable_per_threadgroup_rwsem)
		up_read(&tsk->signal->cgroup_threadgroup_rwsem);
	percpu_up_read(&cgroup_threadgroup_rwsem);
}
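
/*
 * Editor's illustrative sketch, not part of the original header: how the
 * pair above is meant to be used. Paths that change a threadgroup (e.g.
 * fork) take the read side, while writers to "cgroup.procs" take the
 * write side of the same locks, so a migration always sees a stable
 * threadgroup:
 *
 *	cgroup_threadgroup_change_begin(current);
 *	// ... link the new thread into the group / update css_sets ...
 *	cgroup_threadgroup_change_end(current);
 */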

#else	/* CONFIG_CGROUPS */

#define CGROUP_SUBSYS_COUNT 0

static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	might_sleep();
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk) {}

#endif	/* CONFIG_CGROUPS */

#ifdef CONFIG_SOCK_CGROUP_DATA

/*
 * sock_cgroup_data is embedded at sock->sk_cgrp_data and contains
 * per-socket cgroup information except for memcg association.
 *
 * On legacy hierarchies, net_prio and net_cls controllers directly
 * set attributes on each sock which can then be tested by the network
 * layer. On the default hierarchy, each sock is associated with the
 * cgroup it was created in and the networking layer can match the
 * cgroup directly.
 */
struct sock_cgroup_data {
	struct cgroup	*cgroup; /* v2 */
#ifdef CONFIG_CGROUP_NET_CLASSID
	u32		classid; /* v1 */
#endif
#ifdef CONFIG_CGROUP_NET_PRIO
	u16		prioidx; /* v1 */
#endif
};

static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
{
#ifdef CONFIG_CGROUP_NET_PRIO
	return READ_ONCE(skcd->prioidx);
#else
	return 1;
#endif
}

#ifdef CONFIG_CGROUP_NET_CLASSID
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
	return READ_ONCE(skcd->classid);
}
#endif

static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
					   u16 prioidx)
{
#ifdef CONFIG_CGROUP_NET_PRIO
	WRITE_ONCE(skcd->prioidx, prioidx);
#endif
}

#ifdef CONFIG_CGROUP_NET_CLASSID
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
					   u32 classid)
{
	WRITE_ONCE(skcd->classid, classid);
}
#endif

#else	/* CONFIG_SOCK_CGROUP_DATA */

struct sock_cgroup_data {
};

#endif	/* CONFIG_SOCK_CGROUP_DATA */

#endif	/* _LINUX_CGROUP_DEFS_H */
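
/*
 * Editor's illustrative sketch, not part of the original header: the
 * accessors above let the network layer test per-socket cgroup attributes
 * without caring which hierarchy set them, e.g. in a classifier:
 *
 *	u32 classid = sock_cgroup_classid(&sk->sk_cgrp_data);	// v1 net_cls
 *	u16 prioidx = sock_cgroup_prioidx(&sk->sk_cgrp_data);	// v1 net_prio
 *	struct cgroup *cgrp = sk->sk_cgrp_data.cgroup;		// v2 match
 */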