Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup

Pull cgroup updates from Tejun Heo:
"Nothing too interesting. Just a handful of cleanup patches"

* 'for-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
Revert "cgroup: remove redundant variable in cgroup_mount()"
cgroup: remove redundant variable in cgroup_mount()
cgroup: fix missing unlock in cgroup_release_agent()
cgroup: remove CGRP_RELEASABLE flag
perf/cgroup: Remove perf_put_cgroup()
cgroup: remove redundant check in cgroup_ino()
cpuset: simplify proc_cpuset_show()
cgroup: simplify proc_cgroup_show()
cgroup: use a per-cgroup work for release agent
cgroup: remove bogus comments
cgroup: remove redundant code in cgroup_rmdir()
cgroup: remove some useless forward declarations
cgroup: fix a typo in comment.

+68 -202
+4 -35
fs/proc/base.c
··· 376 376 377 377 #endif 378 378 379 - #ifdef CONFIG_CGROUPS 380 - static int cgroup_open(struct inode *inode, struct file *file) 381 - { 382 - struct pid *pid = PROC_I(inode)->pid; 383 - return single_open(file, proc_cgroup_show, pid); 384 - } 385 - 386 - static const struct file_operations proc_cgroup_operations = { 387 - .open = cgroup_open, 388 - .read = seq_read, 389 - .llseek = seq_lseek, 390 - .release = single_release, 391 - }; 392 - #endif 393 - 394 - #ifdef CONFIG_PROC_PID_CPUSET 395 - 396 - static int cpuset_open(struct inode *inode, struct file *file) 397 - { 398 - struct pid *pid = PROC_I(inode)->pid; 399 - return single_open(file, proc_cpuset_show, pid); 400 - } 401 - 402 - static const struct file_operations proc_cpuset_operations = { 403 - .open = cpuset_open, 404 - .read = seq_read, 405 - .llseek = seq_lseek, 406 - .release = single_release, 407 - }; 408 - #endif 409 - 410 379 static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, 411 380 struct pid *pid, struct task_struct *task) 412 381 { ··· 2548 2579 REG("latency", S_IRUGO, proc_lstats_operations), 2549 2580 #endif 2550 2581 #ifdef CONFIG_PROC_PID_CPUSET 2551 - REG("cpuset", S_IRUGO, proc_cpuset_operations), 2582 + ONE("cpuset", S_IRUGO, proc_cpuset_show), 2552 2583 #endif 2553 2584 #ifdef CONFIG_CGROUPS 2554 - REG("cgroup", S_IRUGO, proc_cgroup_operations), 2585 + ONE("cgroup", S_IRUGO, proc_cgroup_show), 2555 2586 #endif 2556 2587 ONE("oom_score", S_IRUGO, proc_oom_score), 2557 2588 REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations), ··· 2894 2925 REG("latency", S_IRUGO, proc_lstats_operations), 2895 2926 #endif 2896 2927 #ifdef CONFIG_PROC_PID_CPUSET 2897 - REG("cpuset", S_IRUGO, proc_cpuset_operations), 2928 + ONE("cpuset", S_IRUGO, proc_cpuset_show), 2898 2929 #endif 2899 2930 #ifdef CONFIG_CGROUPS 2900 - REG("cgroup", S_IRUGO, proc_cgroup_operations), 2931 + ONE("cgroup", S_IRUGO, proc_cgroup_show), 2901 2932 #endif 2902 2933 ONE("oom_score", S_IRUGO, 
proc_oom_score), 2903 2934 REG("oom_adj", S_IRUGO|S_IWUSR, proc_oom_adj_operations),
+7 -19
include/linux/cgroup.h
··· 27 27 28 28 struct cgroup_root; 29 29 struct cgroup_subsys; 30 - struct inode; 31 30 struct cgroup; 32 31 33 32 extern int cgroup_init_early(void); ··· 37 38 extern int cgroupstats_build(struct cgroupstats *stats, 38 39 struct dentry *dentry); 39 40 40 - extern int proc_cgroup_show(struct seq_file *, void *); 41 + extern int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, 42 + struct pid *pid, struct task_struct *tsk); 41 43 42 44 /* define the enumeration of all cgroup subsystems */ 43 45 #define SUBSYS(_x) _x ## _cgrp_id, ··· 161 161 162 162 /* bits in struct cgroup flags field */ 163 163 enum { 164 - /* 165 - * Control Group has previously had a child cgroup or a task, 166 - * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) 167 - */ 168 - CGRP_RELEASABLE, 169 164 /* Control Group requires release notifications to userspace */ 170 165 CGRP_NOTIFY_ON_RELEASE, 171 166 /* ··· 230 235 struct list_head e_csets[CGROUP_SUBSYS_COUNT]; 231 236 232 237 /* 233 - * Linked list running through all cgroups that can 234 - * potentially be reaped by the release agent. Protected by 235 - * release_list_lock 236 - */ 237 - struct list_head release_list; 238 - 239 - /* 240 238 * list of pidlists, up to two for each namespace (one for procs, one 241 239 * for tasks); created on demand. 
242 240 */ ··· 238 250 239 251 /* used to wait for offlining of csses */ 240 252 wait_queue_head_t offline_waitq; 253 + 254 + /* used to schedule release agent */ 255 + struct work_struct release_agent_work; 241 256 }; 242 257 243 258 #define MAX_CGROUP_ROOT_NAMELEN 64 ··· 527 536 return !list_empty(&cgrp->cset_links); 528 537 } 529 538 530 - /* returns ino associated with a cgroup, 0 indicates unmounted root */ 539 + /* returns ino associated with a cgroup */ 531 540 static inline ino_t cgroup_ino(struct cgroup *cgrp) 532 541 { 533 - if (cgrp->kn) 534 - return cgrp->kn->ino; 535 - else 536 - return 0; 542 + return cgrp->kn->ino; 537 543 } 538 544 539 545 /* cft/css accessors for cftype->write() operation */
+2 -1
include/linux/cpuset.h
··· 86 86 87 87 extern void cpuset_task_status_allowed(struct seq_file *m, 88 88 struct task_struct *task); 89 - extern int proc_cpuset_show(struct seq_file *, void *); 89 + extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, 90 + struct pid *pid, struct task_struct *tsk); 90 91 91 92 extern int cpuset_mem_spread_node(void); 92 93 extern int cpuset_slab_spread_node(void);
+50 -128
kernel/cgroup.c
··· 185 185 static struct cftype cgroup_dfl_base_files[]; 186 186 static struct cftype cgroup_legacy_base_files[]; 187 187 188 - static void cgroup_put(struct cgroup *cgrp); 189 188 static int rebind_subsystems(struct cgroup_root *dst_root, 190 189 unsigned int ss_mask); 191 190 static int cgroup_destroy_locked(struct cgroup *cgrp); ··· 194 195 static void kill_css(struct cgroup_subsys_state *css); 195 196 static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[], 196 197 bool is_add); 197 - static void cgroup_pidlist_destroy_all(struct cgroup *cgrp); 198 198 199 199 /* IDR wrappers which synchronize using cgroup_idr_lock */ 200 200 static int cgroup_idr_alloc(struct idr *idr, void *ptr, int start, int end, ··· 329 331 return false; 330 332 } 331 333 332 - static int cgroup_is_releasable(const struct cgroup *cgrp) 333 - { 334 - const int bits = 335 - (1 << CGRP_RELEASABLE) | 336 - (1 << CGRP_NOTIFY_ON_RELEASE); 337 - return (cgrp->flags & bits) == bits; 338 - } 339 - 340 334 static int notify_on_release(const struct cgroup *cgrp) 341 335 { 342 336 return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); ··· 384 394 ; \ 385 395 else 386 396 387 - /* the list of cgroups eligible for automatic release. 
Protected by 388 - * release_list_lock */ 389 - static LIST_HEAD(release_list); 390 - static DEFINE_RAW_SPINLOCK(release_list_lock); 391 397 static void cgroup_release_agent(struct work_struct *work); 392 - static DECLARE_WORK(release_agent_work, cgroup_release_agent); 393 398 static void check_for_release(struct cgroup *cgrp); 394 399 395 400 /* ··· 483 498 return key; 484 499 } 485 500 486 - static void put_css_set_locked(struct css_set *cset, bool taskexit) 501 + static void put_css_set_locked(struct css_set *cset) 487 502 { 488 503 struct cgrp_cset_link *link, *tmp_link; 489 504 struct cgroup_subsys *ss; ··· 509 524 /* @cgrp can't go away while we're holding css_set_rwsem */ 510 525 if (list_empty(&cgrp->cset_links)) { 511 526 cgroup_update_populated(cgrp, false); 512 - if (notify_on_release(cgrp)) { 513 - if (taskexit) 514 - set_bit(CGRP_RELEASABLE, &cgrp->flags); 515 - check_for_release(cgrp); 516 - } 527 + check_for_release(cgrp); 517 528 } 518 529 519 530 kfree(link); ··· 518 537 kfree_rcu(cset, rcu_head); 519 538 } 520 539 521 - static void put_css_set(struct css_set *cset, bool taskexit) 540 + static void put_css_set(struct css_set *cset) 522 541 { 523 542 /* 524 543 * Ensure that the refcount doesn't hit zero while any readers ··· 529 548 return; 530 549 531 550 down_write(&css_set_rwsem); 532 - put_css_set_locked(cset, taskexit); 551 + put_css_set_locked(cset); 533 552 up_write(&css_set_rwsem); 534 553 } 535 554 ··· 949 968 * a task holds cgroup_mutex on a cgroup with zero count, it 950 969 * knows that the cgroup won't be removed, as cgroup_rmdir() 951 970 * needs that mutex. 952 - * 953 - * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't 954 - * (usually) take cgroup_mutex. These are the two most performance 955 - * critical pieces of code here. The exception occurs on cgroup_exit(), 956 - * when a task in a notify_on_release cgroup exits. 
Then cgroup_mutex 957 - * is taken, and if the cgroup count is zero, a usermode call made 958 - * to the release agent with the name of the cgroup (path relative to 959 - * the root of cgroup file system) as the argument. 960 971 * 961 972 * A cgroup can only be deleted if both its 'count' of using tasks 962 973 * is zero, and its list of 'children' cgroups is empty. Since all ··· 1560 1587 INIT_LIST_HEAD(&cgrp->self.sibling); 1561 1588 INIT_LIST_HEAD(&cgrp->self.children); 1562 1589 INIT_LIST_HEAD(&cgrp->cset_links); 1563 - INIT_LIST_HEAD(&cgrp->release_list); 1564 1590 INIT_LIST_HEAD(&cgrp->pidlists); 1565 1591 mutex_init(&cgrp->pidlist_mutex); 1566 1592 cgrp->self.cgroup = cgrp; ··· 1569 1597 INIT_LIST_HEAD(&cgrp->e_csets[ssid]); 1570 1598 1571 1599 init_waitqueue_head(&cgrp->offline_waitq); 1600 + INIT_WORK(&cgrp->release_agent_work, cgroup_release_agent); 1572 1601 } 1573 1602 1574 1603 static void init_cgroup_root(struct cgroup_root *root, ··· 2025 2052 * task. As trading it for new_cset is protected by cgroup_mutex, 2026 2053 * we're safe to drop it here; it will be freed under RCU. 
2027 2054 */ 2028 - set_bit(CGRP_RELEASABLE, &old_cgrp->flags); 2029 - put_css_set_locked(old_cset, false); 2055 + put_css_set_locked(old_cset); 2030 2056 } 2031 2057 2032 2058 /** ··· 2046 2074 cset->mg_src_cgrp = NULL; 2047 2075 cset->mg_dst_cset = NULL; 2048 2076 list_del_init(&cset->mg_preload_node); 2049 - put_css_set_locked(cset, false); 2077 + put_css_set_locked(cset); 2050 2078 } 2051 2079 up_write(&css_set_rwsem); 2052 2080 } ··· 2140 2168 if (src_cset == dst_cset) { 2141 2169 src_cset->mg_src_cgrp = NULL; 2142 2170 list_del_init(&src_cset->mg_preload_node); 2143 - put_css_set(src_cset, false); 2144 - put_css_set(dst_cset, false); 2171 + put_css_set(src_cset); 2172 + put_css_set(dst_cset); 2145 2173 continue; 2146 2174 } 2147 2175 ··· 2150 2178 if (list_empty(&dst_cset->mg_preload_node)) 2151 2179 list_add(&dst_cset->mg_preload_node, &csets); 2152 2180 else 2153 - put_css_set(dst_cset, false); 2181 + put_css_set(dst_cset); 2154 2182 } 2155 2183 2156 2184 list_splice_tail(&csets, preloaded_csets); ··· 4145 4173 static int cgroup_write_notify_on_release(struct cgroup_subsys_state *css, 4146 4174 struct cftype *cft, u64 val) 4147 4175 { 4148 - clear_bit(CGRP_RELEASABLE, &css->cgroup->flags); 4149 4176 if (val) 4150 4177 set_bit(CGRP_NOTIFY_ON_RELEASE, &css->cgroup->flags); 4151 4178 else ··· 4322 4351 /* cgroup free path */ 4323 4352 atomic_dec(&cgrp->root->nr_cgrps); 4324 4353 cgroup_pidlist_destroy_all(cgrp); 4354 + cancel_work_sync(&cgrp->release_agent_work); 4325 4355 4326 4356 if (cgroup_parent(cgrp)) { 4327 4357 /* ··· 4785 4813 for_each_css(css, ssid, cgrp) 4786 4814 kill_css(css); 4787 4815 4788 - /* CSS_ONLINE is clear, remove from ->release_list for the last time */ 4789 - raw_spin_lock(&release_list_lock); 4790 - if (!list_empty(&cgrp->release_list)) 4791 - list_del_init(&cgrp->release_list); 4792 - raw_spin_unlock(&release_list_lock); 4793 - 4794 4816 /* 4795 4817 * Remove @cgrp directory along with the base files. 
@cgrp has an 4796 4818 * extra ref on its kn. 4797 4819 */ 4798 4820 kernfs_remove(cgrp->kn); 4799 4821 4800 - set_bit(CGRP_RELEASABLE, &cgroup_parent(cgrp)->flags); 4801 4822 check_for_release(cgroup_parent(cgrp)); 4802 4823 4803 4824 /* put the base reference */ ··· 4807 4842 cgrp = cgroup_kn_lock_live(kn); 4808 4843 if (!cgrp) 4809 4844 return 0; 4810 - cgroup_get(cgrp); /* for @kn->priv clearing */ 4811 4845 4812 4846 ret = cgroup_destroy_locked(cgrp); 4813 4847 4814 4848 cgroup_kn_unlock(kn); 4815 - 4816 - cgroup_put(cgrp); 4817 4849 return ret; 4818 4850 } 4819 4851 ··· 5014 5052 * - Print task's cgroup paths into seq_file, one line for each hierarchy 5015 5053 * - Used for /proc/<pid>/cgroup. 5016 5054 */ 5017 - 5018 - /* TODO: Use a proper seq_file iterator */ 5019 - int proc_cgroup_show(struct seq_file *m, void *v) 5055 + int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, 5056 + struct pid *pid, struct task_struct *tsk) 5020 5057 { 5021 - struct pid *pid; 5022 - struct task_struct *tsk; 5023 5058 char *buf, *path; 5024 5059 int retval; 5025 5060 struct cgroup_root *root; ··· 5025 5066 buf = kmalloc(PATH_MAX, GFP_KERNEL); 5026 5067 if (!buf) 5027 5068 goto out; 5028 - 5029 - retval = -ESRCH; 5030 - pid = m->private; 5031 - tsk = get_pid_task(pid, PIDTYPE_PID); 5032 - if (!tsk) 5033 - goto out_free; 5034 - 5035 - retval = 0; 5036 5069 5037 5070 mutex_lock(&cgroup_mutex); 5038 5071 down_read(&css_set_rwsem); ··· 5055 5104 seq_putc(m, '\n'); 5056 5105 } 5057 5106 5107 + retval = 0; 5058 5108 out_unlock: 5059 5109 up_read(&css_set_rwsem); 5060 5110 mutex_unlock(&cgroup_mutex); 5061 - put_task_struct(tsk); 5062 - out_free: 5063 5111 kfree(buf); 5064 5112 out: 5065 5113 return retval; ··· 5129 5179 int i; 5130 5180 5131 5181 /* 5132 - * This may race against cgroup_enable_task_cg_links(). As that 5182 + * This may race against cgroup_enable_task_cg_lists(). 
As that 5133 5183 * function sets use_task_css_set_links before grabbing 5134 5184 * tasklist_lock and we just went through tasklist_lock to add 5135 5185 * @child, it's guaranteed that either we see the set ··· 5144 5194 * when implementing operations which need to migrate all tasks of 5145 5195 * a cgroup to another. 5146 5196 * 5147 - * Note that if we lose to cgroup_enable_task_cg_links(), @child 5197 + * Note that if we lose to cgroup_enable_task_cg_lists(), @child 5148 5198 * will remain in init_css_set. This is safe because all tasks are 5149 5199 * in the init_css_set before cg_links is enabled and there's no 5150 5200 * operation which transfers all tasks out of init_css_set. ··· 5228 5278 } 5229 5279 5230 5280 if (put_cset) 5231 - put_css_set(cset, true); 5281 + put_css_set(cset); 5232 5282 } 5233 5283 5234 5284 static void check_for_release(struct cgroup *cgrp) 5235 5285 { 5236 - if (cgroup_is_releasable(cgrp) && list_empty(&cgrp->cset_links) && 5237 - !css_has_online_children(&cgrp->self)) { 5238 - /* 5239 - * Control Group is currently removeable. 
If it's not 5240 - * already queued for a userspace notification, queue 5241 - * it now 5242 - */ 5243 - int need_schedule_work = 0; 5244 - 5245 - raw_spin_lock(&release_list_lock); 5246 - if (!cgroup_is_dead(cgrp) && 5247 - list_empty(&cgrp->release_list)) { 5248 - list_add(&cgrp->release_list, &release_list); 5249 - need_schedule_work = 1; 5250 - } 5251 - raw_spin_unlock(&release_list_lock); 5252 - if (need_schedule_work) 5253 - schedule_work(&release_agent_work); 5254 - } 5286 + if (notify_on_release(cgrp) && !cgroup_has_tasks(cgrp) && 5287 + !css_has_online_children(&cgrp->self) && !cgroup_is_dead(cgrp)) 5288 + schedule_work(&cgrp->release_agent_work); 5255 5289 } 5256 5290 5257 5291 /* ··· 5263 5329 */ 5264 5330 static void cgroup_release_agent(struct work_struct *work) 5265 5331 { 5266 - BUG_ON(work != &release_agent_work); 5332 + struct cgroup *cgrp = 5333 + container_of(work, struct cgroup, release_agent_work); 5334 + char *pathbuf = NULL, *agentbuf = NULL, *path; 5335 + char *argv[3], *envp[3]; 5336 + 5267 5337 mutex_lock(&cgroup_mutex); 5268 - raw_spin_lock(&release_list_lock); 5269 - while (!list_empty(&release_list)) { 5270 - char *argv[3], *envp[3]; 5271 - int i; 5272 - char *pathbuf = NULL, *agentbuf = NULL, *path; 5273 - struct cgroup *cgrp = list_entry(release_list.next, 5274 - struct cgroup, 5275 - release_list); 5276 - list_del_init(&cgrp->release_list); 5277 - raw_spin_unlock(&release_list_lock); 5278 - pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 5279 - if (!pathbuf) 5280 - goto continue_free; 5281 - path = cgroup_path(cgrp, pathbuf, PATH_MAX); 5282 - if (!path) 5283 - goto continue_free; 5284 - agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); 5285 - if (!agentbuf) 5286 - goto continue_free; 5287 5338 5288 - i = 0; 5289 - argv[i++] = agentbuf; 5290 - argv[i++] = path; 5291 - argv[i] = NULL; 5339 + pathbuf = kmalloc(PATH_MAX, GFP_KERNEL); 5340 + agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); 5341 + if (!pathbuf || 
!agentbuf) 5342 + goto out; 5292 5343 5293 - i = 0; 5294 - /* minimal command environment */ 5295 - envp[i++] = "HOME=/"; 5296 - envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; 5297 - envp[i] = NULL; 5344 + path = cgroup_path(cgrp, pathbuf, PATH_MAX); 5345 + if (!path) 5346 + goto out; 5298 5347 5299 - /* Drop the lock while we invoke the usermode helper, 5300 - * since the exec could involve hitting disk and hence 5301 - * be a slow process */ 5302 - mutex_unlock(&cgroup_mutex); 5303 - call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); 5304 - mutex_lock(&cgroup_mutex); 5305 - continue_free: 5306 - kfree(pathbuf); 5307 - kfree(agentbuf); 5308 - raw_spin_lock(&release_list_lock); 5309 - } 5310 - raw_spin_unlock(&release_list_lock); 5348 + argv[0] = agentbuf; 5349 + argv[1] = path; 5350 + argv[2] = NULL; 5351 + 5352 + /* minimal command environment */ 5353 + envp[0] = "HOME=/"; 5354 + envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; 5355 + envp[2] = NULL; 5356 + 5311 5357 mutex_unlock(&cgroup_mutex); 5358 + call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); 5359 + goto out_free; 5360 + out: 5361 + mutex_unlock(&cgroup_mutex); 5362 + out_free: 5363 + kfree(agentbuf); 5364 + kfree(pathbuf); 5312 5365 } 5313 5366 5314 5367 static int __init cgroup_disable(char *str) ··· 5483 5562 5484 5563 static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft) 5485 5564 { 5486 - return test_bit(CGRP_RELEASABLE, &css->cgroup->flags); 5565 + return (!cgroup_has_tasks(css->cgroup) && 5566 + !css_has_online_children(&css->cgroup->self)); 5487 5567 } 5488 5568 5489 5569 static struct cftype debug_files[] = {
+3 -12
kernel/cpuset.c
··· 2730 2730 * and we take cpuset_mutex, keeping cpuset_attach() from changing it 2731 2731 * anyway. 2732 2732 */ 2733 - int proc_cpuset_show(struct seq_file *m, void *unused_v) 2733 + int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, 2734 + struct pid *pid, struct task_struct *tsk) 2734 2735 { 2735 - struct pid *pid; 2736 - struct task_struct *tsk; 2737 2736 char *buf, *p; 2738 2737 struct cgroup_subsys_state *css; 2739 2738 int retval; ··· 2742 2743 if (!buf) 2743 2744 goto out; 2744 2745 2745 - retval = -ESRCH; 2746 - pid = m->private; 2747 - tsk = get_pid_task(pid, PIDTYPE_PID); 2748 - if (!tsk) 2749 - goto out_free; 2750 - 2751 2746 retval = -ENAMETOOLONG; 2752 2747 rcu_read_lock(); 2753 2748 css = task_css(tsk, cpuset_cgrp_id); 2754 2749 p = cgroup_path(css->cgroup, buf, PATH_MAX); 2755 2750 rcu_read_unlock(); 2756 2751 if (!p) 2757 - goto out_put_task; 2752 + goto out_free; 2758 2753 seq_puts(m, p); 2759 2754 seq_putc(m, '\n'); 2760 2755 retval = 0; 2761 - out_put_task: 2762 - put_task_struct(tsk); 2763 2756 out_free: 2764 2757 kfree(buf); 2765 2758 out:
+1 -6
kernel/events/core.c
··· 392 392 event->cgrp->css.cgroup); 393 393 } 394 394 395 - static inline void perf_put_cgroup(struct perf_event *event) 396 - { 397 - css_put(&event->cgrp->css); 398 - } 399 - 400 395 static inline void perf_detach_cgroup(struct perf_event *event) 401 396 { 402 - perf_put_cgroup(event); 397 + css_put(&event->cgrp->css); 403 398 event->cgrp = NULL; 404 399 } 405 400
+1 -1
mm/memory-failure.c
··· 148 148 ino = cgroup_ino(css->cgroup); 149 149 css_put(css); 150 150 151 - if (!ino || ino != hwpoison_filter_memcg) 151 + if (ino != hwpoison_filter_memcg) 152 152 return -EINVAL; 153 153 154 154 return 0;