Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

locking, sched, cgroups: Annotate release_list_lock as raw

The release_list_lock can be taken in atomic context and therefore
cannot be preempted on -rt - annotate it.

In mainline this change documents the low level nature of
the lock - otherwise there's no functional difference. Lockdep
and Sparse checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

Authored by Thomas Gleixner; committed by Ingo Molnar.
cdcc136f f032a450

+9 -9
+9 -9
kernel/cgroup.c
··· 265 265 /* the list of cgroups eligible for automatic release. Protected by 266 266 * release_list_lock */ 267 267 static LIST_HEAD(release_list); 268 - static DEFINE_SPINLOCK(release_list_lock); 268 + static DEFINE_RAW_SPINLOCK(release_list_lock); 269 269 static void cgroup_release_agent(struct work_struct *work); 270 270 static DECLARE_WORK(release_agent_work, cgroup_release_agent); 271 271 static void check_for_release(struct cgroup *cgrp); ··· 4014 4014 finish_wait(&cgroup_rmdir_waitq, &wait); 4015 4015 clear_bit(CGRP_WAIT_ON_RMDIR, &cgrp->flags); 4016 4016 4017 - spin_lock(&release_list_lock); 4017 + raw_spin_lock(&release_list_lock); 4018 4018 set_bit(CGRP_REMOVED, &cgrp->flags); 4019 4019 if (!list_empty(&cgrp->release_list)) 4020 4020 list_del_init(&cgrp->release_list); 4021 - spin_unlock(&release_list_lock); 4021 + raw_spin_unlock(&release_list_lock); 4022 4022 4023 4023 cgroup_lock_hierarchy(cgrp->root); 4024 4024 /* delete this cgroup from parent->children */ ··· 4671 4671 * already queued for a userspace notification, queue 4672 4672 * it now */ 4673 4673 int need_schedule_work = 0; 4674 - spin_lock(&release_list_lock); 4674 + raw_spin_lock(&release_list_lock); 4675 4675 if (!cgroup_is_removed(cgrp) && 4676 4676 list_empty(&cgrp->release_list)) { 4677 4677 list_add(&cgrp->release_list, &release_list); 4678 4678 need_schedule_work = 1; 4679 4679 } 4680 - spin_unlock(&release_list_lock); 4680 + raw_spin_unlock(&release_list_lock); 4681 4681 if (need_schedule_work) 4682 4682 schedule_work(&release_agent_work); 4683 4683 } ··· 4729 4729 { 4730 4730 BUG_ON(work != &release_agent_work); 4731 4731 mutex_lock(&cgroup_mutex); 4732 - spin_lock(&release_list_lock); 4732 + raw_spin_lock(&release_list_lock); 4733 4733 while (!list_empty(&release_list)) { 4734 4734 char *argv[3], *envp[3]; 4735 4735 int i; ··· 4738 4738 struct cgroup, 4739 4739 release_list); 4740 4740 list_del_init(&cgrp->release_list); 4741 - spin_unlock(&release_list_lock); 4741 + raw_spin_unlock(&release_list_lock); 4742 4742 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); 4743 4743 if (!pathbuf) 4744 4744 goto continue_free; ··· 4768 4768 continue_free: 4769 4769 kfree(pathbuf); 4770 4770 kfree(agentbuf); 4771 - spin_lock(&release_list_lock); 4771 + raw_spin_lock(&release_list_lock); 4772 4772 } 4773 - spin_unlock(&release_list_lock); 4773 + raw_spin_unlock(&release_list_lock); 4774 4774 mutex_unlock(&cgroup_mutex); 4775 4775 }