
sched/swait: Rename to exclusive

Since swait basically implemented exclusive waits only, make sure
the API reflects that.

$ git grep -l -e "\<swake_up\>" \
              -e "\<swait_event[^ (]*" \
              -e "\<prepare_to_swait\>" | while read file;
  do
	sed -i -e 's/\<swake_up\>/&_one/g' \
	       -e 's/\<swait_event[^ (]*/&_exclusive/g' \
	       -e 's/\<prepare_to_swait\>/&_exclusive/g' $file;
  done

With a few manual touch-ups.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: bigeasy@linutronix.de
Cc: oleg@redhat.com
Cc: paulmck@linux.vnet.ibm.com
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20180612083909.261946548@infradead.org
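
For callers the change is purely mechanical; the wake-one behaviour is what swait always had. As a minimal sketch of a waiter/waker pair under the new names (my_wq, my_cond, wait_for_cond() and set_cond() are hypothetical; the pattern follows the CPU0/CPU1 example in include/linux/swait.h below):

  static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
  static bool my_cond;

  /* waiter: sleep until my_cond is observed true */
  static void wait_for_cond(void)
  {
	DECLARE_SWAITQUEUE(wait);

	for (;;) {
		prepare_to_swait_exclusive(&my_wq, &wait, TASK_INTERRUPTIBLE);
		if (READ_ONCE(my_cond))
			break;
		schedule();
	}
	finish_swait(&my_wq, &wait);
  }

  /* waker: publish the condition, then wake exactly one waiter */
  static void set_cond(void)
  {
	WRITE_ONCE(my_cond, true);
	smp_mb(); /* pairs with set_current_state() in prepare_to_swait_exclusive() */
	if (swait_active(&my_wq))
		swake_up_one(&my_wq);
  }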

Authored by Peter Zijlstra, committed by Thomas Gleixner
b3dae109 0abf17bc

+48 -48
+2 -2
arch/mips/kvm/mips.c
···
 	dvcpu->arch.wait = 0;

 	if (swq_has_sleeper(&dvcpu->wq))
-		swake_up(&dvcpu->wq);
+		swake_up_one(&dvcpu->wq);

 	return 0;
 }
···
 	vcpu->arch.wait = 0;
 	if (swq_has_sleeper(&vcpu->wq))
-		swake_up(&vcpu->wq);
+		swake_up_one(&vcpu->wq);
 }

 /* low level hrtimer wake routine */
+3 -3
arch/powerpc/kvm/book3s_hv.c
···

 	wqp = kvm_arch_vcpu_wq(vcpu);
 	if (swq_has_sleeper(wqp)) {
-		swake_up(wqp);
+		swake_up_one(wqp);
 		++vcpu->stat.halt_wakeup;
 	}

···
 		}
 	}

-	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+	prepare_to_swait_exclusive(&vc->wq, &wait, TASK_INTERRUPTIBLE);

 	if (kvmppc_vcore_check_block(vc)) {
 		finish_swait(&vc->wq, &wait);
···
 		kvmppc_start_thread(vcpu, vc);
 		trace_kvm_guest_enter(vcpu);
 	} else if (vc->vcore_state == VCORE_SLEEPING) {
-		swake_up(&vc->wq);
+		swake_up_one(&vc->wq);
 	}

 }
+1 -1
arch/s390/kvm/interrupt.c
···
 	 * yield-candidate.
 	 */
 	vcpu->preempted = true;
-	swake_up(&vcpu->wq);
+	swake_up_one(&vcpu->wq);
 	vcpu->stat.halt_wakeup++;
 }
 /*
+2 -2
arch/x86/kernel/kvm.c
···

 	for (;;) {
 		if (!n.halted)
-			prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
+			prepare_to_swait_exclusive(&n.wq, &wait, TASK_UNINTERRUPTIBLE);
 		if (hlist_unhashed(&n.link))
 			break;
···
 	if (n->halted)
 		smp_send_reschedule(n->cpu);
 	else if (swq_has_sleeper(&n->wq))
-		swake_up(&n->wq);
+		swake_up_one(&n->wq);
 }

 static void apf_task_wake_all(void)
+1 -1
arch/x86/kvm/lapic.c
···
 	 * using swait_active() is safe.
 	 */
 	if (swait_active(q))
-		swake_up(q);
+		swake_up_one(q);

 	if (apic_lvtt_tscdeadline(apic))
 		ktimer->expired_tscdeadline = ktimer->tscdeadline;
+12 -12
include/linux/swait.h
···
  * wait-queues, but the semantics are actually completely different, and
  * every single user we have ever had has been buggy (or pointless).
  *
- * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
  * "wake_up()" does, and has led to problems. In other cases, it has
  * been fine, because there's only ever one waiter (kvm), but in that
  * case the whole "simple" wait-queue is just pointless to begin with,
···
  * CPU0 - waker                    CPU1 - waiter
  *
  *                                 for (;;) {
- * @cond = true;                   prepare_to_swait(&wq_head, &wait, state);
+ * @cond = true;                   prepare_to_swait_exclusive(&wq_head, &wait, state);
  * smp_mb();                       // smp_mb() from set_current_state()
  * if (swait_active(wq_head))      if (@cond)
  *     wake_up(wq_head);               break;
···
 	return swait_active(wq);
 }

-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
 extern void swake_up_all(struct swait_queue_head *q);
 extern void swake_up_locked(struct swait_queue_head *q);

-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
 extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);

 extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
···
 	(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0,	\
 			    schedule())

-#define swait_event(wq, condition)					\
+#define swait_event_exclusive(wq, condition)				\
 do {									\
 	if (condition)							\
 		break;							\
···
 			      TASK_UNINTERRUPTIBLE, timeout,		\
 			      __ret = schedule_timeout(__ret))

-#define swait_event_timeout(wq, condition, timeout)			\
+#define swait_event_timeout_exclusive(wq, condition, timeout)		\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
···
 	___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0,		\
 		       schedule())

-#define swait_event_interruptible(wq, condition)			\
+#define swait_event_interruptible_exclusive(wq, condition)		\
 ({									\
 	int __ret = 0;							\
 	if (!(condition))						\
···
 			      TASK_INTERRUPTIBLE, timeout,		\
 			      __ret = schedule_timeout(__ret))

-#define swait_event_interruptible_timeout(wq, condition, timeout)	\
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
···
 	(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())

 /**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  *
···
  * condition and doesn't want to contribute to system load. Signals are
  * ignored.
  */
-#define swait_event_idle(wq, condition)					\
+#define swait_event_idle_exclusive(wq, condition)			\
 do {									\
 	if (condition)							\
 		break;							\
···
 			      __ret = schedule_timeout(__ret))

 /**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
  * @wq: the waitqueue to wait on
  * @condition: a C expression for the event to wait for
  * @timeout: timeout at which we'll give up in jiffies
···
  * or the remaining jiffies (at least 1) if the @condition evaluated
  * to %true before the @timeout elapsed.
  */
-#define swait_event_idle_timeout(wq, condition, timeout)		\
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout)	\
 ({									\
 	long __ret = timeout;						\
 	if (!___wait_cond_timeout(condition))				\
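
The event macros keep their return conventions under the new names; only the exclusive enqueueing is made explicit. A minimal sketch of the interruptible variant (done_wq, done, wait_for_done() and signal_done() are hypothetical names):

  static DECLARE_SWAIT_QUEUE_HEAD(done_wq);
  static bool done;

  /* returns 0 once done is observed true, -ERESTARTSYS if interrupted */
  static int wait_for_done(void)
  {
	return swait_event_interruptible_exclusive(done_wq, READ_ONCE(done));
  }

  /* set the flag, then wake the single exclusive waiter */
  static void signal_done(void)
  {
	WRITE_ONCE(done, true);
	swake_up_one(&done_wq);
  }

The ___swait_event() loop re-checks the condition after every wakeup, so a racing wake merely costs another loop iteration.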
+2 -2
kernel/power/suspend.c
···
 	/* Push all the CPUs into the idle loop. */
 	wake_up_all_idle_cpus();
 	/* Make the current CPU wait so it can enter the idle loop too. */
-	swait_event(s2idle_wait_head,
+	swait_event_exclusive(s2idle_wait_head,
 		    s2idle_state == S2IDLE_STATE_WAKE);

 	cpuidle_pause();
···
 	raw_spin_lock_irqsave(&s2idle_lock, flags);
 	if (s2idle_state > S2IDLE_STATE_NONE) {
 		s2idle_state = S2IDLE_STATE_WAKE;
-		swake_up(&s2idle_wait_head);
+		swake_up_one(&s2idle_wait_head);
 	}
 	raw_spin_unlock_irqrestore(&s2idle_lock, flags);
 }
+2 -2
kernel/rcu/srcutiny.c
···

 	WRITE_ONCE(sp->srcu_lock_nesting[idx], newval);
 	if (!newval && READ_ONCE(sp->srcu_gp_waiting))
-		swake_up(&sp->srcu_wq);
+		swake_up_one(&sp->srcu_wq);
 }
 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
···
 	idx = sp->srcu_idx;
 	WRITE_ONCE(sp->srcu_idx, !sp->srcu_idx);
 	WRITE_ONCE(sp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
-	swait_event(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
+	swait_event_exclusive(sp->srcu_wq, !READ_ONCE(sp->srcu_lock_nesting[idx]));
 	WRITE_ONCE(sp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

 	/* Invoke the callbacks we removed above. */
+4 -4
kernel/rcu/tree.c
···
 	    !READ_ONCE(rsp->gp_flags) ||
 	    !rsp->gp_kthread)
 		return;
-	swake_up(&rsp->gp_wq);
+	swake_up_one(&rsp->gp_wq);
 }

 /*
···
 }

 /*
- * Helper function for swait_event_idle() wakeup at force-quiescent-state
+ * Helper function for swait_event_idle_exclusive() wakeup at force-quiescent-state
  * time.
  */
 static bool rcu_gp_fqs_check_wake(struct rcu_state *rsp, int *gfp)
···
 				       READ_ONCE(rsp->gpnum),
 				       TPS("reqwait"));
 		rsp->gp_state = RCU_GP_WAIT_GPS;
-		swait_event_idle(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
+		swait_event_idle_exclusive(rsp->gp_wq, READ_ONCE(rsp->gp_flags) &
 				 RCU_GP_FLAG_INIT);
 		rsp->gp_state = RCU_GP_DONE_GPS;
 		/* Locking provides needed memory barrier. */
···
 					       READ_ONCE(rsp->gpnum),
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
-			ret = swait_event_idle_timeout(rsp->gp_wq,
+			ret = swait_event_idle_timeout_exclusive(rsp->gp_wq,
 					rcu_gp_fqs_check_wake(rsp, &gf), j);
 			rsp->gp_state = RCU_GP_DOING_FQS;
 			/* Locking provides needed memory barriers. */
+2 -2
kernel/rcu/tree_exp.h
···
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			if (wake) {
 				smp_mb(); /* EGP done before wake_up(). */
-				swake_up(&rsp->expedited_wq);
+				swake_up_one(&rsp->expedited_wq);
 			}
 			break;
 		}
···
 	jiffies_start = jiffies;

 	for (;;) {
-		ret = swait_event_timeout(
+		ret = swait_event_timeout_exclusive(
 				rsp->expedited_wq,
 				sync_rcu_preempt_exp_done_unlocked(rnp_root),
 				jiffies_stall);
+6 -6
kernel/rcu/tree_plugin.h
···
 		WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
 		del_timer(&rdp->nocb_timer);
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
-		smp_mb(); /* ->nocb_leader_sleep before swake_up(). */
-		swake_up(&rdp_leader->nocb_wq);
+		smp_mb(); /* ->nocb_leader_sleep before swake_up_one(). */
+		swake_up_one(&rdp_leader->nocb_wq);
 	} else {
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 	}
···
 	 */
 	trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
 	for (;;) {
-		swait_event_interruptible(
+		swait_event_interruptible_exclusive(
 			rnp->nocb_gp_wq[c & 0x1],
 			(d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
 		if (likely(d))
···
 	/* Wait for callbacks to appear. */
 	if (!rcu_nocb_poll) {
 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, TPS("Sleep"));
-		swait_event_interruptible(my_rdp->nocb_wq,
+		swait_event_interruptible_exclusive(my_rdp->nocb_wq,
 				!READ_ONCE(my_rdp->nocb_leader_sleep));
 		raw_spin_lock_irqsave(&my_rdp->nocb_lock, flags);
 		my_rdp->nocb_leader_sleep = true;
···
 		raw_spin_unlock_irqrestore(&rdp->nocb_lock, flags);
 		if (rdp != my_rdp && tail == &rdp->nocb_follower_head) {
 			/* List was empty, so wake up the follower.  */
-			swake_up(&rdp->nocb_wq);
+			swake_up_one(&rdp->nocb_wq);
 		}
 	}
···
 {
 	for (;;) {
 		trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("FollowerSleep"));
-		swait_event_interruptible(rdp->nocb_wq,
+		swait_event_interruptible_exclusive(rdp->nocb_wq,
 					  READ_ONCE(rdp->nocb_follower_head));
 		if (smp_load_acquire(&rdp->nocb_follower_head)) {
 			/* ^^^ Ensure CB invocation follows _head test. */
+5 -5
kernel/sched/swait.c
···
 }
 EXPORT_SYMBOL(swake_up_locked);

-void swake_up(struct swait_queue_head *q)
+void swake_up_one(struct swait_queue_head *q)
 {
 	unsigned long flags;

···
 	swake_up_locked(q);
 	raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(swake_up);
+EXPORT_SYMBOL(swake_up_one);

 /*
  * Does not allow usage from IRQ disabled, since we must be able to
···
 	list_add_tail(&wait->task_list, &q->task_list);
 }

-void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
+void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
 	unsigned long flags;

···
 	set_current_state(state);
 	raw_spin_unlock_irqrestore(&q->lock, flags);
 }
-EXPORT_SYMBOL(prepare_to_swait);
+EXPORT_SYMBOL(prepare_to_swait_exclusive);

 long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
 {
···
 	raw_spin_lock_irqsave(&q->lock, flags);
 	if (unlikely(signal_pending_state(state, current))) {
 		/*
-		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up()
+		 * See prepare_to_wait_event(). TL;DR, subsequent swake_up_one()
 		 * must not see us.
 		 */
 		list_del_init(&wait->task_list);
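
For reference, the wake-one behaviour the new name advertises lives in swake_up_locked(), which swake_up_one() wraps under q->lock. Sketched from memory of the same file in this era, so treat it as illustrative rather than verbatim:

  void swake_up_locked(struct swait_queue_head *q)
  {
	struct swait_queue *curr;

	if (list_empty(&q->task_list))
		return;

	/* Wake only the head waiter and dequeue it: "one" == exclusive. */
	curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
	wake_up_process(curr->task);
	list_del_init(&curr->task_list);
  }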
+2 -2
virt/kvm/arm/arm.c
···

 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		vcpu->arch.pause = false;
-		swake_up(kvm_arch_vcpu_wq(vcpu));
+		swake_up_one(kvm_arch_vcpu_wq(vcpu));
 	}
 }
···
 {
 	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

-	swait_event_interruptible(*wq, ((!vcpu->arch.power_off) &&
+	swait_event_interruptible_exclusive(*wq, ((!vcpu->arch.power_off) &&
 				   (!vcpu->arch.pause)));

 	if (vcpu->arch.power_off || vcpu->arch.pause) {
+1 -1
virt/kvm/arm/psci.c
···
 	smp_mb();		/* Make sure the above is visible */

 	wq = kvm_arch_vcpu_wq(vcpu);
-	swake_up(wq);
+	swake_up_one(wq);

 	return PSCI_RET_SUCCESS;
 }
+1 -1
virt/kvm/async_pf.c
···
 	trace_kvm_async_pf_completed(addr, gva);

 	if (swq_has_sleeper(&vcpu->wq))
-		swake_up(&vcpu->wq);
+		swake_up_one(&vcpu->wq);

 	mmput(mm);
 	kvm_put_kvm(vcpu->kvm);
+2 -2
virt/kvm/kvm_main.c
···
 	kvm_arch_vcpu_blocking(vcpu);

 	for (;;) {
-		prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+		prepare_to_swait_exclusive(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);

 		if (kvm_vcpu_check_block(vcpu) < 0)
 			break;
···

 	wqp = kvm_arch_vcpu_wq(vcpu);
 	if (swq_has_sleeper(wqp)) {
-		swake_up(wqp);
+		swake_up_one(wqp);
 		++vcpu->stat.halt_wakeup;
 		return true;
 	}