Revert "rcu: Move PREEMPT_RCU preemption to switch_to() invocation"

This reverts commit 616c310e83b872024271c915c1b9ab505b9efad9
("rcu: Move PREEMPT_RCU preemption to switch_to() invocation").
Testing by Sasha Levin <levinsasha928@gmail.com> showed that this
commit can result in deadlock, because it invokes the scheduler while
one of the runqueue locks is held. Because this commit was simply a
performance optimization, revert it.

Reported-by: Sasha Levin <levinsasha928@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Sasha Levin <levinsasha928@gmail.com>

+19 -16
-1
arch/um/drivers/mconsole_kern.c
···
705   struct task_struct *from = current, *to = arg;
706
707   to->thread.saved_task = from;
708 - rcu_switch_from(from);
709   switch_to(from, to, from);
710   }
711
···
705   struct task_struct *from = current, *to = arg;
706
707   to->thread.saved_task = from;
708   switch_to(from, to, from);
709   }
710
-1
include/linux/rcupdate.h
···
184   /* Internal to kernel */
185   extern void rcu_sched_qs(int cpu);
186   extern void rcu_bh_qs(int cpu);
187 - extern void rcu_preempt_note_context_switch(void);
188   extern void rcu_check_callbacks(int cpu, int user);
189   struct notifier_block;
190   extern void rcu_idle_enter(void);
···
184   /* Internal to kernel */
185   extern void rcu_sched_qs(int cpu);
186   extern void rcu_bh_qs(int cpu);
187   extern void rcu_check_callbacks(int cpu, int user);
188   struct notifier_block;
189   extern void rcu_idle_enter(void);
+6
include/linux/rcutiny.h
···
87
88    #ifdef CONFIG_TINY_RCU
89
90    static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
91    {
92    *delta_jiffies = ULONG_MAX;
···
99
100   #else /* #ifdef CONFIG_TINY_RCU */
101
102   int rcu_preempt_needs_cpu(void);
103
104   static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
···
113   static inline void rcu_note_context_switch(int cpu)
114   {
115   rcu_sched_qs(cpu);
116   }
117
118   /*
···
87
88    #ifdef CONFIG_TINY_RCU
89
90  + static inline void rcu_preempt_note_context_switch(void)
91  + {
92  + }
93  +
94    static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
95    {
96    *delta_jiffies = ULONG_MAX;
···
95
96    #else /* #ifdef CONFIG_TINY_RCU */
97
98  + void rcu_preempt_note_context_switch(void);
99    int rcu_preempt_needs_cpu(void);
100
101   static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
···
108   static inline void rcu_note_context_switch(int cpu)
109   {
110   rcu_sched_qs(cpu);
111 + rcu_preempt_note_context_switch();
112   }
113
114   /*
-10
include/linux/sched.h
···
1871   INIT_LIST_HEAD(&p->rcu_node_entry);
1872   }
1873
1874 - static inline void rcu_switch_from(struct task_struct *prev)
1875 - {
1876 - if (prev->rcu_read_lock_nesting != 0)
1877 - rcu_preempt_note_context_switch();
1878 - }
1879 -
1880   #else
1881
1882   static inline void rcu_copy_process(struct task_struct *p)
1883 - {
1884 - }
1885 -
1886 - static inline void rcu_switch_from(struct task_struct *prev)
1887   {
1888   }
1889
···
1871   INIT_LIST_HEAD(&p->rcu_node_entry);
1872   }
1873
1874   #else
1875
1876   static inline void rcu_copy_process(struct task_struct *p)
1877   {
1878   }
1879
+1
kernel/rcutree.c
···
201   {
202   trace_rcu_utilization("Start context switch");
203   rcu_sched_qs(cpu);
204   trace_rcu_utilization("End context switch");
205   }
206   EXPORT_SYMBOL_GPL(rcu_note_context_switch);
···
201   {
202   trace_rcu_utilization("Start context switch");
203   rcu_sched_qs(cpu);
204 + rcu_preempt_note_context_switch(cpu);
205   trace_rcu_utilization("End context switch");
206   }
207   EXPORT_SYMBOL_GPL(rcu_note_context_switch);
+1
kernel/rcutree.h
···
444   /* Forward declarations for rcutree_plugin.h */
445   static void rcu_bootup_announce(void);
446   long rcu_batches_completed(void);
447   static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
448   #ifdef CONFIG_HOTPLUG_CPU
449   static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
···
444   /* Forward declarations for rcutree_plugin.h */
445   static void rcu_bootup_announce(void);
446   long rcu_batches_completed(void);
447 + static void rcu_preempt_note_context_switch(int cpu);
448   static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
449   #ifdef CONFIG_HOTPLUG_CPU
450   static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
+11 -3
kernel/rcutree_plugin.h
···
153   *
154   * Caller must disable preemption.
155   */
156 - void rcu_preempt_note_context_switch(void)
157   {
158   struct task_struct *t = current;
159   unsigned long flags;
···
164   (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
165
166   /* Possibly blocking in an RCU read-side critical section. */
167 - rdp = __this_cpu_ptr(rcu_preempt_state.rda);
168   rnp = rdp->mynode;
169   raw_spin_lock_irqsave(&rnp->lock, flags);
170   t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
···
228   * means that we continue to block the current grace period.
229   */
230   local_irq_save(flags);
231 - rcu_preempt_qs(smp_processor_id());
232   local_irq_restore(flags);
233   }
234
···
1000   rcu_sched_force_quiescent_state();
1001   }
1002   EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
1003
1004   /*
1005   * Because preemptible RCU does not exist, there are never any preempted
···
153   *
154   * Caller must disable preemption.
155   */
156 + static void rcu_preempt_note_context_switch(int cpu)
157   {
158   struct task_struct *t = current;
159   unsigned long flags;
···
164   (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {
165
166   /* Possibly blocking in an RCU read-side critical section. */
167 + rdp = per_cpu_ptr(rcu_preempt_state.rda, cpu);
168   rnp = rdp->mynode;
169   raw_spin_lock_irqsave(&rnp->lock, flags);
170   t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;
···
228   * means that we continue to block the current grace period.
229   */
230   local_irq_save(flags);
231 + rcu_preempt_qs(cpu);
232   local_irq_restore(flags);
233   }
234
···
1000   rcu_sched_force_quiescent_state();
1001   }
1002   EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
1003 +
1004 + /*
1005 + * Because preemptible RCU does not exist, we never have to check for
1006 + * CPUs being in quiescent states.
1007 + */
1008 + static void rcu_preempt_note_context_switch(int cpu)
1009 + {
1010 + }
1011
1012   /*
1013   * Because preemptible RCU does not exist, there are never any preempted
-1
kernel/sched/core.c
···
2081   #endif
2082
2083   /* Here we just switch the register state and the stack. */
2084 - rcu_switch_from(prev);
2085   switch_to(prev, next, prev);
2086
2087   barrier();
···
2081   #endif
2082
2083   /* Here we just switch the register state and the stack. */
2084   switch_to(prev, next, prev);
2085
2086   barrier();