Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

MIPS: Synchronize MIPS count one CPU at a time

The current implementation of synchronise_count_{master,slave} blocks
slave CPUs in early boot until all of them come up. This no longer
works because blocking a CPU with interrupts off after notifying the
CPU to be online causes problems with the current kernel.

Specifically, after the workqueue changes
(commit a08489c569dc1 "Pull workqueue changes from Tejun Heo")
the CPU_ONLINE notification callback workqueue_cpu_up_callback()
will hang on wait_for_completion(&idle_rebind.done), if the slave
CPUs are blocked for synchronize_count_slave().

The changes are to update synchronize_count_{master,slave}() to handle
one CPU at a time and to call synchronise_count_master() in __cpu_up()
so that the CPU_ONLINE notification goes out only after the COP0 COUNT
register is synchronized.

[ralf@linux-mips.org: This matters only to those few platforms which are
using the cp0 counter as their clocksource, which are XLP, XLR and MIPS'
CMP solution.]

Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/4216/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>

authored by

Jayachandran C and committed by
Ralf Baechle
cf9bfe55 5a670445

+17 -21
+4 -4
arch/mips/include/asm/r4k-timer.h
···
 
 #ifdef CONFIG_SYNC_R4K
 
-extern void synchronise_count_master(void);
-extern void synchronise_count_slave(void);
+extern void synchronise_count_master(int cpu);
+extern void synchronise_count_slave(int cpu);
 
 #else
 
-static inline void synchronise_count_master(void)
+static inline void synchronise_count_master(int cpu)
 {
 }
 
-static inline void synchronise_count_slave(void)
+static inline void synchronise_count_slave(int cpu)
 {
 }
 
+2 -2
arch/mips/kernel/smp.c
···
 
 	cpu_set(cpu, cpu_callin_map);
 
-	synchronise_count_slave();
+	synchronise_count_slave(cpu);
 
 	/*
 	 * irq will be enabled in ->smp_finish(), enabling it too early
···
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	mp_ops->cpus_done();
-	synchronise_count_master();
 }
 
 /* called from main before smp_init() */
···
 	while (!cpu_isset(cpu, cpu_callin_map))
 		udelay(100);
 
+	synchronise_count_master(cpu);
 	return 0;
 }
 
+11 -15
arch/mips/kernel/sync-r4k.c
···
 #define COUNTON	100
 #define NR_LOOPS	5
 
-void __cpuinit synchronise_count_master(void)
+void __cpuinit synchronise_count_master(int cpu)
 {
 	int i;
 	unsigned long flags;
 	unsigned int initcount;
-	int nslaves;
 
 #ifdef CONFIG_MIPS_MT_SMTC
 	/*
···
 	return;
 #endif
 
-	printk(KERN_INFO "Synchronize counters across %u CPUs: ",
-	       num_online_cpus());
+	printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
 
 	local_irq_save(flags);
 
···
 	 * Notify the slaves that it's time to start
 	 */
 	atomic_set(&count_reference, read_c0_count());
-	atomic_set(&count_start_flag, 1);
+	atomic_set(&count_start_flag, cpu);
 	smp_wmb();
 
 	/* Count will be initialised to current timer for all CPU's */
···
 	 * two CPUs.
 	 */
 
-	nslaves = num_online_cpus()-1;
 	for (i = 0; i < NR_LOOPS; i++) {
-		/* slaves loop on '!= ncpus' */
-		while (atomic_read(&count_count_start) != nslaves)
+		/* slaves loop on '!= 2' */
+		while (atomic_read(&count_count_start) != 1)
 			mb();
 		atomic_set(&count_count_stop, 0);
 		smp_wmb();
···
 		/*
 		 * Wait for all slaves to leave the synchronization point:
 		 */
-		while (atomic_read(&count_count_stop) != nslaves)
+		while (atomic_read(&count_count_stop) != 1)
 			mb();
 		atomic_set(&count_count_start, 0);
 		smp_wmb();
···
 	}
 	/* Arrange for an interrupt in a short while */
 	write_c0_compare(read_c0_count() + COUNTON);
+	atomic_set(&count_start_flag, 0);
 
 	local_irq_restore(flags);
 
···
 	printk("done.\n");
 }
 
-void __cpuinit synchronise_count_slave(void)
+void __cpuinit synchronise_count_slave(int cpu)
 {
 	int i;
 	unsigned int initcount;
-	int ncpus;
 
 #ifdef CONFIG_MIPS_MT_SMTC
 	/*
···
 	 * so we first wait for the master to say everyone is ready
 	 */
 
-	while (!atomic_read(&count_start_flag))
+	while (atomic_read(&count_start_flag) != cpu)
 		mb();
 
 	/* Count will be initialised to next expire for all CPU's */
 	initcount = atomic_read(&count_reference);
 
-	ncpus = num_online_cpus();
 	for (i = 0; i < NR_LOOPS; i++) {
 		atomic_inc(&count_count_start);
-		while (atomic_read(&count_count_start) != ncpus)
+		while (atomic_read(&count_count_start) != 2)
 			mb();
 
 		/*
···
 		write_c0_count(initcount);
 
 		atomic_inc(&count_count_stop);
-		while (atomic_read(&count_count_stop) != ncpus)
+		while (atomic_read(&count_count_stop) != 2)
 			mb();
 	}
 	/* Arrange for an interrupt in a short while */