Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

um: Remove broken SMP support

Back when UML used the TT mode to operate, it had a
kind of SMP support. That support was never finished,
nor was it stable.
Let's rip out that cruft and stop confusing developers
who do tree-wide SMP cleanups.

If someone wants SMP support in UML, it has to be done from scratch.

Signed-off-by: Richard Weinberger <richard@nod.at>

+2 -366
-30
arch/um/Kconfig.um
··· 95 95 The keys are documented in <file:Documentation/sysrq.txt>. Don't say Y 96 96 unless you really know what this hack does. 97 97 98 - config SMP 99 - bool "Symmetric multi-processing support" 100 - default n 101 - depends on BROKEN 102 - help 103 - This option enables UML SMP support. 104 - It is NOT related to having a real SMP box. Not directly, at least. 105 - 106 - UML implements virtual SMP by allowing as many processes to run 107 - simultaneously on the host as there are virtual processors configured. 108 - 109 - Obviously, if the host is a uniprocessor, those processes will 110 - timeshare, but, inside UML, will appear to be running simultaneously. 111 - If the host is a multiprocessor, then UML processes may run 112 - simultaneously, depending on the host scheduler. 113 - 114 - This, however, is supported only in TT mode. So, if you use the SKAS 115 - patch on your host, switching to TT mode and enabling SMP usually 116 - gives you worse performances. 117 - Also, since the support for SMP has been under-developed, there could 118 - be some bugs being exposed by enabling SMP. 119 - 120 - If you don't know what to do, say N. 121 - 122 - config NR_CPUS 123 - int "Maximum number of CPUs (2-32)" 124 - range 2 32 125 - depends on SMP 126 - default "32" 127 - 128 98 config HIGHMEM 129 99 bool "Highmem support" 130 100 depends on !64BIT && BROKEN
-8
arch/um/include/asm/processor-generic.h
··· 98 98 99 99 extern struct cpuinfo_um boot_cpu_data; 100 100 101 - #define my_cpu_data cpu_data[smp_processor_id()] 102 - 103 - #ifdef CONFIG_SMP 104 - extern struct cpuinfo_um cpu_data[]; 105 - #define current_cpu_data cpu_data[smp_processor_id()] 106 - #else 107 101 #define cpu_data (&boot_cpu_data) 108 102 #define current_cpu_data boot_cpu_data 109 - #endif 110 - 111 103 112 104 #define KSTK_REG(tsk, reg) get_thread_reg(reg, &tsk->thread.switch_buf) 113 105 extern unsigned long get_wchan(struct task_struct *p);
-26
arch/um/include/asm/smp.h
··· 1 1 #ifndef __UM_SMP_H 2 2 #define __UM_SMP_H 3 3 4 - #ifdef CONFIG_SMP 5 - 6 - #include <linux/bitops.h> 7 - #include <asm/current.h> 8 - #include <linux/cpumask.h> 9 - 10 - #define raw_smp_processor_id() (current_thread->cpu) 11 - 12 - #define cpu_logical_map(n) (n) 13 - #define cpu_number_map(n) (n) 14 - extern int hard_smp_processor_id(void); 15 - #define NO_PROC_ID -1 16 - 17 - extern int ncpus; 18 - 19 - 20 - static inline void smp_cpus_done(unsigned int maxcpus) 21 - { 22 - } 23 - 24 - extern struct task_struct *idle_threads[NR_CPUS]; 25 - 26 - #else 27 - 28 4 #define hard_smp_processor_id() 0 29 - 30 - #endif 31 5 32 6 #endif
+1 -1
arch/um/kernel/Makefile
··· 12 12 13 13 obj-y = config.o exec.o exitcode.o irq.o ksyms.o mem.o \ 14 14 physmem.o process.o ptrace.o reboot.o sigio.o \ 15 - signal.o smp.o syscall.o sysrq.o time.o tlb.o trap.o \ 15 + signal.o syscall.o sysrq.o time.o tlb.o trap.o \ 16 16 um_arch.o umid.o maccess.o skas/ 17 17 18 18 obj-$(CONFIG_BLK_DEV_INITRD) += initrd.o
-3
arch/um/kernel/irq.c
··· 35 35 struct irq_fd *irq_fd; 36 36 int n; 37 37 38 - if (smp_sigio_handler()) 39 - return; 40 - 41 38 while (1) { 42 39 n = os_waiting_for_events(active_fds); 43 40 if (n <= 0) {
-11
arch/um/kernel/process.c
··· 259 259 return strlen_user(str); 260 260 } 261 261 262 - int smp_sigio_handler(void) 263 - { 264 - #ifdef CONFIG_SMP 265 - int cpu = current_thread_info()->cpu; 266 - IPI_handler(cpu); 267 - if (cpu != 0) 268 - return 1; 269 - #endif 270 - return 0; 271 - } 272 - 273 262 int cpu(void) 274 263 { 275 264 return current_thread_info()->cpu;
+1 -3
arch/um/kernel/skas/process.c
··· 21 21 22 22 cpu_tasks[0].pid = pid; 23 23 cpu_tasks[0].task = current; 24 - #ifdef CONFIG_SMP 25 - init_cpu_online(get_cpu_mask(0)); 26 - #endif 24 + 27 25 start_kernel(); 28 26 return 0; 29 27 }
-238
arch/um/kernel/smp.c
··· 1 - /* 2 - * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) 3 - * Licensed under the GPL 4 - */ 5 - 6 - #include <linux/percpu.h> 7 - #include <asm/pgalloc.h> 8 - #include <asm/tlb.h> 9 - 10 - #ifdef CONFIG_SMP 11 - 12 - #include <linux/sched.h> 13 - #include <linux/module.h> 14 - #include <linux/threads.h> 15 - #include <linux/interrupt.h> 16 - #include <linux/err.h> 17 - #include <linux/hardirq.h> 18 - #include <asm/smp.h> 19 - #include <asm/processor.h> 20 - #include <asm/spinlock.h> 21 - #include <kern.h> 22 - #include <irq_user.h> 23 - #include <os.h> 24 - 25 - /* Per CPU bogomips and other parameters 26 - * The only piece used here is the ipi pipe, which is set before SMP is 27 - * started and never changed. 28 - */ 29 - struct cpuinfo_um cpu_data[NR_CPUS]; 30 - 31 - /* A statistic, can be a little off */ 32 - int num_reschedules_sent = 0; 33 - 34 - /* Not changed after boot */ 35 - struct task_struct *idle_threads[NR_CPUS]; 36 - 37 - void smp_send_reschedule(int cpu) 38 - { 39 - os_write_file(cpu_data[cpu].ipi_pipe[1], "R", 1); 40 - num_reschedules_sent++; 41 - } 42 - 43 - void smp_send_stop(void) 44 - { 45 - int i; 46 - 47 - printk(KERN_INFO "Stopping all CPUs..."); 48 - for (i = 0; i < num_online_cpus(); i++) { 49 - if (i == current_thread->cpu) 50 - continue; 51 - os_write_file(cpu_data[i].ipi_pipe[1], "S", 1); 52 - } 53 - printk(KERN_CONT "done\n"); 54 - } 55 - 56 - static cpumask_t smp_commenced_mask = CPU_MASK_NONE; 57 - static cpumask_t cpu_callin_map = CPU_MASK_NONE; 58 - 59 - static int idle_proc(void *cpup) 60 - { 61 - int cpu = (int) cpup, err; 62 - 63 - err = os_pipe(cpu_data[cpu].ipi_pipe, 1, 1); 64 - if (err < 0) 65 - panic("CPU#%d failed to create IPI pipe, err = %d", cpu, -err); 66 - 67 - os_set_fd_async(cpu_data[cpu].ipi_pipe[0]); 68 - 69 - wmb(); 70 - if (cpu_test_and_set(cpu, cpu_callin_map)) { 71 - printk(KERN_ERR "huh, CPU#%d already present??\n", cpu); 72 - BUG(); 73 - } 74 - 75 - while (!cpu_isset(cpu, 
smp_commenced_mask)) 76 - cpu_relax(); 77 - 78 - notify_cpu_starting(cpu); 79 - set_cpu_online(cpu, true); 80 - default_idle(); 81 - return 0; 82 - } 83 - 84 - static struct task_struct *idle_thread(int cpu) 85 - { 86 - struct task_struct *new_task; 87 - 88 - current->thread.request.u.thread.proc = idle_proc; 89 - current->thread.request.u.thread.arg = (void *) cpu; 90 - new_task = fork_idle(cpu); 91 - if (IS_ERR(new_task)) 92 - panic("copy_process failed in idle_thread, error = %ld", 93 - PTR_ERR(new_task)); 94 - 95 - cpu_tasks[cpu] = ((struct cpu_task) 96 - { .pid = new_task->thread.mode.tt.extern_pid, 97 - .task = new_task } ); 98 - idle_threads[cpu] = new_task; 99 - panic("skas mode doesn't support SMP"); 100 - return new_task; 101 - } 102 - 103 - void smp_prepare_cpus(unsigned int maxcpus) 104 - { 105 - struct task_struct *idle; 106 - unsigned long waittime; 107 - int err, cpu, me = smp_processor_id(); 108 - int i; 109 - 110 - for (i = 0; i < ncpus; ++i) 111 - set_cpu_possible(i, true); 112 - 113 - set_cpu_online(me, true); 114 - cpu_set(me, cpu_callin_map); 115 - 116 - err = os_pipe(cpu_data[me].ipi_pipe, 1, 1); 117 - if (err < 0) 118 - panic("CPU#0 failed to create IPI pipe, errno = %d", -err); 119 - 120 - os_set_fd_async(cpu_data[me].ipi_pipe[0]); 121 - 122 - for (cpu = 1; cpu < ncpus; cpu++) { 123 - printk(KERN_INFO "Booting processor %d...\n", cpu); 124 - 125 - idle = idle_thread(cpu); 126 - 127 - init_idle(idle, cpu); 128 - 129 - waittime = 200000000; 130 - while (waittime-- && !cpu_isset(cpu, cpu_callin_map)) 131 - cpu_relax(); 132 - 133 - printk(KERN_INFO "%s\n", 134 - cpu_isset(cpu, cpu_calling_map) ? 
"done" : "failed"); 135 - } 136 - } 137 - 138 - void smp_prepare_boot_cpu(void) 139 - { 140 - set_cpu_online(smp_processor_id(), true); 141 - } 142 - 143 - int __cpu_up(unsigned int cpu, struct task_struct *tidle) 144 - { 145 - cpu_set(cpu, smp_commenced_mask); 146 - while (!cpu_online(cpu)) 147 - mb(); 148 - return 0; 149 - } 150 - 151 - int setup_profiling_timer(unsigned int multiplier) 152 - { 153 - printk(KERN_INFO "setup_profiling_timer\n"); 154 - return 0; 155 - } 156 - 157 - void smp_call_function_slave(int cpu); 158 - 159 - void IPI_handler(int cpu) 160 - { 161 - unsigned char c; 162 - int fd; 163 - 164 - fd = cpu_data[cpu].ipi_pipe[0]; 165 - while (os_read_file(fd, &c, 1) == 1) { 166 - switch (c) { 167 - case 'C': 168 - smp_call_function_slave(cpu); 169 - break; 170 - 171 - case 'R': 172 - scheduler_ipi(); 173 - break; 174 - 175 - case 'S': 176 - printk(KERN_INFO "CPU#%d stopping\n", cpu); 177 - while (1) 178 - pause(); 179 - break; 180 - 181 - default: 182 - printk(KERN_ERR "CPU#%d received unknown IPI [%c]!\n", 183 - cpu, c); 184 - break; 185 - } 186 - } 187 - } 188 - 189 - int hard_smp_processor_id(void) 190 - { 191 - return pid_to_processor_id(os_getpid()); 192 - } 193 - 194 - static DEFINE_SPINLOCK(call_lock); 195 - static atomic_t scf_started; 196 - static atomic_t scf_finished; 197 - static void (*func)(void *info); 198 - static void *info; 199 - 200 - void smp_call_function_slave(int cpu) 201 - { 202 - atomic_inc(&scf_started); 203 - (*func)(info); 204 - atomic_inc(&scf_finished); 205 - } 206 - 207 - int smp_call_function(void (*_func)(void *info), void *_info, int wait) 208 - { 209 - int cpus = num_online_cpus() - 1; 210 - int i; 211 - 212 - if (!cpus) 213 - return 0; 214 - 215 - /* Can deadlock when called with interrupts disabled */ 216 - WARN_ON(irqs_disabled()); 217 - 218 - spin_lock_bh(&call_lock); 219 - atomic_set(&scf_started, 0); 220 - atomic_set(&scf_finished, 0); 221 - func = _func; 222 - info = _info; 223 - 224 - for_each_online_cpu(i) 
225 - os_write_file(cpu_data[i].ipi_pipe[1], "C", 1); 226 - 227 - while (atomic_read(&scf_started) != cpus) 228 - barrier(); 229 - 230 - if (wait) 231 - while (atomic_read(&scf_finished) != cpus) 232 - barrier(); 233 - 234 - spin_unlock_bh(&call_lock); 235 - return 0; 236 - } 237 - 238 - #endif
-35
arch/um/kernel/um_arch.c
··· 66 66 { 67 67 int index = 0; 68 68 69 - #ifdef CONFIG_SMP 70 - index = (struct cpuinfo_um *) v - cpu_data; 71 - if (!cpu_online(index)) 72 - return 0; 73 - #endif 74 - 75 69 seq_printf(m, "processor\t: %d\n", index); 76 70 seq_printf(m, "vendor_id\t: User Mode Linux\n"); 77 71 seq_printf(m, "model name\t: UML\n"); ··· 161 167 "debug\n" 162 168 " this flag is not needed to run gdb on UML in skas mode\n\n" 163 169 ); 164 - 165 - #ifdef CONFIG_SMP 166 - static int __init uml_ncpus_setup(char *line, int *add) 167 - { 168 - if (!sscanf(line, "%d", &ncpus)) { 169 - printf("Couldn't parse [%s]\n", line); 170 - return -1; 171 - } 172 - 173 - return 0; 174 - } 175 - 176 - __uml_setup("ncpus=", uml_ncpus_setup, 177 - "ncpus=<# of desired CPUs>\n" 178 - " This tells an SMP kernel how many virtual processors to start.\n\n" 179 - ); 180 - #endif 181 170 182 171 static int __init Usage(char *line, int *add) 183 172 { ··· 357 380 void apply_alternatives(struct alt_instr *start, struct alt_instr *end) 358 381 { 359 382 } 360 - 361 - #ifdef CONFIG_SMP 362 - void alternatives_smp_module_add(struct module *mod, char *name, 363 - void *locks, void *locks_end, 364 - void *text, void *text_end) 365 - { 366 - } 367 - 368 - void alternatives_smp_module_del(struct module *mod) 369 - { 370 - } 371 - #endif
-11
arch/x86/um/asm/barrier.h
··· 36 36 #endif /* CONFIG_X86_PPRO_FENCE */ 37 37 #define dma_wmb() barrier() 38 38 39 - #ifdef CONFIG_SMP 40 - 41 - #define smp_mb() mb() 42 - #define smp_rmb() dma_rmb() 43 - #define smp_wmb() barrier() 44 - #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) 45 - 46 - #else /* CONFIG_SMP */ 47 - 48 39 #define smp_mb() barrier() 49 40 #define smp_rmb() barrier() 50 41 #define smp_wmb() barrier() 51 42 #define set_mb(var, value) do { var = value; barrier(); } while (0) 52 - 53 - #endif /* CONFIG_SMP */ 54 43 55 44 #define read_barrier_depends() do { } while (0) 56 45 #define smp_read_barrier_depends() do { } while (0)