Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ARM: vexpress/realview: consolidate immitation CPU hotplug

The only difference between the hotplug implementation for Realview
and Versatile Express is the bit in the auxiliary control register
to disable coherency. Combine the two implementations, accounting for
that difference.

Rename the functions to try to discourage cargo-cult copying of this
code.

Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>

+43 -149
-1
arch/arm/mach-realview/Makefile
··· 5 5 6 6 obj-y += realview-dt.o 7 7 obj-$(CONFIG_SMP) += platsmp-dt.o 8 - obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+27 -35
arch/arm/mach-realview/hotplug.c arch/arm/plat-versatile/hotplug.c
··· 1 1 /* 2 - * linux/arch/arm/mach-realview/hotplug.c 3 - * 4 2 * Copyright (C) 2002 ARM Ltd. 5 3 * All Rights Reserved 6 4 * 7 5 * This program is free software; you can redistribute it and/or modify 8 6 * it under the terms of the GNU General Public License version 2 as 9 7 * published by the Free Software Foundation. 8 + * 9 + * This hotplug implementation is _specific_ to the situation found on 10 + * ARM development platforms where there is _no_ possibility of actually 11 + * taking a CPU offline, resetting it, or otherwise. Real platforms must 12 + * NOT copy this code. 10 13 */ 11 14 #include <linux/kernel.h> 12 15 #include <linux/errno.h> 13 16 #include <linux/smp.h> 14 17 15 - #include <asm/cp15.h> 16 18 #include <asm/smp_plat.h> 19 + #include <asm/cp15.h> 17 20 18 - static inline void cpu_enter_lowpower(void) 21 + static inline void versatile_immitation_enter_lowpower(unsigned int actrl_mask) 19 22 { 20 23 unsigned int v; 21 24 22 25 asm volatile( 23 - " mcr p15, 0, %1, c7, c5, 0\n" 26 + "mcr p15, 0, %1, c7, c5, 0\n" 24 27 " mcr p15, 0, %1, c7, c10, 4\n" 25 28 /* 26 29 * Turn off coherency 27 30 */ 28 31 " mrc p15, 0, %0, c1, c0, 1\n" 29 - " bic %0, %0, #0x20\n" 32 + " bic %0, %0, %3\n" 30 33 " mcr p15, 0, %0, c1, c0, 1\n" 31 34 " mrc p15, 0, %0, c1, c0, 0\n" 32 35 " bic %0, %0, %2\n" 33 36 " mcr p15, 0, %0, c1, c0, 0\n" 34 37 : "=&r" (v) 35 - : "r" (0), "Ir" (CR_C) 38 + : "r" (0), "Ir" (CR_C), "Ir" (actrl_mask) 36 39 : "cc"); 37 40 } 38 41 39 - static inline void cpu_leave_lowpower(void) 42 + static inline void versatile_immitation_leave_lowpower(unsigned int actrl_mask) 40 43 { 41 44 unsigned int v; 42 45 43 - asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" 46 + asm volatile( 47 + "mrc p15, 0, %0, c1, c0, 0\n" 44 48 " orr %0, %0, %1\n" 45 49 " mcr p15, 0, %0, c1, c0, 0\n" 46 50 " mrc p15, 0, %0, c1, c0, 1\n" 47 - " orr %0, %0, #0x20\n" 51 + " orr %0, %0, %2\n" 48 52 " mcr p15, 0, %0, c1, c0, 1\n" 49 53 : "=&r" (v) 50 - : "Ir" (CR_C) 54 + : "Ir" (CR_C), 
"Ir" (actrl_mask) 51 55 : "cc"); 52 56 } 53 57 54 - static inline void platform_do_lowpower(unsigned int cpu, int *spurious) 58 + static inline void versatile_immitation_do_lowpower(unsigned int cpu, int *spurious) 55 59 { 56 60 /* 57 61 * there is no power-control hardware on this platform, so all 58 62 * we can do is put the core into WFI; this is safe as the calling 59 - * code will have already disabled interrupts 63 + * code will have already disabled interrupts. 64 + * 65 + * This code should not be used outside Versatile platforms. 60 66 */ 61 67 for (;;) { 62 - /* 63 - * here's the WFI 64 - */ 65 - asm(".word 0xe320f003\n" 66 - : 67 - : 68 - : "memory", "cc"); 68 + wfi(); 69 69 70 70 if (pen_release == cpu_logical_map(cpu)) { 71 71 /* ··· 86 86 } 87 87 88 88 /* 89 - * platform-specific code to shutdown a CPU 90 - * 91 - * Called with IRQs disabled 89 + * platform-specific code to shutdown a CPU. 90 + * This code supports immitation-style CPU hotplug for Versatile/Realview/ 91 + * Versatile Express platforms that are unable to do real CPU hotplug. 92 92 */ 93 - void realview_cpu_die(unsigned int cpu) 93 + void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask) 94 94 { 95 95 int spurious = 0; 96 96 97 - /* 98 - * we're ready for shutdown now, so do it 99 - */ 100 - cpu_enter_lowpower(); 101 - platform_do_lowpower(cpu, &spurious); 102 - 103 - /* 104 - * bring this CPU back into the world of cache 105 - * coherency, and then restore interrupts 106 - */ 107 - cpu_leave_lowpower(); 97 + versatile_immitation_enter_lowpower(actrl_mask); 98 + versatile_immitation_do_lowpower(cpu, &spurious); 99 + versatile_immitation_leave_lowpower(actrl_mask); 108 100 109 101 if (spurious) 110 102 pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
-1
arch/arm/mach-realview/hotplug.h
··· 1 - void realview_cpu_die(unsigned int cpu);
+7 -1
arch/arm/mach-realview/platsmp-dt.c
··· 17 17 #include <asm/smp_scu.h> 18 18 19 19 #include <plat/platsmp.h> 20 - #include "hotplug.h" 21 20 22 21 #define REALVIEW_SYS_FLAGSSET_OFFSET 0x30 23 22 ··· 77 78 regmap_write(map, REALVIEW_SYS_FLAGSSET_OFFSET, 78 79 __pa_symbol(versatile_secondary_startup)); 79 80 } 81 + 82 + #ifdef CONFIG_HOTPLUG_CPU 83 + static void realview_cpu_die(unsigned int cpu) 84 + { 85 + return versatile_immitation_cpu_die(cpu, 0x20); 86 + } 87 + #endif 80 88 81 89 static const struct smp_operations realview_dt_smp_ops __initconst = { 82 90 .smp_prepare_cpus = realview_smp_prepare_cpus,
-1
arch/arm/mach-vexpress/Makefile
··· 15 15 CFLAGS_tc2_pm.o += -march=armv7-a 16 16 CFLAGS_REMOVE_tc2_pm.o = -pg 17 17 obj-$(CONFIG_SMP) += platsmp.o 18 - obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 19 18 20 19 obj-$(CONFIG_ARCH_MPS2) += v2m-mps2.o
-2
arch/arm/mach-vexpress/core.h
··· 1 1 bool vexpress_smp_init_ops(void); 2 2 3 3 extern const struct smp_operations vexpress_smp_dt_ops; 4 - 5 - extern void vexpress_cpu_die(unsigned int cpu);
-108
arch/arm/mach-vexpress/hotplug.c
··· 1 - /* 2 - * linux/arch/arm/mach-realview/hotplug.c 3 - * 4 - * Copyright (C) 2002 ARM Ltd. 5 - * All Rights Reserved 6 - * 7 - * This program is free software; you can redistribute it and/or modify 8 - * it under the terms of the GNU General Public License version 2 as 9 - * published by the Free Software Foundation. 10 - */ 11 - #include <linux/kernel.h> 12 - #include <linux/errno.h> 13 - #include <linux/smp.h> 14 - 15 - #include <asm/smp_plat.h> 16 - #include <asm/cp15.h> 17 - 18 - #include "core.h" 19 - 20 - static inline void cpu_enter_lowpower(void) 21 - { 22 - unsigned int v; 23 - 24 - asm volatile( 25 - "mcr p15, 0, %1, c7, c5, 0\n" 26 - " mcr p15, 0, %1, c7, c10, 4\n" 27 - /* 28 - * Turn off coherency 29 - */ 30 - " mrc p15, 0, %0, c1, c0, 1\n" 31 - " bic %0, %0, %3\n" 32 - " mcr p15, 0, %0, c1, c0, 1\n" 33 - " mrc p15, 0, %0, c1, c0, 0\n" 34 - " bic %0, %0, %2\n" 35 - " mcr p15, 0, %0, c1, c0, 0\n" 36 - : "=&r" (v) 37 - : "r" (0), "Ir" (CR_C), "Ir" (0x40) 38 - : "cc"); 39 - } 40 - 41 - static inline void cpu_leave_lowpower(void) 42 - { 43 - unsigned int v; 44 - 45 - asm volatile( 46 - "mrc p15, 0, %0, c1, c0, 0\n" 47 - " orr %0, %0, %1\n" 48 - " mcr p15, 0, %0, c1, c0, 0\n" 49 - " mrc p15, 0, %0, c1, c0, 1\n" 50 - " orr %0, %0, %2\n" 51 - " mcr p15, 0, %0, c1, c0, 1\n" 52 - : "=&r" (v) 53 - : "Ir" (CR_C), "Ir" (0x40) 54 - : "cc"); 55 - } 56 - 57 - static inline void platform_do_lowpower(unsigned int cpu, int *spurious) 58 - { 59 - /* 60 - * there is no power-control hardware on this platform, so all 61 - * we can do is put the core into WFI; this is safe as the calling 62 - * code will have already disabled interrupts 63 - */ 64 - for (;;) { 65 - wfi(); 66 - 67 - if (pen_release == cpu_logical_map(cpu)) { 68 - /* 69 - * OK, proper wakeup, we're done 70 - */ 71 - break; 72 - } 73 - 74 - /* 75 - * Getting here, means that we have come out of WFI without 76 - * having been woken up - this shouldn't happen 77 - * 78 - * Just note it happening - when we're 
woken, we can report 79 - * its occurrence. 80 - */ 81 - (*spurious)++; 82 - } 83 - } 84 - 85 - /* 86 - * platform-specific code to shutdown a CPU 87 - * 88 - * Called with IRQs disabled 89 - */ 90 - void vexpress_cpu_die(unsigned int cpu) 91 - { 92 - int spurious = 0; 93 - 94 - /* 95 - * we're ready for shutdown now, so do it 96 - */ 97 - cpu_enter_lowpower(); 98 - platform_do_lowpower(cpu, &spurious); 99 - 100 - /* 101 - * bring this CPU back into the world of cache 102 - * coherency, and then restore interrupts 103 - */ 104 - cpu_leave_lowpower(); 105 - 106 - if (spurious) 107 - pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); 108 - }
+7
arch/arm/mach-vexpress/platsmp.c
··· 82 82 vexpress_flags_set(__pa_symbol(versatile_secondary_startup)); 83 83 } 84 84 85 + #ifdef CONFIG_HOTPLUG_CPU 86 + static void vexpress_cpu_die(unsigned int cpu) 87 + { 88 + versatile_immitation_cpu_die(cpu, 0x40); 89 + } 90 + #endif 91 + 85 92 const struct smp_operations vexpress_smp_dt_ops __initconst = { 86 93 .smp_prepare_cpus = vexpress_smp_dt_prepare_cpus, 87 94 .smp_secondary_init = versatile_secondary_init,
+1
arch/arm/plat-versatile/Makefile
··· 2 2 3 3 obj-$(CONFIG_PLAT_VERSATILE_SCHED_CLOCK) += sched-clock.o 4 4 obj-$(CONFIG_SMP) += headsmp.o platsmp.o 5 + obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
+1
arch/arm/plat-versatile/include/plat/platsmp.h
··· 12 12 extern void versatile_secondary_startup(void); 13 13 extern void versatile_secondary_init(unsigned int cpu); 14 14 extern int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle); 15 + void versatile_immitation_cpu_die(unsigned int cpu, unsigned int actrl_mask);