Merge tag 'pm2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

Pull arm-soc cpuidle enablement for OMAP from Olof Johansson:
"Coupled cpuidle was meant to merge for 3.5 through Len Brown's tree,
but didn't go in because the pull request ended up rejected. So it
just got merged, and we got this staged branch that enables the
coupled cpuidle code on OMAP.

With a stable git workflow from the other maintainer we could have
staged this earlier, but that wasn't the case, so we have had to merge
it late.

The alternative is to hold it off until 3.7, but given that the code is
well-isolated to OMAP and they are eager to see it go in, I didn't
push back hard in that direction."

* tag 'pm2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc:
ARM: OMAP4: CPUidle: Open broadcast clock-event device.
ARM: OMAP4: CPUidle: add synchronization for coupled idle states
ARM: OMAP4: CPUidle: Use coupled cpuidle states to implement SMP cpuidle.
ARM: OMAP: timer: allow gp timer clock-event to be used on both cpus
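
In outline, "coupled" cpuidle means the deep C-states are flagged so the cpuidle core enters them only after every CPU in the coupled set has requested them. The registration shape is roughly the sketch below, a minimal outline against the 3.6-era cpuidle API; the example_* names are illustrative, and the real OMAP4 driver is in the cpuidle44xx.c diff further down. Note that CPUIDLE_FLAG_COUPLED and the coupled_cpus field only exist when the platform selects ARCH_NEEDS_CPU_IDLE_COUPLED, which is what the Kconfig hunk below does.

#include <linux/cpuidle.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct cpuidle_device, example_idle_dev);

/* Hypothetical enter callbacks; the OMAP4 versions are in the diff below. */
static int example_enter_simple(struct cpuidle_device *dev,
                                struct cpuidle_driver *drv, int index)
{
        return index;
}

static int example_enter_coupled(struct cpuidle_device *dev,
                                 struct cpuidle_driver *drv, int index)
{
        return index;
}

static struct cpuidle_driver example_idle_driver = {
        .name = "example_idle",
        .owner = THIS_MODULE,
        .states = {
                {       /* per-CPU WFI, entered independently */
                        .enter = example_enter_simple,
                        .flags = CPUIDLE_FLAG_TIME_VALID,
                        .name = "C1",
                },
                {       /* cluster state, entered via the coupled machinery */
                        .enter = example_enter_coupled,
                        .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
                        .name = "C2",
                },
        },
        .state_count = 2,
        .safe_state_index = 0,
};

static int __init example_idle_init(void)
{
        unsigned int cpu;

        cpuidle_register_driver(&example_idle_driver);

        for_each_online_cpu(cpu) {
                struct cpuidle_device *dev = &per_cpu(example_idle_dev, cpu);

                dev->cpu = cpu;
                /* CPUs that must rendezvous before a COUPLED state is entered */
                dev->coupled_cpus = *cpu_online_mask;
                if (cpuidle_register_device(dev))
                        return -EIO;
        }
        return 0;
}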

Changed files: +101 -45
arch/arm/mach-omap2/Kconfig (+1)
@@ ... @@
        select PM_OPP if PM
        select USB_ARCH_HAS_EHCI if USB_SUPPORT
        select ARM_CPU_SUSPEND if PM
+       select ARCH_NEEDS_CPU_IDLE_COUPLED
 
 config SOC_OMAP5
        bool "TI OMAP5"

arch/arm/mach-omap2/cpuidle44xx.c (+97 -44)
@@ ... @@
 #include "common.h"
 #include "pm.h"
 #include "prm.h"
+#include "clockdomain.h"
 
 /* Machine specific information */
 struct omap4_idle_statedata {
@@ ... @@
        },
 };
 
-static struct powerdomain *mpu_pd, *cpu0_pd, *cpu1_pd;
+static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
+static struct clockdomain *cpu_clkdm[NR_CPUS];
+
+static atomic_t abort_barrier;
+static bool cpu_done[NR_CPUS];
 
 /**
- * omap4_enter_idle - Programs OMAP4 to enter the specified state
+ * omap4_enter_idle_coupled_[simple/coupled] - OMAP4 cpuidle entry functions
  * @dev: cpuidle device
  * @drv: cpuidle driver
  * @index: the index of state to be entered
@@ ... @@
  * specified low power state selected by the governor.
  * Returns the amount of time spent in the low power state.
  */
-static int omap4_enter_idle(struct cpuidle_device *dev,
+static int omap4_enter_idle_simple(struct cpuidle_device *dev,
+                       struct cpuidle_driver *drv,
+                       int index)
+{
+       local_fiq_disable();
+       omap_do_wfi();
+       local_fiq_enable();
+
+       return index;
+}
+
+static int omap4_enter_idle_coupled(struct cpuidle_device *dev,
                        struct cpuidle_driver *drv,
                        int index)
 {
        struct omap4_idle_statedata *cx = &omap4_idle_data[index];
-       u32 cpu1_state;
        int cpu_id = smp_processor_id();
 
        local_fiq_disable();
 
        /*
-        * CPU0 has to stay ON (i.e in C1) until CPU1 is OFF state.
+        * CPU0 has to wait and stay ON until CPU1 is OFF state.
         * This is necessary to honour hardware recommondation
         * of triggeing all the possible low power modes once CPU1 is
         * out of coherency and in OFF mode.
-        * Update dev->last_state so that governor stats reflects right
-        * data.
         */
-       cpu1_state = pwrdm_read_pwrst(cpu1_pd);
-       if (cpu1_state != PWRDM_POWER_OFF) {
-               index = drv->safe_state_index;
-               cx = &omap4_idle_data[index];
+       if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+               while (pwrdm_read_pwrst(cpu_pd[1]) != PWRDM_POWER_OFF) {
+                       cpu_relax();
+
+                       /*
+                        * CPU1 could have already entered & exited idle
+                        * without hitting off because of a wakeup
+                        * or a failed attempt to hit off mode. Check for
+                        * that here, otherwise we could spin forever
+                        * waiting for CPU1 off.
+                        */
+                       if (cpu_done[1])
+                               goto fail;
+
+               }
        }
 
-       if (index > 0)
-               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu_id);
 
        /*
         * Call idle CPU PM enter notifier chain so that
         * VFP and per CPU interrupt context is saved.
         */
-       if (cx->cpu_state == PWRDM_POWER_OFF)
-               cpu_pm_enter();
+       cpu_pm_enter();
 
-       pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
-       omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
+       if (dev->cpu == 0) {
+               pwrdm_set_logic_retst(mpu_pd, cx->mpu_logic_state);
+               omap_set_pwrdm_state(mpu_pd, cx->mpu_state);
 
-       /*
-        * Call idle CPU cluster PM enter notifier chain
-        * to save GIC and wakeupgen context.
-        */
-       if ((cx->mpu_state == PWRDM_POWER_RET) &&
-               (cx->mpu_logic_state == PWRDM_POWER_OFF))
-               cpu_cluster_pm_enter();
+               /*
+                * Call idle CPU cluster PM enter notifier chain
+                * to save GIC and wakeupgen context.
+                */
+               if ((cx->mpu_state == PWRDM_POWER_RET) &&
+                       (cx->mpu_logic_state == PWRDM_POWER_OFF))
+                       cpu_cluster_pm_enter();
+       }
 
        omap4_enter_lowpower(dev->cpu, cx->cpu_state);
+       cpu_done[dev->cpu] = true;
+
+       /* Wakeup CPU1 only if it is not offlined */
+       if (dev->cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
+               clkdm_wakeup(cpu_clkdm[1]);
+               clkdm_allow_idle(cpu_clkdm[1]);
+       }
 
        /*
         * Call idle CPU PM exit notifier chain to restore
-        * VFP and per CPU IRQ context. Only CPU0 state is
-        * considered since CPU1 is managed by CPU hotplug.
+        * VFP and per CPU IRQ context.
         */
-       if (pwrdm_read_prev_pwrst(cpu0_pd) == PWRDM_POWER_OFF)
-               cpu_pm_exit();
+       cpu_pm_exit();
 
        /*
         * Call idle CPU cluster PM exit notifier chain
@@ ... @@
        if (omap4_mpuss_read_prev_context_state())
                cpu_cluster_pm_exit();
 
-       if (index > 0)
-               clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu_id);
+
+fail:
+       cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
+       cpu_done[dev->cpu] = false;
 
        local_fiq_enable();
 
@@ ... @@
                .exit_latency = 2 + 2,
                .target_residency = 5,
                .flags = CPUIDLE_FLAG_TIME_VALID,
-               .enter = omap4_enter_idle,
+               .enter = omap4_enter_idle_simple,
                .name = "C1",
                .desc = "MPUSS ON"
        },
@@ ... @@
                /* C2 - CPU0 OFF + CPU1 OFF + MPU CSWR */
                .exit_latency = 328 + 440,
                .target_residency = 960,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
-               .enter = omap4_enter_idle,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+               .enter = omap4_enter_idle_coupled,
                .name = "C2",
                .desc = "MPUSS CSWR",
        },
@@ ... @@
                /* C3 - CPU0 OFF + CPU1 OFF + MPU OSWR */
                .exit_latency = 460 + 518,
                .target_residency = 1100,
-               .flags = CPUIDLE_FLAG_TIME_VALID,
-               .enter = omap4_enter_idle,
+               .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_COUPLED,
+               .enter = omap4_enter_idle_coupled,
                .name = "C3",
                .desc = "MPUSS OSWR",
        },
@@ ... @@
        .state_count = ARRAY_SIZE(omap4_idle_data),
        .safe_state_index = 0,
 };
+
+/*
+ * For each cpu, setup the broadcast timer because local timers
+ * stops for the states above C1.
+ */
+static void omap_setup_broadcast_timer(void *arg)
+{
+       int cpu = smp_processor_id();
+       clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
+}
 
 /**
  * omap4_idle_init - Init routine for OMAP4 idle
@@ ... @@
        unsigned int cpu_id = 0;
 
        mpu_pd = pwrdm_lookup("mpu_pwrdm");
-       cpu0_pd = pwrdm_lookup("cpu0_pwrdm");
-       cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
-       if ((!mpu_pd) || (!cpu0_pd) || (!cpu1_pd))
+       cpu_pd[0] = pwrdm_lookup("cpu0_pwrdm");
+       cpu_pd[1] = pwrdm_lookup("cpu1_pwrdm");
+       if ((!mpu_pd) || (!cpu_pd[0]) || (!cpu_pd[1]))
                return -ENODEV;
 
-       dev = &per_cpu(omap4_idle_dev, cpu_id);
-       dev->cpu = cpu_id;
+       cpu_clkdm[0] = clkdm_lookup("mpu0_clkdm");
+       cpu_clkdm[1] = clkdm_lookup("mpu1_clkdm");
+       if (!cpu_clkdm[0] || !cpu_clkdm[1])
+               return -ENODEV;
 
-       cpuidle_register_driver(&omap4_idle_driver);
+       /* Configure the broadcast timer on each cpu */
+       on_each_cpu(omap_setup_broadcast_timer, NULL, 1);
 
-       if (cpuidle_register_device(dev)) {
-               pr_err("%s: CPUidle register device failed\n", __func__);
-               return -EIO;
+       for_each_cpu(cpu_id, cpu_online_mask) {
+               dev = &per_cpu(omap4_idle_dev, cpu_id);
+               dev->cpu = cpu_id;
+               dev->coupled_cpus = *cpu_online_mask;
+
+               cpuidle_register_driver(&omap4_idle_driver);
+
+               if (cpuidle_register_device(dev)) {
+                       pr_err("%s: CPUidle register failed\n", __func__);
+                       return -EIO;
+               }
        }
 
        return 0;
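
The synchronization in omap4_enter_idle_coupled above generalizes to the pattern sketched below: CPU0 spins until the secondary is genuinely off (bailing out if the secondary already aborted), each CPU hands its tick over to the broadcast device while the local timer is stopped, and all CPUs meet at a parallel barrier on the way out so the cpu_done[] flags are cleared only after everyone has seen them. This is a stripped-down sketch; secondary_cpu_is_off() and platform_enter_lowpower() are hypothetical stand-ins for the OMAP powerdomain calls.

#include <linux/atomic.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/cpumask.h>

/* Hypothetical platform hooks standing in for the OMAP powerdomain calls. */
extern bool secondary_cpu_is_off(void);
extern void platform_enter_lowpower(int cpu, int index);

static atomic_t abort_barrier;
static bool cpu_done[NR_CPUS];

static int example_enter_coupled(struct cpuidle_device *dev,
                                 struct cpuidle_driver *drv, int index)
{
        int cpu = dev->cpu;

        /* CPU0 must not take the cluster down until CPU1 is really off. */
        if (cpu == 0 && cpumask_test_cpu(1, cpu_online_mask)) {
                while (!secondary_cpu_is_off()) {
                        cpu_relax();
                        /* CPU1 may have aborted; bail out or we spin forever. */
                        if (cpu_done[1])
                                goto fail;
                }
        }

        /* The local timer stops in this state: hand the tick to the broadcast device. */
        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);

        platform_enter_lowpower(cpu, index);
        cpu_done[cpu] = true;

        clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);

fail:
        /*
         * All coupled CPUs meet here, whether they idled or aborted, so the
         * cpu_done[] flags are reset only after everyone has read them.
         */
        cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
        cpu_done[cpu] = false;

        return index;
}

The barrier is what the "add synchronization for coupled idle states" commit above provides: without it, a CPU that aborted could clear its flag while the other CPU was still polling it.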

arch/arm/mach-omap2/timer.c (+3 -1)
@@ ... @@
        .name = "gp_timer",
        .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .shift = 32,
+       .rating = 300,
        .set_next_event = omap2_gp_timer_set_next_event,
        .set_mode = omap2_gp_timer_set_mode,
 };
@@ ... @@
                        clockevent_delta2ns(3, &clockevent_gpt);
        /* Timer internal resynch latency. */
 
-       clockevent_gpt.cpumask = cpumask_of(0);
+       clockevent_gpt.cpumask = cpu_possible_mask;
+       clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev);
        clockevents_register_device(&clockevent_gpt);
 
        pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n",
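
The timer.c hunks support the same scheme: the per-CPU local timers stop in the coupled C-states, so the GP timer clockevent is registered with a cpumask of all possible CPUs (plus an explicit rating and IRQ), presumably so the tick-broadcast layer can pick it up and wake whichever CPU has the earliest pending timer. A generic sketch of that kind of broadcast-capable clockevent follows, with example_* names standing in for the real callbacks:

#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/init.h>

/* Hypothetical timer-programming callbacks. */
static int example_set_next_event(unsigned long cycles,
                                  struct clock_event_device *evt)
{
        return 0;
}

static void example_set_mode(enum clock_event_mode mode,
                             struct clock_event_device *evt)
{
}

/*
 * A rating of 300 sits below typical per-CPU local timers (e.g. the ARM TWD
 * at 350), so this device is left for broadcast duty rather than the normal
 * tick; cpu_possible_mask lets the tick-broadcast layer assign it to any core
 * once the local timers stop in deep idle.
 */
static struct clock_event_device example_broadcast_clkevt = {
        .name = "example_gp_timer",
        .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
        .rating = 300,
        .set_next_event = example_set_next_event,
        .set_mode = example_set_mode,
};

static void __init example_clockevent_init(int irq)
{
        example_broadcast_clkevt.cpumask = cpu_possible_mask;
        example_broadcast_clkevt.irq = irq;
        clockevents_register_device(&example_broadcast_clkevt);
}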