Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.12 (364 lines, 7.9 kB)
/*
 * processor_idle - idle state cpuidle driver.
 * Adapted from drivers/idle/intel_idle.c and
 * drivers/acpi/processor_idle.c
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

#include <asm/paca.h>
#include <asm/reg.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/runlatch.h>
#include <asm/plpar_wrappers.h>

struct cpuidle_driver pseries_idle_driver = {
	.name = "pseries_idle",
	.owner = THIS_MODULE,
};

#define MAX_IDLE_STATE_COUNT	2

static int max_idle_state = MAX_IDLE_STATE_COUNT - 1;
static struct cpuidle_device __percpu *pseries_cpuidle_devices;
static struct cpuidle_state *cpuidle_state_table;

static inline void idle_loop_prolog(unsigned long *in_purr)
{
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}

static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 wait_cycles;

	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
	wait_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
	get_lppaca()->idle = 0;
}

static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;
	int cpu = dev->cpu;

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	while ((!need_resched()) && cpu_online(cpu)) {
		ppc64_runlatch_off();
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}

static void check_and_cede_processor(void)
{
	/*
	 * Ensure our interrupt state is properly tracked,
	 * also checks if no interrupt has occurred while we
	 * were soft-disabled
	 */
	if (prep_irq_for_idle()) {
		cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
		/* Ensure that H_CEDE returns with IRQs on */
		if (WARN_ON(!(mfmsr() & MSR_EE)))
			__hard_irq_enable();
#endif
	}
}

static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	ppc64_runlatch_off();
	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;

	idle_loop_epilog(in_purr);

	return index;
}

static int shared_cede_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);

	/*
	 * Yield the processor to the hypervisor. We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	check_and_cede_processor();

	idle_loop_epilog(in_purr);

	return index;
}

/*
 * States for dedicated partition case.
 */
static struct cpuidle_state dedicated_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Snooze */
		.name = "snooze",
		.desc = "snooze",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE */
		.name = "CEDE",
		.desc = "CEDE",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &dedicated_cede_loop },
};

/*
 * States for shared partition case.
 */
static struct cpuidle_state shared_states[MAX_IDLE_STATE_COUNT] = {
	{ /* Shared Cede */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.flags = CPUIDLE_FLAG_TIME_VALID,
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &shared_cede_loop },
};

void update_smt_snooze_delay(int cpu, int residency)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();
	struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);

	if (cpuidle_state_table != dedicated_states)
		return;

	if (residency < 0) {
		/* Disable the Nap state on that cpu */
		if (dev)
			dev->states_usage[1].disable = 1;
	} else
		if (drv)
			drv->states[1].target_residency = residency;
}

static int pseries_cpuidle_add_cpu_notifier(struct notifier_block *n,
			unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct cpuidle_device *dev =
			per_cpu_ptr(pseries_cpuidle_devices, hotcpu);

	if (dev && cpuidle_get_driver()) {
		switch (action) {
		case CPU_ONLINE:
		case CPU_ONLINE_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_enable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		case CPU_DEAD:
		case CPU_DEAD_FROZEN:
			cpuidle_pause_and_lock();
			cpuidle_disable_device(dev);
			cpuidle_resume_and_unlock();
			break;

		default:
			return NOTIFY_DONE;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block setup_hotplug_notifier = {
	.notifier_call = pseries_cpuidle_add_cpu_notifier,
};

/*
 * pseries_cpuidle_driver_init()
 */
static int pseries_cpuidle_driver_init(void)
{
	int idle_state;
	struct cpuidle_driver *drv = &pseries_idle_driver;

	drv->state_count = 0;

	for (idle_state = 0; idle_state < MAX_IDLE_STATE_COUNT; ++idle_state) {

		if (idle_state > max_idle_state)
			break;

		/* is the state not enabled? */
		if (cpuidle_state_table[idle_state].enter == NULL)
			continue;

		drv->states[drv->state_count] =	/* structure copy */
			cpuidle_state_table[idle_state];

		drv->state_count += 1;
	}

	return 0;
}

/* pseries_idle_devices_uninit(void)
 * unregister cpuidle devices and de-allocate memory
 */
static void pseries_idle_devices_uninit(void)
{
	int i;
	struct cpuidle_device *dev;

	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
		cpuidle_unregister_device(dev);
	}

	free_percpu(pseries_cpuidle_devices);
	return;
}

/* pseries_idle_devices_init()
 * allocate, initialize and register cpuidle device
 */
static int pseries_idle_devices_init(void)
{
	int i;
	struct cpuidle_driver *drv = &pseries_idle_driver;
	struct cpuidle_device *dev;

	pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device);
	if (pseries_cpuidle_devices == NULL)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		dev = per_cpu_ptr(pseries_cpuidle_devices, i);
		dev->state_count = drv->state_count;
		dev->cpu = i;
		if (cpuidle_register_device(dev)) {
			printk(KERN_DEBUG \
				"cpuidle_register_device %d failed!\n", i);
			return -EIO;
		}
	}

	return 0;
}

/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (max_idle_state == 0) {
		printk(KERN_DEBUG "pseries processor idle disabled.\n");
		return -EPERM;
	}

	if (lppaca_shared_proc(get_lppaca()))
		cpuidle_state_table = shared_states;
	else
		cpuidle_state_table = dedicated_states;

	return 0;
}

static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register_driver(&pseries_idle_driver);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	retval = pseries_idle_devices_init();
	if (retval) {
		pseries_idle_devices_uninit();
		cpuidle_unregister_driver(&pseries_idle_driver);
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");

	return 0;
}

static void __exit pseries_processor_idle_exit(void)
{

	unregister_cpu_notifier(&setup_hotplug_notifier);
	pseries_idle_devices_uninit();
	cpuidle_unregister_driver(&pseries_idle_driver);

	return;
}

module_init(pseries_processor_idle_init);
module_exit(pseries_processor_idle_exit);

MODULE_AUTHOR("Deepthi Dharwar <deepthi@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("Cpuidle driver for POWER");
MODULE_LICENSE("GPL");
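For readers experimenting with this driver on a live system, the states it registers can be inspected without modifying the file above. The following is a minimal sketch of a separate, hypothetical test module (idle_dump is an invented name, not part of the kernel tree); it assumes a v3.12-era kernel and relies only on cpuidle_get_driver() and the struct cpuidle_driver fields this file already populates.

/*
 * idle_dump - hypothetical standalone test module (illustration only).
 * Prints the name and states of whichever cpuidle driver is currently
 * registered, e.g. "pseries_idle" on a POWER LPAR. Assumes a v3.12-era
 * kernel where cpuidle_get_driver() is exported to modules.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cpuidle.h>

static int __init idle_dump_init(void)
{
	struct cpuidle_driver *drv = cpuidle_get_driver();
	int i;

	if (!drv) {
		pr_info("idle_dump: no cpuidle driver registered\n");
		return 0;
	}

	pr_info("idle_dump: driver %s, %d state(s)\n",
		drv->name, drv->state_count);

	/* Walk the state table copied in by the driver's init code */
	for (i = 0; i < drv->state_count; i++)
		pr_info("idle_dump: state %d: %s, exit latency %u us, target residency %u us\n",
			i, drv->states[i].name,
			drv->states[i].exit_latency,
			drv->states[i].target_residency);

	return 0;
}

static void __exit idle_dump_exit(void)
{
}

module_init(idle_dump_init);
module_exit(idle_dump_exit);
MODULE_LICENSE("GPL");

Loaded on a dedicated-processor LPAR, this should report the snooze and CEDE states from dedicated_states; on a shared-processor LPAR, only the single Shared Cede state from shared_states. The same information is exposed to userspace under /sys/devices/system/cpu/cpu*/cpuidle/.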