// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/minmax.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

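/*
 * When the architecture provides a polling idle state
 * (CONFIG_ARCH_HAS_CPU_RELAX), cpuidle state 0 is reserved for it, so ACPI
 * C-states are registered starting at index 1; otherwise they start at 0.
 */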
#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static unsigned int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
		boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate. Override with \"processor.max_cstate=%d\"\n",
		  id->ident, (long)id->driver_data,
		  ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the
	 * LAPIC as unstable.
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

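/*
 * Runs on the target CPU (via smp_call_function_single() below) and enables
 * or disables the clockevents tick broadcast, depending on whether any
 * usable C-state on that CPU stops the local APIC timer.
 */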
static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *)arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { }
#endif

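/*
 * Derive C2 and C3 directly from the FADT: the C-state I/O addresses come
 * from the processor's P_BLK and the worst-case latencies from the FADT,
 * invalidating any state whose latency exceeds the ACPI-defined maximum.
 */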
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
				  acpi_gbl_FADT.c2_latency);
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
				  acpi_gbl_FADT.c3_latency);
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address);

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;
	return 0;
}

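/*
 * Validate a C3 state: check chipset errata, work out whether bus-master
 * status checking (bm_check) and bus-master arbitration control (bm_control)
 * are needed, and mark the state valid only if C3 can be entered safely.
 */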
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	if (errata.piix4.fdma) {
		acpi_handle_debug(pr->handle,
				  "C3 not supported on PIIX4 with Type-F DMA\n");
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				acpi_handle_debug(pr->handle,
					"C3 support requires BM control\n");
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				acpi_handle_debug(pr->handle,
					"C3 support without BM control\n");
			}
		}
	} else {
		/*
		 * WBINVD must be set in the FADT for C3 to be supported
		 * when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			acpi_handle_debug(pr->handle,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n");
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

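/*
 * Helpers for sort() below, used to work around firmware that reports
 * C-state latencies out of order: states compare by latency, invalid states
 * compare as equal, and a swap exchanges only the latency fields of two
 * valid states.
 */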
static int acpi_cst_latency_cmp(const void *a, const void *b)
{
	const struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return 0;
	if (x->latency > y->latency)
		return 1;
	if (x->latency < y->latency)
		return -1;
	return 0;
}

static void acpi_cst_latency_swap(void *a, void *b, int n)
{
	struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return;
	swap(x->latency, y->latency);
}

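/*
 * Validate the C-states found for this processor: mark the usable states
 * valid, record TSC/LAPIC side effects, sort states whose latencies the
 * firmware reported out of order, and return the number of working states.
 */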
static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		sort(&pr->power.states[1], max_cstate,
		     sizeof(struct acpi_processor_cx),
		     acpi_cst_latency_cmp,
		     acpi_cst_latency_swap);
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/*
	 * NOTE: the idle thread may not be running while calling this
	 * function.
	 */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

static void wait_for_freeze(void)
{
#ifdef CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal gets
	 * asserted in time to freeze execution properly.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		wait_for_freeze();
	}
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	while (1) {
		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			wait_for_freeze();
		} else
			return -ENODEV;

#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
		cond_wakeup_cpu0();
#endif
	}

	/* Never reached */
	return 0;
}

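/*
 * With CPU hotplug enabled, the compile-time SMP check in
 * acpi_processor_get_power_info_fadt() is skipped, so fall back to C1 at
 * run time when there is no _CST and the FADT does not advertise C2 support
 * on MP systems (the num_online_cpus() > 1 check is done by the callers).
 */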
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
			      struct acpi_processor *pr,
			      struct acpi_processor_cx *cx,
			      int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	rcu_idle_enter();

	acpi_idle_do_entry(cx);

	rcu_idle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	return index;
}

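/*
 * The cpuidle ->enter() callback for ACPI C-states: dispatches C3 with bus
 * mastering to acpi_idle_enter_bm(), demotes to C1 where deeper states are
 * unsafe, and enters the chosen state via acpi_idle_do_entry().
 */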
static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

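/*
 * The ->enter_s2idle() callback: like acpi_idle_enter(), but for
 * suspend-to-idle, where the BM_STS check is skipped so that C3 entry is
 * never demoted and ARB_DIS is applied unconditionally (when available).
 */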
static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

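/*
 * Populate this CPU's acpi_cstate[] table with its valid C-states and set
 * the per-state cpuidle flags (timer-stop, TLB-flushed, RCU-idle) on the
 * shared driver states.
 */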
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

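/*
 * Fill in the cpuidle driver's state table from the processor's C-state
 * data: names, latencies, target residencies and entry callbacks, with a
 * poll state at index 0 when CONFIG_ARCH_HAS_CPU_RELAX is set.
 */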
static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
		    cx->type == ACPI_STATE_C3) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit. Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);

	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _LPI, giving up\n");
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wake latency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx - 1);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

#define ACPI_LPI_STATE_FLAGS_ENABLED			BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

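/*
 * Combine each enabled LPI state at the current hierarchy level with every
 * composite state from the previous (child) level that allows promotion
 * into it, appending the results to pr->power.lpi_states[]; at the leaf
 * (processor) level the states are copied through unchanged.
 */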
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

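/*
 * Evaluate _LPI for the processor itself, then walk up the ACPI
 * processor-container hierarchy, flattening each level's states into the
 * composite list used by the cpuidle driver.
 */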
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		d = acpi_fetch_acpi_dev(pr_ahandle);
		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* _LPI can be optional at this level */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -ENODEV;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

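/*
 * Prefer _LPI information when available and confirmed via _OSC; otherwise
 * fall back to C-state information from _CST or the FADT.
 */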
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu. This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
		/* Protect against cpu-hotplug */
		cpus_read_lock();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		cpus_read_unlock();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register the per-CPU cpuidle_device. The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}