// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/sort.h>
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <linux/minmax.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#include <asm/cpu.h>
#endif

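/*
 * On architectures that provide cpu_relax(), cpuidle installs a polling
 * state at driver index 0 (see acpi_processor_setup_cstates() below), so
 * the ACPI C-states are registered starting at index 1 in that case.
 */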
#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static unsigned int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name = "acpi_idle",
	.owner = THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
	       boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
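	/*
	 * Skip the halt entirely if a reschedule is already pending. On
	 * x86, safe_halt() executes STI;HLT as one unit, so interrupts
	 * are briefly enabled while the CPU halts; disable them again
	 * on wakeup to honor the contract stated above.
	 */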
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *)arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
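	/*
	 * cx points into pr->power.states[], so the pointer difference
	 * below is the index of this C-state; broadcast is needed once
	 * we are at or beyond the first state that stops the lapic.
	 */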
	return cx - pr->power.states >= pr->power.timer_broadcast_on_state;
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }

static bool lapic_timer_needs_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx)
{
	return false;
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

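	/*
	 * Per the ACPI spec, the processor register block (P_BLK) is
	 * 6 bytes long: P_CNT occupies bytes 0-3, and the P_LVL2 and
	 * P_LVL3 entry registers are the single bytes at offsets 4
	 * and 5 respectively.
	 */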
	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		acpi_handle_debug(pr->handle, "C2 latency too large [%d]\n",
				  acpi_gbl_FADT.c2_latency);
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		acpi_handle_debug(pr->handle, "C3 latency too large [%d]\n",
				  acpi_gbl_FADT.c3_latency);
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	acpi_handle_debug(pr->handle, "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address);

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;
	return 0;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	if (errata.piix4.fdma) {
		acpi_handle_debug(pr->handle,
				  "C3 not supported on PIIX4 with Type-F DMA\n");
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				acpi_handle_debug(pr->handle,
						  "C3 support requires BM control\n");
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				acpi_handle_debug(pr->handle,
						  "C3 support without BM control\n");
			}
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for the C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			acpi_handle_debug(pr->handle,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n");
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy. Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

static int acpi_cst_latency_cmp(const void *a, const void *b)
{
	const struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return 0;
	if (x->latency > y->latency)
		return 1;
	if (x->latency < y->latency)
		return -1;
	return 0;
}
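
/*
 * sort() swap callback paired with acpi_cst_latency_cmp() below. Note
 * that only the latency values are exchanged, not the whole entries,
 * so the C1/C2/C3 types stay in their array slots while the latencies
 * are made monotonic.
 */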
static void acpi_cst_latency_swap(void *a, void *b, int n)
{
	struct acpi_processor_cx *x = a, *y = b;

	if (!(x->valid && y->valid))
		return;
	swap(x->latency, y->latency);
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;
	unsigned int last_latency = 0;
	unsigned int last_type = 0;
	bool buggy_latency = false;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;
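		/*
		 * A deeper C-state should never claim a lower exit
		 * latency than a shallower one; firmware that reports
		 * such latencies is worked around by the sort() pass
		 * below.
		 */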
		if (cx->type >= last_type && cx->latency < last_latency)
			buggy_latency = true;
		last_latency = cx->latency;
		last_type = cx->type;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	if (buggy_latency) {
		pr_notice("FW issue: working around C-state latencies out of order\n");
		sort(&pr->power.states[1], max_cstate,
		     sizeof(struct acpi_processor_cx),
		     acpi_cst_latency_cmp,
		     acpi_cst_latency_swap);
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/*
	 * NOTE: the idle thread may not be running while calling
	 * this function.
	 */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

static void wait_for_freeze(void)
{
#ifdef CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
#endif
	/*
	 * Dummy wait op - must do something useless after P_LVL2 read
	 * because chipsets cannot guarantee that STPCLK# signal
	 * gets asserted in time to freeze execution properly.
	 */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupts before call and enables interrupts after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		wait_for_freeze();
	}
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {
		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			wait_for_freeze();
		} else
			return -ENODEV;

#if defined(CONFIG_X86) && defined(CONFIG_HOTPLUG_CPU)
		cond_wakeup_cpu0();
#endif
	}

	/* Never reached */
	return 0;
}

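/*
 * Without _CST (has_cst unset), only the FADT describes C-states, and its
 * C2/C3 entries are only valid on SMP if P_LVL2_UP is set. With CPU
 * hotplug enabled, the compile-time num_online_cpus() check in
 * acpi_processor_get_power_info_fadt() is skipped, so presumably the
 * demotion has to happen at run time instead, whenever more than one CPU
 * is online.
 */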
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
	       !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @drv: cpuidle driver
 * @pr: Target processor
 * @cx: Target state context
 * @index: index of target state
 */
static int acpi_idle_enter_bm(struct cpuidle_driver *drv,
			      struct acpi_processor *pr,
			      struct acpi_processor_cx *cx,
			      int index)
{
	static struct acpi_processor_cx safe_cx = {
		.entry_method = ACPI_CSTATE_HALT,
	};

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control indicates whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is not set.
	 * In that case we cannot do much, so we enter C3 without doing anything.
	 */
	bool dis_bm = pr->flags.bm_control;

	/* If we can skip BM, demote to a safe state. */
	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		dis_bm = false;
		index = drv->safe_state_index;
		if (index >= 0) {
			cx = this_cpu_read(acpi_cstate[index]);
		} else {
			cx = &safe_cx;
			index = -EBUSY;
		}
	}

	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	rcu_idle_enter();

	acpi_idle_do_entry(cx);

	rcu_idle_exit();

	/* Re-enable bus master arbitration */
	if (dis_bm) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	return index;
}

static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check)
			return acpi_idle_enter_bm(drv, pr, cx, index);

		/* C2 to C1 demotion. */
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		}
	}

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	return index;
}

static int acpi_idle_enter_s2idle(struct cpuidle_device *dev,
				  struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return 0;

		if (pr->flags.bm_check) {
			u8 bm_sts_skip = cx->bm_sts_skip;

			/* Don't check BM_STS, do an unconditional ARB_DIS for S2IDLE */
			cx->bm_sts_skip = 1;
			acpi_idle_enter_bm(drv, pr, cx, index);
			cx->bm_sts_skip = bm_sts_skip;

			return 0;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);

	return 0;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;

	if (max_cstate == 0)
		max_cstate = 1;

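	/*
	 * The driver state index (count) and the ACPI state index (i)
	 * diverge: invalid C-states are skipped and slot 0 may be the
	 * poll state, so record the per-CPU mapping in acpi_cstate[].
	 */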
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		state = &acpi_idle_driver.states[count];
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		if (lapic_timer_needs_broadcast(pr, cx))
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;

		if (cx->type == ACPI_STATE_C3) {
			state->flags |= CPUIDLE_FLAG_TLB_FLUSHED;
			if (pr->flags.bm_check)
				state->flags |= CPUIDLE_FLAG_RCU_IDLE;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
		    cx->type == ACPI_STATE_C3) {
			state->enter_dead = acpi_idle_play_dead;
			if (cx->type != ACPI_STATE_C3)
				drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit. Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);

	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

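/*
 * One level of the _LPI hierarchy: @entries holds the raw states read
 * from a single _LPI object, while @composite_states points at the
 * flattened states built so far in pr->power.lpi_states[].
 */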
struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;
	return 0;
}

static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		acpi_handle_debug(handle, "No _LPI, giving up\n");
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

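	/*
	 * Per the ACPI spec, the _LPI package is laid out as: [0]
	 * revision, [1] level ID, [2] the number of LPI states, and
	 * [3..] one sub-package per LPI state.
	 */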
	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wakeup residency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
	return true;
}

#define ACPI_LPI_STATE_FLAGS_ENABLED	BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}

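/*
 * Flatten one level of the LPI hierarchy into pr->power.lpi_states[]:
 * every enabled state at this level is either copied verbatim (leaf
 * processor level) or combined with each compatible composite state
 * from the previous level, i.e. those whose enable_parent_state allows
 * this state's index.
 */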
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -EOPNOTSUPP;
}

static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	/* make sure our architecture has support */
	ret = acpi_processor_ffh_lpi_probe(pr->id);
	if (ret == -EOPNOTSUPP)
		return ret;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		d = acpi_fetch_acpi_dev(pr_ahandle);
		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* _LPI may be optional at this level; stop if it is absent. */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per CPU. This condition is a hack
	 * to make the code that updates C-States be called once.
	 */

	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
		/* Protect against cpu-hotplug */
		cpus_read_lock();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		cpus_read_unlock();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register the per-cpu cpuidle_device. The cpuidle driver
		 * must already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;
	return 0;
}