// SPDX-License-Identifier: GPL-2.0-only
/*
 * sleep.c - ACPI sleep support.
 *
 * Copyright (c) 2005 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
 * Copyright (c) 2004 David Shaohua Li <shaohua.li@intel.com>
 * Copyright (c) 2000-2003 Patrick Mochel
 * Copyright (c) 2003 Open Source Development Lab
 */

#define pr_fmt(fmt) "ACPI: PM: " fmt

#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/suspend.h>
#include <linux/reboot.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/syscore_ops.h>
#include <asm/io.h>
#include <trace/events/power.h>

#include "internal.h"
#include "sleep.h"

/*
 * Some HW-full platforms do not have _S5, so they may need
 * to leverage EFI power off for a shutdown.
 */
bool acpi_no_s5;
static u8 sleep_states[ACPI_S_STATE_COUNT];

static void acpi_sleep_tts_switch(u32 acpi_state)
{
	acpi_status status;

	status = acpi_execute_simple_method(NULL, "\\_TTS", acpi_state);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		/*
		 * The OS can't evaluate the _TTS object correctly. A warning
		 * message will be printed, but it won't break anything.
		 */
		pr_notice("Failure in evaluating _TTS object\n");
	}
}

static int tts_notify_reboot(struct notifier_block *this,
			unsigned long code, void *x)
{
	acpi_sleep_tts_switch(ACPI_STATE_S5);
	return NOTIFY_DONE;
}

static struct notifier_block tts_notifier = {
	.notifier_call = tts_notify_reboot,
	.next = NULL,
	.priority = 0,
};

#ifndef acpi_skip_set_wakeup_address
#define acpi_skip_set_wakeup_address() false
#endif

static int acpi_sleep_prepare(u32 acpi_state)
{
#ifdef CONFIG_ACPI_SLEEP
	unsigned long acpi_wakeup_address;

	/* do we have a wakeup address for S2 and S3? */
	if (acpi_state == ACPI_STATE_S3 && !acpi_skip_set_wakeup_address()) {
		acpi_wakeup_address = acpi_get_wakeup_address();
		if (!acpi_wakeup_address)
			return -EFAULT;
		acpi_set_waking_vector(acpi_wakeup_address);
	}
#endif
	pr_info("Preparing to enter system sleep state S%d\n", acpi_state);
	acpi_enable_wakeup_devices(acpi_state);
	acpi_enter_sleep_state_prep(acpi_state);
	return 0;
}

bool acpi_sleep_state_supported(u8 sleep_state)
{
	acpi_status status;
	u8 type_a, type_b;

	status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
	return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
		|| (acpi_gbl_FADT.sleep_control.address
			&& acpi_gbl_FADT.sleep_status.address));
}

#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;

u32 acpi_target_system_state(void)
{
	return acpi_target_sleep_state;
}
EXPORT_SYMBOL_GPL(acpi_target_system_state);

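/*
 * Set when the system was woken from S3 by the fixed power button, so that
 * acpi_pm_finish() can report a wakeup event on the power button device once
 * timekeeping is running again.
 */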
static bool pwr_btn_event_pending;

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * and to restore them during the subsequent resume. Windows does that also for
 * suspend to RAM. However, it is known that this mechanism does not work on
 * all machines, so we allow the user to disable it with the help of the
 * 'acpi_sleep=nonvs' kernel command line option.
 */
static bool nvs_nosave;

void __init acpi_nvs_nosave(void)
{
	nvs_nosave = true;
}

/*
 * The ACPI specification wants us to save NVS memory regions during hibernation
 * but says nothing about saving NVS during S3. Not all versions of Windows
 * save NVS on S3 suspend either, and it is clear that not all systems need
 * NVS to be saved at S3 time. To improve suspend/resume time, allow the
 * user to disable saving NVS on S3 if their system does not require it, but
 * continue to save/restore NVS for S4 as specified.
 */
static bool nvs_nosave_s3;

void __init acpi_nvs_nosave_s3(void)
{
	nvs_nosave_s3 = true;
}

static int __init init_nvs_save_s3(const struct dmi_system_id *d)
{
	nvs_nosave_s3 = false;
	return 0;
}

/*
 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 * kernel command line option that causes the following variable to be set.
 */
static bool old_suspend_ordering;

void __init acpi_old_suspend_ordering(void)
{
	old_suspend_ordering = true;
}

static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
{
	acpi_old_suspend_ordering();
	return 0;
}

static int __init init_nvs_nosave(const struct dmi_system_id *d)
{
	acpi_nvs_nosave();
	return 0;
}

bool acpi_sleep_default_s3;

static int __init init_default_s3(const struct dmi_system_id *d)
{
	acpi_sleep_default_s3 = true;
	return 0;
}

static const struct dmi_system_id acpisleep_dmi_table[] __initconst = {
	{
		.callback = init_old_suspend_ordering,
		.ident = "Abit KN9 (nForce4 variant)",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "http://www.abit.com.tw/"),
			DMI_MATCH(DMI_BOARD_NAME, "KN9 Series(NF-CK804)"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "HP xw4600 Workstation",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
			DMI_MATCH(DMI_PRODUCT_NAME, "HP xw4600 Workstation"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "M2N8L"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "Panasonic CF51-2L",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR,
				"Matsushita Electric Industrial Co.,Ltd."),
			DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-FW41E_H",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW41E_H"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-FW21E",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21E"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-FW21M",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VPCEB17FX",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB17FX"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-SR11M",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR11M"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Everex StepNote Series",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Everex Systems, Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "Everex StepNote Series"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VPCEB1Z1E",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1Z1E"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-NW130D",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VPCCW29FX",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Averatec AV1020-ED2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
			DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "Asus A8N-SLI DELUXE",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI DELUXE"),
		},
	},
	{
		.callback = init_old_suspend_ordering,
		.ident = "Asus A8N-SLI Premium",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "A8N-SLI Premium"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-SR26GN_P",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-SR26GN_P"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VPCEB1S1E",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VPCEB1S1E"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Sony Vaio VGN-FW520F",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Asus K54C",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
		},
	},
	{
		.callback = init_nvs_nosave,
		.ident = "Asus K54HR",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
		},
	},
	{
		.callback = init_nvs_save_s3,
		.ident = "Asus 1025C",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "1025C"),
		},
	},
	/*
	 * The ASUS ROG M16 from 2023 has many events which wake it from s2idle,
	 * resulting in excessive battery drain and risk of laptop overheating;
	 * these events can be caused by the MMC or the AniMe display if
	 * installed. The match is valid for all of the GU604V<x> range.
	 */
	{
		.callback = init_default_s3,
		.ident = "ASUS ROG Zephyrus M16 (2023)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "ROG Zephyrus M16 GU604V"),
		},
	},
	/*
	 * https://bugzilla.kernel.org/show_bug.cgi?id=189431
	 * Lenovo G50-45 is a platform later than 2012, but needs NVS memory
	 * saving during S3.
	 */
	{
		.callback = init_nvs_save_s3,
		.ident = "Lenovo G50-45",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "80E3"),
		},
	},
	{
		.callback = init_nvs_save_s3,
		.ident = "Lenovo G40-45",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "80E1"),
		},
	},
	/*
	 * ThinkPad X1 Tablet(2016) cannot do suspend-to-idle using
	 * the Low Power S0 Idle firmware interface (see
	 * https://bugzilla.kernel.org/show_bug.cgi?id=199057).
	 */
	{
		.callback = init_default_s3,
		.ident = "ThinkPad X1 Tablet(2016)",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "20GGA00L00"),
		},
	},
	{},
};

static bool ignore_blacklist;

void __init acpi_sleep_no_blacklist(void)
{
	ignore_blacklist = true;
}

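/*
 * Apply the quirk table above unless acpi_sleep_no_blacklist() was called.
 * On platforms with a BIOS dated 2012 or later, NVS saving over S3 is skipped
 * by default; individual DMI entries can opt back in via init_nvs_save_s3().
 */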
static void __init acpi_sleep_dmi_check(void)
{
	if (ignore_blacklist)
		return;

	if (dmi_get_bios_year() >= 2012)
		acpi_nvs_nosave_s3();

	dmi_check_system(acpisleep_dmi_table);
}

/**
 * acpi_pm_freeze - Disable the GPEs and suspend EC transactions.
 */
static int acpi_pm_freeze(void)
{
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();
	acpi_ec_block_transactions();
	return 0;
}

/**
 * acpi_pm_pre_suspend - Enable wakeup devices, "freeze" EC and save NVS.
 */
static int acpi_pm_pre_suspend(void)
{
	acpi_pm_freeze();
	return suspend_nvs_save();
}

/**
 * __acpi_pm_prepare - Prepare the platform to enter the target state.
 *
 * If necessary, set the firmware waking vector and do arch-specific
 * nastiness to get the wakeup code to the waking vector.
 */
static int __acpi_pm_prepare(void)
{
	int error = acpi_sleep_prepare(acpi_target_sleep_state);
	if (error)
		acpi_target_sleep_state = ACPI_STATE_S0;

	return error;
}

/**
 * acpi_pm_prepare - Prepare the platform to enter the target sleep
 *	state and disable the GPEs.
 */
static int acpi_pm_prepare(void)
{
	int error = __acpi_pm_prepare();
	if (!error)
		error = acpi_pm_pre_suspend();

	return error;
}

/**
 * acpi_pm_finish - Instruct the platform to leave a sleep state.
 *
 * This is called after we wake back up (or if entering the sleep state
 * failed).
 */
static void acpi_pm_finish(void)
{
	struct acpi_device *pwr_btn_adev;
	u32 acpi_state = acpi_target_sleep_state;

	acpi_ec_unblock_transactions();
	suspend_nvs_free();

	if (acpi_state == ACPI_STATE_S0)
		return;

	pr_info("Waking up from system sleep state S%d\n", acpi_state);
	acpi_disable_wakeup_devices(acpi_state);
	acpi_leave_sleep_state(acpi_state);

	/* reset firmware waking vector */
	acpi_set_waking_vector(0);

	acpi_target_sleep_state = ACPI_STATE_S0;

	acpi_resume_power_resources();

	/* If we were woken with the fixed power button, provide a small
	 * hint to userspace in the form of a wakeup event on the fixed power
	 * button device (if it can be found).
	 *
	 * We delay the event generation until now, as the PM layer requires
	 * timekeeping to be running before we generate events.
	 */
	if (!pwr_btn_event_pending)
		return;

	pwr_btn_event_pending = false;
	pwr_btn_adev = acpi_dev_get_first_match_dev(ACPI_BUTTON_HID_POWERF,
						    NULL, -1);
	if (pwr_btn_adev) {
		pm_wakeup_event(&pwr_btn_adev->dev, 0);
		acpi_dev_put(pwr_btn_adev);
	}
}

/**
 * acpi_pm_start - Start system PM transition.
 * @acpi_state: The target ACPI power state to transition to.
 */
static void acpi_pm_start(u32 acpi_state)
{
	acpi_target_sleep_state = acpi_state;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
	acpi_scan_lock_acquire();
}

/**
 * acpi_pm_end - Finish up system PM transition.
 */
static void acpi_pm_end(void)
{
	acpi_turn_off_unused_power_resources();
	acpi_scan_lock_release();
	/*
	 * This is necessary in case acpi_pm_finish() is not called during a
	 * failing transition to a sleep state.
	 */
	acpi_target_sleep_state = ACPI_STATE_S0;
	acpi_sleep_tts_switch(acpi_target_sleep_state);
}
#else /* !CONFIG_ACPI_SLEEP */
#define sleep_no_lps0 (1)
#define acpi_target_sleep_state ACPI_STATE_S0
#define acpi_sleep_default_s3 (1)
static inline void acpi_sleep_dmi_check(void) {}
#endif /* CONFIG_ACPI_SLEEP */

#ifdef CONFIG_SUSPEND
static u32 acpi_suspend_states[] = {
	[PM_SUSPEND_ON] = ACPI_STATE_S0,
	[PM_SUSPEND_STANDBY] = ACPI_STATE_S1,
	[PM_SUSPEND_MEM] = ACPI_STATE_S3,
	[PM_SUSPEND_MAX] = ACPI_STATE_S5
};

/**
 * acpi_suspend_begin - Set the target system sleep state to the state
 *	associated with given @pm_state, if supported.
 * @pm_state: The target system power management state.
 */
static int acpi_suspend_begin(suspend_state_t pm_state)
{
	u32 acpi_state = acpi_suspend_states[pm_state];
	int error;

	error = (nvs_nosave || nvs_nosave_s3) ? 0 : suspend_nvs_alloc();
	if (error)
		return error;

	if (!sleep_states[acpi_state]) {
		pr_err("ACPI does not support sleep state S%u\n", acpi_state);
		return -ENOSYS;
	}
	if (acpi_state > ACPI_STATE_S1)
		pm_set_suspend_via_firmware();

	acpi_pm_start(acpi_state);
	return 0;
}

/**
 * acpi_suspend_enter - Actually enter a sleep state.
 * @pm_state: ignored
 *
 * Flush caches and go to sleep. For STR we have to call arch-specific
 * assembly, which in turn calls acpi_enter_sleep_state().
 * It's unfortunate, but it works. Please fix if you're feeling frisky.
 */
static int acpi_suspend_enter(suspend_state_t pm_state)
{
	acpi_status status = AE_OK;
	u32 acpi_state = acpi_target_sleep_state;
	int error;

	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
	switch (acpi_state) {
	case ACPI_STATE_S1:
		barrier();
		status = acpi_enter_sleep_state(acpi_state);
		break;

	case ACPI_STATE_S3:
		if (!acpi_suspend_lowlevel)
			return -ENOSYS;
		error = acpi_suspend_lowlevel();
		if (error)
			return error;
		pr_info("Low-level resume complete\n");
		pm_set_resume_via_firmware();
		break;
	}
	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);

	/* This violates the spec but is required for bug compatibility. */
	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);

	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(acpi_state);

	/* The ACPI 3.0 spec (p. 62) says that it's the responsibility
	 * of the OSPM to clear the status bit [ implying that the
	 * POWER_BUTTON event should not reach userspace ].
	 *
	 * However, we do generate a small hint for userspace in the form of
	 * a wakeup event. We flag this condition for now and generate the
	 * event later, as we're currently too early in resume to be able to
	 * generate wakeup events.
	 */
	if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
		acpi_event_status pwr_btn_status = ACPI_EVENT_FLAG_DISABLED;

		acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);

		if (pwr_btn_status & ACPI_EVENT_FLAG_STATUS_SET) {
			acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
			/* Flag for later */
			pwr_btn_event_pending = true;
		}
	}

	/*
	 * Disable all GPEs and clear their status bits before interrupts are
	 * enabled. Some GPEs (like wakeup GPEs) have no handlers and this can
	 * prevent them from producing spurious interrupts.
	 *
	 * acpi_leave_sleep_state() will reenable specific GPEs later.
	 *
	 * Because this code runs on one CPU with disabled interrupts (all of
	 * the other CPUs are offline at this time), it need not acquire any
	 * sleeping locks which may trigger an implicit preemption point even
	 * if there is no contention, so avoid doing that by using a low-level
	 * library routine here.
	 */
	acpi_hw_disable_all_gpes();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();

	suspend_nvs_restore();

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

static int acpi_suspend_state_valid(suspend_state_t pm_state)
{
	u32 acpi_state;

	switch (pm_state) {
	case PM_SUSPEND_ON:
	case PM_SUSPEND_STANDBY:
	case PM_SUSPEND_MEM:
		acpi_state = acpi_suspend_states[pm_state];

		return sleep_states[acpi_state];
	default:
		return 0;
	}
}

static const struct platform_suspend_ops acpi_suspend_ops = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin,
	.prepare_late = acpi_pm_prepare,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
};

/**
 * acpi_suspend_begin_old - Set the target system sleep state to the
 *	state associated with given @pm_state, if supported, and
 *	execute the _PTS control method. This function is used if the
 *	pre-ACPI 2.0 suspend ordering has been requested.
 * @pm_state: The target suspend state for the system.
 */
static int acpi_suspend_begin_old(suspend_state_t pm_state)
{
	int error = acpi_suspend_begin(pm_state);
	if (!error)
		error = __acpi_pm_prepare();

	return error;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_suspend_ops acpi_suspend_ops_old = {
	.valid = acpi_suspend_state_valid,
	.begin = acpi_suspend_begin_old,
	.prepare_late = acpi_pm_pre_suspend,
	.enter = acpi_suspend_enter,
	.wake = acpi_pm_finish,
	.end = acpi_pm_end,
	.recover = acpi_pm_finish,
};

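/*
 * Set while a suspend-to-idle transition is in progress; other ACPI code can
 * query it via acpi_s2idle_wakeup().
 */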
static bool s2idle_wakeup;

int acpi_s2idle_begin(void)
{
	acpi_scan_lock_acquire();
	return 0;
}

int acpi_s2idle_prepare(void)
{
	if (acpi_sci_irq_valid()) {
		int error;

		error = enable_irq_wake(acpi_sci_irq);
		if (error)
			pr_warn("Warning: Failed to enable wakeup from IRQ %d: %d\n",
				acpi_sci_irq, error);

		acpi_ec_set_gpe_wake_mask(ACPI_GPE_ENABLE);
	}

	acpi_enable_wakeup_devices(ACPI_STATE_S0);

	/* Change the configuration of GPEs to avoid spurious wakeup. */
	acpi_enable_all_wakeup_gpes();
	acpi_os_wait_events_complete();

	s2idle_wakeup = true;
	return 0;
}

bool acpi_s2idle_wake(void)
{
	if (!acpi_sci_irq_valid())
		return pm_wakeup_pending();

	while (pm_wakeup_pending()) {
		/*
		 * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
		 * SCI has not triggered while suspended, so bail out (the
		 * wakeup is pending anyway and the SCI is not the source of
		 * it).
		 */
		if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) {
			pm_pr_dbg("Wakeup unrelated to ACPI SCI\n");
			return true;
		}

		/*
		 * If the status bit of any enabled fixed event is set, the
		 * wakeup is regarded as valid.
		 */
		if (acpi_any_fixed_event_status_set()) {
			pm_pr_dbg("ACPI fixed event wakeup\n");
			return true;
		}

		/* Check wakeups from drivers sharing the SCI. */
		if (acpi_check_wakeup_handlers()) {
			pm_pr_dbg("ACPI custom handler wakeup\n");
			return true;
		}

		/*
		 * Check non-EC GPE wakeups and if there are none, cancel the
		 * SCI-related wakeup and dispatch the EC GPE.
		 */
		if (acpi_ec_dispatch_gpe()) {
			pm_pr_dbg("ACPI non-EC GPE wakeup\n");
			return true;
		}

		acpi_os_wait_events_complete();

		/*
		 * The SCI is in the "suspended" state now and it cannot produce
		 * new wakeup events till the rearming below, so if any of them
		 * are pending here, they must be resulting from the processing
		 * of EC events above or coming from somewhere else.
		 */
		if (pm_wakeup_pending()) {
			pm_pr_dbg("Wakeup after ACPI Notify sync\n");
			return true;
		}

		pm_pr_dbg("Rearming ACPI SCI for wakeup\n");

		pm_wakeup_clear(acpi_sci_irq);
		rearm_wake_irq(acpi_sci_irq);
	}

	return false;
}

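/*
 * Undo the wakeup configuration set up by acpi_s2idle_prepare(): drain any
 * events still in flight, restore the runtime GPE configuration and stop
 * treating the SCI as a wakeup interrupt.
 */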
void acpi_s2idle_restore(void)
{
	/*
	 * Drain pending events before restoring the working-state configuration
	 * of GPEs.
	 */
	acpi_os_wait_events_complete(); /* synchronize GPE processing */
	acpi_ec_flush_work(); /* flush the EC driver's workqueues */
	acpi_os_wait_events_complete(); /* synchronize Notify handling */

	s2idle_wakeup = false;

	acpi_enable_all_runtime_gpes();

	acpi_disable_wakeup_devices(ACPI_STATE_S0);

	if (acpi_sci_irq_valid()) {
		acpi_ec_set_gpe_wake_mask(ACPI_GPE_DISABLE);
		disable_irq_wake(acpi_sci_irq);
	}
}

void acpi_s2idle_end(void)
{
	acpi_scan_lock_release();
}

static const struct platform_s2idle_ops acpi_s2idle_ops = {
	.begin = acpi_s2idle_begin,
	.prepare = acpi_s2idle_prepare,
	.wake = acpi_s2idle_wake,
	.restore = acpi_s2idle_restore,
	.end = acpi_s2idle_end,
};

void __weak acpi_s2idle_setup(void)
{
	if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)
		pr_info("Efficient low-power S0 idle declared\n");

	s2idle_set_ops(&acpi_s2idle_ops);
}

static void __init acpi_sleep_suspend_setup(void)
{
	bool suspend_ops_needed = false;
	int i;

	for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
		if (acpi_sleep_state_supported(i)) {
			sleep_states[i] = 1;
			suspend_ops_needed = true;
		}

	if (suspend_ops_needed)
		suspend_set_ops(old_suspend_ordering ?
				&acpi_suspend_ops_old : &acpi_suspend_ops);

	acpi_s2idle_setup();
}

#else /* !CONFIG_SUSPEND */
#define s2idle_wakeup	(false)
static inline void acpi_sleep_suspend_setup(void) {}
#endif /* !CONFIG_SUSPEND */

bool acpi_s2idle_wakeup(void)
{
	return s2idle_wakeup;
}

#ifdef CONFIG_PM_SLEEP
static u32 saved_bm_rld;

static int acpi_save_bm_rld(void *data)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
	return 0;
}

static void acpi_restore_bm_rld(void *data)
{
	u32 resumed_bm_rld = 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
	if (resumed_bm_rld == saved_bm_rld)
		return;

	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

static const struct syscore_ops acpi_sleep_syscore_ops = {
	.suspend = acpi_save_bm_rld,
	.resume = acpi_restore_bm_rld,
};

static struct syscore acpi_sleep_syscore = {
	.ops = &acpi_sleep_syscore_ops,
};

static void acpi_sleep_syscore_init(void)
{
	register_syscore(&acpi_sleep_syscore);
}
#else
static inline void acpi_sleep_syscore_init(void) {}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_HIBERNATION
static unsigned long s4_hardware_signature;
static struct acpi_table_facs *facs;
int acpi_check_s4_hw_signature = -1; /* Default behaviour is just to warn */

static int acpi_hibernation_begin(pm_message_t stage)
{
	if (!nvs_nosave) {
		int error = suspend_nvs_alloc();
		if (error)
			return error;
	}

	if (stage.event == PM_EVENT_HIBERNATE)
		pm_set_suspend_via_firmware();

	acpi_pm_start(ACPI_STATE_S4);
	return 0;
}

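/* Enter S4. On success the platform powers down and this call does not return. */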
static int acpi_hibernation_enter(void)
{
	acpi_status status = AE_OK;

	/* This shouldn't return. If it returns, we have a problem */
	status = acpi_enter_sleep_state(ACPI_STATE_S4);
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);

	return ACPI_SUCCESS(status) ? 0 : -EFAULT;
}

static void acpi_hibernation_leave(void)
{
	pm_set_resume_via_firmware();
	/*
	 * If ACPI is not enabled by the BIOS and the boot kernel, we need to
	 * enable it here.
	 */
	acpi_enable();
	/* Reprogram control registers */
	acpi_leave_sleep_state_prep(ACPI_STATE_S4);
	/* Check the hardware signature */
	if (facs && s4_hardware_signature != facs->hardware_signature)
		pr_crit("Hardware changed while hibernated, success doubtful!\n");
	/* Restore the NVS memory area */
	suspend_nvs_restore();
	/* Allow EC transactions to happen. */
	acpi_ec_unblock_transactions();
}

static void acpi_pm_thaw(void)
{
	acpi_ec_unblock_transactions();
	acpi_enable_all_runtime_gpes();
}

static const struct platform_hibernation_ops acpi_hibernation_ops = {
	.begin = acpi_hibernation_begin,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_prepare,
	.finish = acpi_pm_finish,
	.prepare = acpi_pm_prepare,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
};

/**
 * acpi_hibernation_begin_old - Set the target system sleep state to
 *	ACPI_STATE_S4 and execute the _PTS control method. This
 *	function is used if the pre-ACPI 2.0 suspend ordering has been
 *	requested.
 * @stage: The power management event message.
 */
static int acpi_hibernation_begin_old(pm_message_t stage)
{
	int error;
	/*
	 * The _TTS object should always be evaluated before the _PTS object.
	 * When old_suspend_ordering is true, the _PTS object is evaluated
	 * in acpi_sleep_prepare().
	 */
	acpi_sleep_tts_switch(ACPI_STATE_S4);

	error = acpi_sleep_prepare(ACPI_STATE_S4);
	if (error)
		return error;

	if (!nvs_nosave) {
		error = suspend_nvs_alloc();
		if (error)
			return error;
	}

	if (stage.event == PM_EVENT_HIBERNATE)
		pm_set_suspend_via_firmware();

	acpi_target_sleep_state = ACPI_STATE_S4;
	acpi_scan_lock_acquire();
	return 0;
}

/*
 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
 * been requested.
 */
static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
	.begin = acpi_hibernation_begin_old,
	.end = acpi_pm_end,
	.pre_snapshot = acpi_pm_pre_suspend,
	.prepare = acpi_pm_freeze,
	.finish = acpi_pm_finish,
	.enter = acpi_hibernation_enter,
	.leave = acpi_hibernation_leave,
	.pre_restore = acpi_pm_freeze,
	.restore_cleanup = acpi_pm_thaw,
	.recover = acpi_pm_finish,
};

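/*
 * Register the hibernation callbacks if the platform supports S4 and record
 * the FACS hardware signature so that a change of hardware while hibernated
 * can be detected on resume.
 */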
static void acpi_sleep_hibernate_setup(void)
{
	if (!acpi_sleep_state_supported(ACPI_STATE_S4))
		return;

	hibernation_set_ops(old_suspend_ordering ?
			&acpi_hibernation_ops_old : &acpi_hibernation_ops);
	sleep_states[ACPI_STATE_S4] = 1;
	if (!acpi_check_s4_hw_signature)
		return;

	acpi_get_table(ACPI_SIG_FACS, 1, (struct acpi_table_header **)&facs);
	if (facs) {
		/*
		 * s4_hardware_signature is a local copy that is only used to
		 * warn about a mismatch after we have attempted to resume (in
		 * violation of the ACPI specification).
		 */
		s4_hardware_signature = facs->hardware_signature;

		if (acpi_check_s4_hw_signature > 0) {
			/*
			 * If we're actually obeying the ACPI specification
			 * then the signature is written out as part of the
			 * swsusp header, in order to allow the boot kernel
			 * to gracefully decline to resume.
			 */
			swsusp_hardware_signature = facs->hardware_signature;
		}
	}
}
#else /* !CONFIG_HIBERNATION */
static inline void acpi_sleep_hibernate_setup(void) {}
#endif /* !CONFIG_HIBERNATION */

static int acpi_power_off_prepare(struct sys_off_data *data)
{
	/* Prepare to power off the system */
	acpi_sleep_prepare(ACPI_STATE_S5);
	acpi_disable_all_gpes();
	acpi_os_wait_events_complete();
	return NOTIFY_DONE;
}

static int acpi_power_off(struct sys_off_data *data)
{
	/* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */
	pr_debug("%s called\n", __func__);
	local_irq_disable();
	acpi_enter_sleep_state(ACPI_STATE_S5);
	return NOTIFY_DONE;
}

int __init acpi_sleep_init(void)
{
	char supported[ACPI_S_STATE_COUNT * 3 + 1];
	char *pos = supported;
	int i;

	acpi_sleep_dmi_check();

	sleep_states[ACPI_STATE_S0] = 1;

	acpi_sleep_syscore_init();
	acpi_sleep_suspend_setup();
	acpi_sleep_hibernate_setup();

	if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
		sleep_states[ACPI_STATE_S5] = 1;

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF_PREPARE,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off_prepare, NULL);

		register_sys_off_handler(SYS_OFF_MODE_POWER_OFF,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off, NULL);

		/*
		 * Windows uses S5 for reboot, so some BIOSes depend on it to
		 * perform proper reboot.
		 */
		register_sys_off_handler(SYS_OFF_MODE_RESTART_PREPARE,
					 SYS_OFF_PRIO_FIRMWARE,
					 acpi_power_off_prepare, NULL);
	} else {
		acpi_no_s5 = true;
	}

	supported[0] = 0;
	for (i = 0; i < ACPI_S_STATE_COUNT; i++) {
		if (sleep_states[i])
			pos += sprintf(pos, " S%d", i);
	}
	pr_info("(supports%s)\n", supported);

	/*
	 * Register the tts_notifier to reboot notifier list so that the _TTS
	 * object can also be evaluated when the system enters S5.
	 */
	register_reboot_notifier(&tts_notifier);
	return 0;
}