Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'pm-sleep'

* pm-sleep:
ACPI / PM: Check low power idle constraints for debug only
PM / s2idle: Rename platform operations structure
PM / s2idle: Rename ->enter_freeze to ->enter_s2idle
PM / s2idle: Rename freeze_state enum and related items
PM / s2idle: Rename PM_SUSPEND_FREEZE to PM_SUSPEND_TO_IDLE
ACPI / PM: Prefer suspend-to-idle over S3 on some systems
platform/x86: intel-hid: Wake up Dell Latitude 7275 from suspend-to-idle
PM / suspend: Define pr_fmt() in suspend.c
PM / suspend: Use mem_sleep_labels[] strings in messages
PM / sleep: Put pm_test under CONFIG_PM_SLEEP_DEBUG
PM / sleep: Check pm_wakeup_pending() in __device_suspend_noirq()
PM / core: Add error argument to dpm_show_time()
PM / core: Split dpm_suspend_noirq() and dpm_resume_noirq()
PM / s2idle: Rearrange the main suspend-to-idle loop
PM / timekeeping: Print debug messages when requested
PM / sleep: Mark suspend/hibernation start and finish
PM / sleep: Do not print debug messages by default
PM / suspend: Export pm_suspend_target_state

+622 -291
+12
Documentation/ABI/testing/sysfs-power
··· 273 273 274 274 This output is useful for system wakeup diagnostics of spurious 275 275 wakeup interrupts. 276 + 277 + What: /sys/power/pm_debug_messages 278 + Date: July 2017 279 + Contact: Rafael J. Wysocki <rjw@rjwysocki.net> 280 + Description: 281 + The /sys/power/pm_debug_messages file controls the printing 282 + of debug messages from the system suspend/hibernation 283 + infrastructure to the kernel log. 284 + 285 + Writing a "1" to this file enables the debug messages and 286 + writing a "0" (default) to it disables them. Reads from 287 + this file return the current value.
+3 -1
Documentation/power/states.txt
··· 35 35 The default suspend mode (ie. the one to be used without writing anything into 36 36 /sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or 37 37 "s2idle", but it can be overridden by the value of the "mem_sleep_default" 38 - parameter in the kernel command line. 38 + parameter in the kernel command line. On some ACPI-based systems, depending on 39 + the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM 40 + is supported. 39 41 40 42 The properties of all of the sleep states are described below. 41 43
+2 -2
arch/arm/mach-tegra/cpuidle-tegra114.c
··· 60 60 return index; 61 61 } 62 62 63 - static void tegra114_idle_enter_freeze(struct cpuidle_device *dev, 63 + static void tegra114_idle_enter_s2idle(struct cpuidle_device *dev, 64 64 struct cpuidle_driver *drv, 65 65 int index) 66 66 { ··· 77 77 #ifdef CONFIG_PM_SLEEP 78 78 [1] = { 79 79 .enter = tegra114_idle_power_down, 80 - .enter_freeze = tegra114_idle_enter_freeze, 80 + .enter_s2idle = tegra114_idle_enter_s2idle, 81 81 .exit_latency = 500, 82 82 .target_residency = 1000, 83 83 .flags = CPUIDLE_FLAG_TIMER_STOP,
+3 -3
drivers/acpi/processor_idle.c
··· 791 791 return index; 792 792 } 793 793 794 - static void acpi_idle_enter_freeze(struct cpuidle_device *dev, 794 + static void acpi_idle_enter_s2idle(struct cpuidle_device *dev, 795 795 struct cpuidle_driver *drv, int index) 796 796 { 797 797 struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); ··· 876 876 drv->safe_state_index = count; 877 877 } 878 878 /* 879 - * Halt-induced C1 is not good for ->enter_freeze, because it 879 + * Halt-induced C1 is not good for ->enter_s2idle, because it 880 880 * re-enables interrupts on exit. Moreover, C1 is generally not 881 881 * particularly interesting from the suspend-to-idle angle, so 882 882 * avoid C1 and the situations in which we may need to fall back 883 883 * to it altogether. 884 884 */ 885 885 if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) 886 - state->enter_freeze = acpi_idle_enter_freeze; 886 + state->enter_s2idle = acpi_idle_enter_s2idle; 887 887 888 888 count++; 889 889 if (count == CPUIDLE_STATE_MAX)
+188 -14
drivers/acpi/sleep.c
··· 669 669 670 670 #define ACPI_LPS0_DSM_UUID "c4eb40a0-6cd2-11e2-bcfd-0800200c9a66" 671 671 672 + #define ACPI_LPS0_GET_DEVICE_CONSTRAINTS 1 672 673 #define ACPI_LPS0_SCREEN_OFF 3 673 674 #define ACPI_LPS0_SCREEN_ON 4 674 675 #define ACPI_LPS0_ENTRY 5 ··· 680 679 static acpi_handle lps0_device_handle; 681 680 static guid_t lps0_dsm_guid; 682 681 static char lps0_dsm_func_mask; 682 + 683 + /* Device constraint entry structure */ 684 + struct lpi_device_info { 685 + char *name; 686 + int enabled; 687 + union acpi_object *package; 688 + }; 689 + 690 + /* Constraint package structure */ 691 + struct lpi_device_constraint { 692 + int uid; 693 + int min_dstate; 694 + int function_states; 695 + }; 696 + 697 + struct lpi_constraints { 698 + acpi_handle handle; 699 + int min_dstate; 700 + }; 701 + 702 + static struct lpi_constraints *lpi_constraints_table; 703 + static int lpi_constraints_table_size; 704 + 705 + static void lpi_device_get_constraints(void) 706 + { 707 + union acpi_object *out_obj; 708 + int i; 709 + 710 + out_obj = acpi_evaluate_dsm_typed(lps0_device_handle, &lps0_dsm_guid, 711 + 1, ACPI_LPS0_GET_DEVICE_CONSTRAINTS, 712 + NULL, ACPI_TYPE_PACKAGE); 713 + 714 + acpi_handle_debug(lps0_device_handle, "_DSM function 1 eval %s\n", 715 + out_obj ? 
"successful" : "failed"); 716 + 717 + if (!out_obj) 718 + return; 719 + 720 + lpi_constraints_table = kcalloc(out_obj->package.count, 721 + sizeof(*lpi_constraints_table), 722 + GFP_KERNEL); 723 + if (!lpi_constraints_table) 724 + goto free_acpi_buffer; 725 + 726 + acpi_handle_debug(lps0_device_handle, "LPI: constraints list begin:\n"); 727 + 728 + for (i = 0; i < out_obj->package.count; i++) { 729 + struct lpi_constraints *constraint; 730 + acpi_status status; 731 + union acpi_object *package = &out_obj->package.elements[i]; 732 + struct lpi_device_info info = { }; 733 + int package_count = 0, j; 734 + 735 + if (!package) 736 + continue; 737 + 738 + for (j = 0; j < package->package.count; ++j) { 739 + union acpi_object *element = 740 + &(package->package.elements[j]); 741 + 742 + switch (element->type) { 743 + case ACPI_TYPE_INTEGER: 744 + info.enabled = element->integer.value; 745 + break; 746 + case ACPI_TYPE_STRING: 747 + info.name = element->string.pointer; 748 + break; 749 + case ACPI_TYPE_PACKAGE: 750 + package_count = element->package.count; 751 + info.package = element->package.elements; 752 + break; 753 + } 754 + } 755 + 756 + if (!info.enabled || !info.package || !info.name) 757 + continue; 758 + 759 + constraint = &lpi_constraints_table[lpi_constraints_table_size]; 760 + 761 + status = acpi_get_handle(NULL, info.name, &constraint->handle); 762 + if (ACPI_FAILURE(status)) 763 + continue; 764 + 765 + acpi_handle_debug(lps0_device_handle, 766 + "index:%d Name:%s\n", i, info.name); 767 + 768 + constraint->min_dstate = -1; 769 + 770 + for (j = 0; j < package_count; ++j) { 771 + union acpi_object *info_obj = &info.package[j]; 772 + union acpi_object *cnstr_pkg; 773 + union acpi_object *obj; 774 + struct lpi_device_constraint dev_info; 775 + 776 + switch (info_obj->type) { 777 + case ACPI_TYPE_INTEGER: 778 + /* version */ 779 + break; 780 + case ACPI_TYPE_PACKAGE: 781 + if (info_obj->package.count < 2) 782 + break; 783 + 784 + cnstr_pkg = 
info_obj->package.elements; 785 + obj = &cnstr_pkg[0]; 786 + dev_info.uid = obj->integer.value; 787 + obj = &cnstr_pkg[1]; 788 + dev_info.min_dstate = obj->integer.value; 789 + 790 + acpi_handle_debug(lps0_device_handle, 791 + "uid:%d min_dstate:%s\n", 792 + dev_info.uid, 793 + acpi_power_state_string(dev_info.min_dstate)); 794 + 795 + constraint->min_dstate = dev_info.min_dstate; 796 + break; 797 + } 798 + } 799 + 800 + if (constraint->min_dstate < 0) { 801 + acpi_handle_debug(lps0_device_handle, 802 + "Incomplete constraint defined\n"); 803 + continue; 804 + } 805 + 806 + lpi_constraints_table_size++; 807 + } 808 + 809 + acpi_handle_debug(lps0_device_handle, "LPI: constraints list end\n"); 810 + 811 + free_acpi_buffer: 812 + ACPI_FREE(out_obj); 813 + } 814 + 815 + static void lpi_check_constraints(void) 816 + { 817 + int i; 818 + 819 + for (i = 0; i < lpi_constraints_table_size; ++i) { 820 + struct acpi_device *adev; 821 + 822 + if (acpi_bus_get_device(lpi_constraints_table[i].handle, &adev)) 823 + continue; 824 + 825 + acpi_handle_debug(adev->handle, 826 + "LPI: required min power state:%s current power state:%s\n", 827 + acpi_power_state_string(lpi_constraints_table[i].min_dstate), 828 + acpi_power_state_string(adev->power.state)); 829 + 830 + if (!adev->flags.power_manageable) { 831 + acpi_handle_info(adev->handle, "LPI: Device not power manageble\n"); 832 + continue; 833 + } 834 + 835 + if (adev->power.state < lpi_constraints_table[i].min_dstate) 836 + acpi_handle_info(adev->handle, 837 + "LPI: Constraint not met; min power state:%s current power state:%s\n", 838 + acpi_power_state_string(lpi_constraints_table[i].min_dstate), 839 + acpi_power_state_string(adev->power.state)); 840 + } 841 + } 683 842 684 843 static void acpi_sleep_run_lps0_dsm(unsigned int func) 685 844 { ··· 875 714 if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) { 876 715 lps0_dsm_func_mask = bitmask; 877 716 lps0_device_handle = adev->handle; 717 + /* 718 + * Use 
suspend-to-idle by default if the default 719 + * suspend mode was not set from the command line. 720 + */ 721 + if (mem_sleep_default > PM_SUSPEND_MEM) 722 + mem_sleep_current = PM_SUSPEND_TO_IDLE; 878 723 } 879 724 880 725 acpi_handle_debug(adev->handle, "_DSM function mask: 0x%x\n", ··· 890 723 "_DSM function 0 evaluation failed\n"); 891 724 } 892 725 ACPI_FREE(out_obj); 726 + 727 + lpi_device_get_constraints(); 728 + 893 729 return 0; 894 730 } 895 731 ··· 901 731 .attach = lps0_device_attach, 902 732 }; 903 733 904 - static int acpi_freeze_begin(void) 734 + static int acpi_s2idle_begin(void) 905 735 { 906 736 acpi_scan_lock_acquire(); 907 737 s2idle_in_progress = true; 908 738 return 0; 909 739 } 910 740 911 - static int acpi_freeze_prepare(void) 741 + static int acpi_s2idle_prepare(void) 912 742 { 913 743 if (lps0_device_handle) { 914 744 acpi_sleep_run_lps0_dsm(ACPI_LPS0_SCREEN_OFF); ··· 928 758 return 0; 929 759 } 930 760 931 - static void acpi_freeze_wake(void) 761 + static void acpi_s2idle_wake(void) 932 762 { 763 + 764 + if (pm_debug_messages_on) 765 + lpi_check_constraints(); 766 + 933 767 /* 934 768 * If IRQD_WAKEUP_ARMED is not set for the SCI at this point, it means 935 769 * that the SCI has triggered while suspended, so cancel the wakeup in ··· 946 772 } 947 773 } 948 774 949 - static void acpi_freeze_sync(void) 775 + static void acpi_s2idle_sync(void) 950 776 { 951 777 /* 952 778 * Process all pending events in case there are any wakeup ones. 
··· 959 785 s2idle_wakeup = false; 960 786 } 961 787 962 - static void acpi_freeze_restore(void) 788 + static void acpi_s2idle_restore(void) 963 789 { 964 790 if (acpi_sci_irq_valid()) 965 791 disable_irq_wake(acpi_sci_irq); ··· 972 798 } 973 799 } 974 800 975 - static void acpi_freeze_end(void) 801 + static void acpi_s2idle_end(void) 976 802 { 977 803 s2idle_in_progress = false; 978 804 acpi_scan_lock_release(); 979 805 } 980 806 981 - static const struct platform_freeze_ops acpi_freeze_ops = { 982 - .begin = acpi_freeze_begin, 983 - .prepare = acpi_freeze_prepare, 984 - .wake = acpi_freeze_wake, 985 - .sync = acpi_freeze_sync, 986 - .restore = acpi_freeze_restore, 987 - .end = acpi_freeze_end, 807 + static const struct platform_s2idle_ops acpi_s2idle_ops = { 808 + .begin = acpi_s2idle_begin, 809 + .prepare = acpi_s2idle_prepare, 810 + .wake = acpi_s2idle_wake, 811 + .sync = acpi_s2idle_sync, 812 + .restore = acpi_s2idle_restore, 813 + .end = acpi_s2idle_end, 988 814 }; 989 815 990 816 static void acpi_sleep_suspend_setup(void) ··· 999 825 &acpi_suspend_ops_old : &acpi_suspend_ops); 1000 826 1001 827 acpi_scan_add_handler(&lps0_handler); 1002 - freeze_set_ops(&acpi_freeze_ops); 828 + s2idle_set_ops(&acpi_s2idle_ops); 1003 829 } 1004 830 1005 831 #else /* !CONFIG_SUSPEND */
+64 -39
drivers/base/power/main.c
··· 418 418 dev_name(dev), pm_verb(state.event), info, error); 419 419 } 420 420 421 - #ifdef CONFIG_PM_DEBUG 422 - static void dpm_show_time(ktime_t starttime, pm_message_t state, 421 + static void dpm_show_time(ktime_t starttime, pm_message_t state, int error, 423 422 const char *info) 424 423 { 425 424 ktime_t calltime; ··· 431 432 usecs = usecs64; 432 433 if (usecs == 0) 433 434 usecs = 1; 434 - pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", 435 - info ?: "", info ? " " : "", pm_verb(state.event), 436 - usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 435 + 436 + pm_pr_dbg("%s%s%s of devices %s after %ld.%03ld msecs\n", 437 + info ?: "", info ? " " : "", pm_verb(state.event), 438 + error ? "aborted" : "complete", 439 + usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); 437 440 } 438 - #else 439 - static inline void dpm_show_time(ktime_t starttime, pm_message_t state, 440 - const char *info) {} 441 - #endif /* CONFIG_PM_DEBUG */ 442 441 443 442 static int dpm_run_callback(pm_callback_t cb, struct device *dev, 444 443 pm_message_t state, const char *info) ··· 599 602 put_device(dev); 600 603 } 601 604 602 - /** 603 - * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. 604 - * @state: PM transition of the system being carried out. 605 - * 606 - * Call the "noirq" resume handlers for all devices in dpm_noirq_list and 607 - * enable device drivers to receive interrupts. 
608 - */ 609 - void dpm_resume_noirq(pm_message_t state) 605 + void dpm_noirq_resume_devices(pm_message_t state) 610 606 { 611 607 struct device *dev; 612 608 ktime_t starttime = ktime_get(); ··· 644 654 } 645 655 mutex_unlock(&dpm_list_mtx); 646 656 async_synchronize_full(); 647 - dpm_show_time(starttime, state, "noirq"); 657 + dpm_show_time(starttime, state, 0, "noirq"); 658 + trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); 659 + } 660 + 661 + void dpm_noirq_end(void) 662 + { 648 663 resume_device_irqs(); 649 664 device_wakeup_disarm_wake_irqs(); 650 665 cpuidle_resume(); 651 - trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false); 666 + } 667 + 668 + /** 669 + * dpm_resume_noirq - Execute "noirq resume" callbacks for all devices. 670 + * @state: PM transition of the system being carried out. 671 + * 672 + * Invoke the "noirq" resume callbacks for all devices in dpm_noirq_list and 673 + * allow device drivers' interrupt handlers to be called. 674 + */ 675 + void dpm_resume_noirq(pm_message_t state) 676 + { 677 + dpm_noirq_resume_devices(state); 678 + dpm_noirq_end(); 652 679 } 653 680 654 681 /** ··· 783 776 } 784 777 mutex_unlock(&dpm_list_mtx); 785 778 async_synchronize_full(); 786 - dpm_show_time(starttime, state, "early"); 779 + dpm_show_time(starttime, state, 0, "early"); 787 780 trace_suspend_resume(TPS("dpm_resume_early"), state.event, false); 788 781 } 789 782 ··· 955 948 } 956 949 mutex_unlock(&dpm_list_mtx); 957 950 async_synchronize_full(); 958 - dpm_show_time(starttime, state, NULL); 951 + dpm_show_time(starttime, state, 0, NULL); 959 952 960 953 cpufreq_resume(); 961 954 trace_suspend_resume(TPS("dpm_resume"), state.event, false); ··· 1105 1098 if (async_error) 1106 1099 goto Complete; 1107 1100 1101 + if (pm_wakeup_pending()) { 1102 + async_error = -EBUSY; 1103 + goto Complete; 1104 + } 1105 + 1108 1106 if (dev->power.syscore || dev->power.direct_complete) 1109 1107 goto Complete; 1110 1108 ··· 1170 1158 return 
__device_suspend_noirq(dev, pm_transition, false); 1171 1159 } 1172 1160 1173 - /** 1174 - * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. 1175 - * @state: PM transition of the system being carried out. 1176 - * 1177 - * Prevent device drivers from receiving interrupts and call the "noirq" suspend 1178 - * handlers for all non-sysdev devices. 1179 - */ 1180 - int dpm_suspend_noirq(pm_message_t state) 1161 + void dpm_noirq_begin(void) 1162 + { 1163 + cpuidle_pause(); 1164 + device_wakeup_arm_wake_irqs(); 1165 + suspend_device_irqs(); 1166 + } 1167 + 1168 + int dpm_noirq_suspend_devices(pm_message_t state) 1181 1169 { 1182 1170 ktime_t starttime = ktime_get(); 1183 1171 int error = 0; 1184 1172 1185 1173 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true); 1186 - cpuidle_pause(); 1187 - device_wakeup_arm_wake_irqs(); 1188 - suspend_device_irqs(); 1189 1174 mutex_lock(&dpm_list_mtx); 1190 1175 pm_transition = state; 1191 1176 async_error = 0; ··· 1217 1208 if (error) { 1218 1209 suspend_stats.failed_suspend_noirq++; 1219 1210 dpm_save_failed_step(SUSPEND_SUSPEND_NOIRQ); 1220 - dpm_resume_noirq(resume_event(state)); 1221 - } else { 1222 - dpm_show_time(starttime, state, "noirq"); 1223 1211 } 1212 + dpm_show_time(starttime, state, error, "noirq"); 1224 1213 trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false); 1225 1214 return error; 1215 + } 1216 + 1217 + /** 1218 + * dpm_suspend_noirq - Execute "noirq suspend" callbacks for all devices. 1219 + * @state: PM transition of the system being carried out. 1220 + * 1221 + * Prevent device drivers' interrupt handlers from being called and invoke 1222 + * "noirq" suspend callbacks for all non-sysdev devices. 
1223 + */ 1224 + int dpm_suspend_noirq(pm_message_t state) 1225 + { 1226 + int ret; 1227 + 1228 + dpm_noirq_begin(); 1229 + ret = dpm_noirq_suspend_devices(state); 1230 + if (ret) 1231 + dpm_resume_noirq(resume_event(state)); 1232 + 1233 + return ret; 1226 1234 } 1227 1235 1228 1236 /** ··· 1376 1350 suspend_stats.failed_suspend_late++; 1377 1351 dpm_save_failed_step(SUSPEND_SUSPEND_LATE); 1378 1352 dpm_resume_early(resume_event(state)); 1379 - } else { 1380 - dpm_show_time(starttime, state, "late"); 1381 1353 } 1354 + dpm_show_time(starttime, state, error, "late"); 1382 1355 trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false); 1383 1356 return error; 1384 1357 } ··· 1643 1618 if (error) { 1644 1619 suspend_stats.failed_suspend++; 1645 1620 dpm_save_failed_step(SUSPEND_SUSPEND); 1646 - } else 1647 - dpm_show_time(starttime, state, NULL); 1621 + } 1622 + dpm_show_time(starttime, state, error, NULL); 1648 1623 trace_suspend_resume(TPS("dpm_suspend"), state.event, false); 1649 1624 return error; 1650 1625 }
+1 -1
drivers/base/power/wakeup.c
··· 865 865 void pm_system_wakeup(void) 866 866 { 867 867 atomic_inc(&pm_abort_suspend); 868 - freeze_wake(); 868 + s2idle_wake(); 869 869 } 870 870 EXPORT_SYMBOL_GPL(pm_system_wakeup); 871 871
+9 -9
drivers/cpuidle/cpuidle.c
··· 77 77 struct cpuidle_device *dev, 78 78 unsigned int max_latency, 79 79 unsigned int forbidden_flags, 80 - bool freeze) 80 + bool s2idle) 81 81 { 82 82 unsigned int latency_req = 0; 83 83 int i, ret = 0; ··· 89 89 if (s->disabled || su->disable || s->exit_latency <= latency_req 90 90 || s->exit_latency > max_latency 91 91 || (s->flags & forbidden_flags) 92 - || (freeze && !s->enter_freeze)) 92 + || (s2idle && !s->enter_s2idle)) 93 93 continue; 94 94 95 95 latency_req = s->exit_latency; ··· 128 128 } 129 129 130 130 #ifdef CONFIG_SUSPEND 131 - static void enter_freeze_proper(struct cpuidle_driver *drv, 131 + static void enter_s2idle_proper(struct cpuidle_driver *drv, 132 132 struct cpuidle_device *dev, int index) 133 133 { 134 134 /* ··· 143 143 * suspended is generally unsafe. 144 144 */ 145 145 stop_critical_timings(); 146 - drv->states[index].enter_freeze(dev, drv, index); 146 + drv->states[index].enter_s2idle(dev, drv, index); 147 147 WARN_ON(!irqs_disabled()); 148 148 /* 149 149 * timekeeping_resume() that will be called by tick_unfreeze() for the ··· 155 155 } 156 156 157 157 /** 158 - * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. 158 + * cpuidle_enter_s2idle - Enter an idle state suitable for suspend-to-idle. 159 159 * @drv: cpuidle driver for the given CPU. 160 160 * @dev: cpuidle device for the given CPU. 161 161 * 162 - * If there are states with the ->enter_freeze callback, find the deepest of 162 + * If there are states with the ->enter_s2idle callback, find the deepest of 163 163 * them and enter it with frozen tick. 
164 164 */ 165 - int cpuidle_enter_freeze(struct cpuidle_driver *drv, struct cpuidle_device *dev) 165 + int cpuidle_enter_s2idle(struct cpuidle_driver *drv, struct cpuidle_device *dev) 166 166 { 167 167 int index; 168 168 169 169 /* 170 - * Find the deepest state with ->enter_freeze present, which guarantees 170 + * Find the deepest state with ->enter_s2idle present, which guarantees 171 171 * that interrupts won't be enabled when it exits and allows the tick to 172 172 * be frozen safely. 173 173 */ 174 174 index = find_deepest_state(drv, dev, UINT_MAX, 0, true); 175 175 if (index > 0) 176 - enter_freeze_proper(drv, dev, index); 176 + enter_s2idle_proper(drv, dev, index); 177 177 178 178 return index; 179 179 }
+2 -2
drivers/cpuidle/dt_idle_states.c
··· 41 41 /* 42 42 * Since this is not a "coupled" state, it's safe to assume interrupts 43 43 * won't be enabled when it exits allowing the tick to be frozen 44 - * safely. So enter() can be also enter_freeze() callback. 44 + * safely. So enter() can be also enter_s2idle() callback. 45 45 */ 46 - idle_state->enter_freeze = match_id->data; 46 + idle_state->enter_s2idle = match_id->data; 47 47 48 48 err = of_property_read_u32(state_node, "wakeup-latency-us", 49 49 &idle_state->exit_latency);
+90 -90
drivers/idle/intel_idle.c
··· 97 97 static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; 98 98 static int intel_idle(struct cpuidle_device *dev, 99 99 struct cpuidle_driver *drv, int index); 100 - static void intel_idle_freeze(struct cpuidle_device *dev, 100 + static void intel_idle_s2idle(struct cpuidle_device *dev, 101 101 struct cpuidle_driver *drv, int index); 102 102 static struct cpuidle_state *cpuidle_state_table; 103 103 ··· 132 132 .exit_latency = 3, 133 133 .target_residency = 6, 134 134 .enter = &intel_idle, 135 - .enter_freeze = intel_idle_freeze, }, 135 + .enter_s2idle = intel_idle_s2idle, }, 136 136 { 137 137 .name = "C1E", 138 138 .desc = "MWAIT 0x01", ··· 140 140 .exit_latency = 10, 141 141 .target_residency = 20, 142 142 .enter = &intel_idle, 143 - .enter_freeze = intel_idle_freeze, }, 143 + .enter_s2idle = intel_idle_s2idle, }, 144 144 { 145 145 .name = "C3", 146 146 .desc = "MWAIT 0x10", ··· 148 148 .exit_latency = 20, 149 149 .target_residency = 80, 150 150 .enter = &intel_idle, 151 - .enter_freeze = intel_idle_freeze, }, 151 + .enter_s2idle = intel_idle_s2idle, }, 152 152 { 153 153 .name = "C6", 154 154 .desc = "MWAIT 0x20", ··· 156 156 .exit_latency = 200, 157 157 .target_residency = 800, 158 158 .enter = &intel_idle, 159 - .enter_freeze = intel_idle_freeze, }, 159 + .enter_s2idle = intel_idle_s2idle, }, 160 160 { 161 161 .enter = NULL } 162 162 }; ··· 169 169 .exit_latency = 2, 170 170 .target_residency = 2, 171 171 .enter = &intel_idle, 172 - .enter_freeze = intel_idle_freeze, }, 172 + .enter_s2idle = intel_idle_s2idle, }, 173 173 { 174 174 .name = "C1E", 175 175 .desc = "MWAIT 0x01", ··· 177 177 .exit_latency = 10, 178 178 .target_residency = 20, 179 179 .enter = &intel_idle, 180 - .enter_freeze = intel_idle_freeze, }, 180 + .enter_s2idle = intel_idle_s2idle, }, 181 181 { 182 182 .name = "C3", 183 183 .desc = "MWAIT 0x10", ··· 185 185 .exit_latency = 80, 186 186 .target_residency = 211, 187 187 .enter = &intel_idle, 188 - .enter_freeze = 
intel_idle_freeze, }, 188 + .enter_s2idle = intel_idle_s2idle, }, 189 189 { 190 190 .name = "C6", 191 191 .desc = "MWAIT 0x20", ··· 193 193 .exit_latency = 104, 194 194 .target_residency = 345, 195 195 .enter = &intel_idle, 196 - .enter_freeze = intel_idle_freeze, }, 196 + .enter_s2idle = intel_idle_s2idle, }, 197 197 { 198 198 .name = "C7", 199 199 .desc = "MWAIT 0x30", ··· 201 201 .exit_latency = 109, 202 202 .target_residency = 345, 203 203 .enter = &intel_idle, 204 - .enter_freeze = intel_idle_freeze, }, 204 + .enter_s2idle = intel_idle_s2idle, }, 205 205 { 206 206 .enter = NULL } 207 207 }; ··· 214 214 .exit_latency = 1, 215 215 .target_residency = 1, 216 216 .enter = &intel_idle, 217 - .enter_freeze = intel_idle_freeze, }, 217 + .enter_s2idle = intel_idle_s2idle, }, 218 218 { 219 219 .name = "C6N", 220 220 .desc = "MWAIT 0x58", ··· 222 222 .exit_latency = 300, 223 223 .target_residency = 275, 224 224 .enter = &intel_idle, 225 - .enter_freeze = intel_idle_freeze, }, 225 + .enter_s2idle = intel_idle_s2idle, }, 226 226 { 227 227 .name = "C6S", 228 228 .desc = "MWAIT 0x52", ··· 230 230 .exit_latency = 500, 231 231 .target_residency = 560, 232 232 .enter = &intel_idle, 233 - .enter_freeze = intel_idle_freeze, }, 233 + .enter_s2idle = intel_idle_s2idle, }, 234 234 { 235 235 .name = "C7", 236 236 .desc = "MWAIT 0x60", ··· 238 238 .exit_latency = 1200, 239 239 .target_residency = 4000, 240 240 .enter = &intel_idle, 241 - .enter_freeze = intel_idle_freeze, }, 241 + .enter_s2idle = intel_idle_s2idle, }, 242 242 { 243 243 .name = "C7S", 244 244 .desc = "MWAIT 0x64", ··· 246 246 .exit_latency = 10000, 247 247 .target_residency = 20000, 248 248 .enter = &intel_idle, 249 - .enter_freeze = intel_idle_freeze, }, 249 + .enter_s2idle = intel_idle_s2idle, }, 250 250 { 251 251 .enter = NULL } 252 252 }; ··· 259 259 .exit_latency = 1, 260 260 .target_residency = 1, 261 261 .enter = &intel_idle, 262 - .enter_freeze = intel_idle_freeze, }, 262 + .enter_s2idle = intel_idle_s2idle, 
}, 263 263 { 264 264 .name = "C6N", 265 265 .desc = "MWAIT 0x58", ··· 267 267 .exit_latency = 80, 268 268 .target_residency = 275, 269 269 .enter = &intel_idle, 270 - .enter_freeze = intel_idle_freeze, }, 270 + .enter_s2idle = intel_idle_s2idle, }, 271 271 { 272 272 .name = "C6S", 273 273 .desc = "MWAIT 0x52", ··· 275 275 .exit_latency = 200, 276 276 .target_residency = 560, 277 277 .enter = &intel_idle, 278 - .enter_freeze = intel_idle_freeze, }, 278 + .enter_s2idle = intel_idle_s2idle, }, 279 279 { 280 280 .name = "C7", 281 281 .desc = "MWAIT 0x60", ··· 283 283 .exit_latency = 1200, 284 284 .target_residency = 4000, 285 285 .enter = &intel_idle, 286 - .enter_freeze = intel_idle_freeze, }, 286 + .enter_s2idle = intel_idle_s2idle, }, 287 287 { 288 288 .name = "C7S", 289 289 .desc = "MWAIT 0x64", ··· 291 291 .exit_latency = 10000, 292 292 .target_residency = 20000, 293 293 .enter = &intel_idle, 294 - .enter_freeze = intel_idle_freeze, }, 294 + .enter_s2idle = intel_idle_s2idle, }, 295 295 { 296 296 .enter = NULL } 297 297 }; ··· 304 304 .exit_latency = 1, 305 305 .target_residency = 1, 306 306 .enter = &intel_idle, 307 - .enter_freeze = intel_idle_freeze, }, 307 + .enter_s2idle = intel_idle_s2idle, }, 308 308 { 309 309 .name = "C1E", 310 310 .desc = "MWAIT 0x01", ··· 312 312 .exit_latency = 10, 313 313 .target_residency = 20, 314 314 .enter = &intel_idle, 315 - .enter_freeze = intel_idle_freeze, }, 315 + .enter_s2idle = intel_idle_s2idle, }, 316 316 { 317 317 .name = "C3", 318 318 .desc = "MWAIT 0x10", ··· 320 320 .exit_latency = 59, 321 321 .target_residency = 156, 322 322 .enter = &intel_idle, 323 - .enter_freeze = intel_idle_freeze, }, 323 + .enter_s2idle = intel_idle_s2idle, }, 324 324 { 325 325 .name = "C6", 326 326 .desc = "MWAIT 0x20", ··· 328 328 .exit_latency = 80, 329 329 .target_residency = 300, 330 330 .enter = &intel_idle, 331 - .enter_freeze = intel_idle_freeze, }, 331 + .enter_s2idle = intel_idle_s2idle, }, 332 332 { 333 333 .name = "C7", 334 334 
.desc = "MWAIT 0x30", ··· 336 336 .exit_latency = 87, 337 337 .target_residency = 300, 338 338 .enter = &intel_idle, 339 - .enter_freeze = intel_idle_freeze, }, 339 + .enter_s2idle = intel_idle_s2idle, }, 340 340 { 341 341 .enter = NULL } 342 342 }; ··· 349 349 .exit_latency = 1, 350 350 .target_residency = 1, 351 351 .enter = &intel_idle, 352 - .enter_freeze = intel_idle_freeze, }, 352 + .enter_s2idle = intel_idle_s2idle, }, 353 353 { 354 354 .name = "C1E", 355 355 .desc = "MWAIT 0x01", ··· 357 357 .exit_latency = 10, 358 358 .target_residency = 80, 359 359 .enter = &intel_idle, 360 - .enter_freeze = intel_idle_freeze, }, 360 + .enter_s2idle = intel_idle_s2idle, }, 361 361 { 362 362 .name = "C3", 363 363 .desc = "MWAIT 0x10", ··· 365 365 .exit_latency = 59, 366 366 .target_residency = 156, 367 367 .enter = &intel_idle, 368 - .enter_freeze = intel_idle_freeze, }, 368 + .enter_s2idle = intel_idle_s2idle, }, 369 369 { 370 370 .name = "C6", 371 371 .desc = "MWAIT 0x20", ··· 373 373 .exit_latency = 82, 374 374 .target_residency = 300, 375 375 .enter = &intel_idle, 376 - .enter_freeze = intel_idle_freeze, }, 376 + .enter_s2idle = intel_idle_s2idle, }, 377 377 { 378 378 .enter = NULL } 379 379 }; ··· 386 386 .exit_latency = 1, 387 387 .target_residency = 1, 388 388 .enter = &intel_idle, 389 - .enter_freeze = intel_idle_freeze, }, 389 + .enter_s2idle = intel_idle_s2idle, }, 390 390 { 391 391 .name = "C1E", 392 392 .desc = "MWAIT 0x01", ··· 394 394 .exit_latency = 10, 395 395 .target_residency = 250, 396 396 .enter = &intel_idle, 397 - .enter_freeze = intel_idle_freeze, }, 397 + .enter_s2idle = intel_idle_s2idle, }, 398 398 { 399 399 .name = "C3", 400 400 .desc = "MWAIT 0x10", ··· 402 402 .exit_latency = 59, 403 403 .target_residency = 300, 404 404 .enter = &intel_idle, 405 - .enter_freeze = intel_idle_freeze, }, 405 + .enter_s2idle = intel_idle_s2idle, }, 406 406 { 407 407 .name = "C6", 408 408 .desc = "MWAIT 0x20", ··· 410 410 .exit_latency = 84, 411 411 
.target_residency = 400, 412 412 .enter = &intel_idle, 413 - .enter_freeze = intel_idle_freeze, }, 413 + .enter_s2idle = intel_idle_s2idle, }, 414 414 { 415 415 .enter = NULL } 416 416 }; ··· 423 423 .exit_latency = 1, 424 424 .target_residency = 1, 425 425 .enter = &intel_idle, 426 - .enter_freeze = intel_idle_freeze, }, 426 + .enter_s2idle = intel_idle_s2idle, }, 427 427 { 428 428 .name = "C1E", 429 429 .desc = "MWAIT 0x01", ··· 431 431 .exit_latency = 10, 432 432 .target_residency = 500, 433 433 .enter = &intel_idle, 434 - .enter_freeze = intel_idle_freeze, }, 434 + .enter_s2idle = intel_idle_s2idle, }, 435 435 { 436 436 .name = "C3", 437 437 .desc = "MWAIT 0x10", ··· 439 439 .exit_latency = 59, 440 440 .target_residency = 600, 441 441 .enter = &intel_idle, 442 - .enter_freeze = intel_idle_freeze, }, 442 + .enter_s2idle = intel_idle_s2idle, }, 443 443 { 444 444 .name = "C6", 445 445 .desc = "MWAIT 0x20", ··· 447 447 .exit_latency = 88, 448 448 .target_residency = 700, 449 449 .enter = &intel_idle, 450 - .enter_freeze = intel_idle_freeze, }, 450 + .enter_s2idle = intel_idle_s2idle, }, 451 451 { 452 452 .enter = NULL } 453 453 }; ··· 460 460 .exit_latency = 2, 461 461 .target_residency = 2, 462 462 .enter = &intel_idle, 463 - .enter_freeze = intel_idle_freeze, }, 463 + .enter_s2idle = intel_idle_s2idle, }, 464 464 { 465 465 .name = "C1E", 466 466 .desc = "MWAIT 0x01", ··· 468 468 .exit_latency = 10, 469 469 .target_residency = 20, 470 470 .enter = &intel_idle, 471 - .enter_freeze = intel_idle_freeze, }, 471 + .enter_s2idle = intel_idle_s2idle, }, 472 472 { 473 473 .name = "C3", 474 474 .desc = "MWAIT 0x10", ··· 476 476 .exit_latency = 33, 477 477 .target_residency = 100, 478 478 .enter = &intel_idle, 479 - .enter_freeze = intel_idle_freeze, }, 479 + .enter_s2idle = intel_idle_s2idle, }, 480 480 { 481 481 .name = "C6", 482 482 .desc = "MWAIT 0x20", ··· 484 484 .exit_latency = 133, 485 485 .target_residency = 400, 486 486 .enter = &intel_idle, 487 - .enter_freeze = 
intel_idle_freeze, }, 487 + .enter_s2idle = intel_idle_s2idle, }, 488 488 { 489 489 .name = "C7s", 490 490 .desc = "MWAIT 0x32", ··· 492 492 .exit_latency = 166, 493 493 .target_residency = 500, 494 494 .enter = &intel_idle, 495 - .enter_freeze = intel_idle_freeze, }, 495 + .enter_s2idle = intel_idle_s2idle, }, 496 496 { 497 497 .name = "C8", 498 498 .desc = "MWAIT 0x40", ··· 500 500 .exit_latency = 300, 501 501 .target_residency = 900, 502 502 .enter = &intel_idle, 503 - .enter_freeze = intel_idle_freeze, }, 503 + .enter_s2idle = intel_idle_s2idle, }, 504 504 { 505 505 .name = "C9", 506 506 .desc = "MWAIT 0x50", ··· 508 508 .exit_latency = 600, 509 509 .target_residency = 1800, 510 510 .enter = &intel_idle, 511 - .enter_freeze = intel_idle_freeze, }, 511 + .enter_s2idle = intel_idle_s2idle, }, 512 512 { 513 513 .name = "C10", 514 514 .desc = "MWAIT 0x60", ··· 516 516 .exit_latency = 2600, 517 517 .target_residency = 7700, 518 518 .enter = &intel_idle, 519 - .enter_freeze = intel_idle_freeze, }, 519 + .enter_s2idle = intel_idle_s2idle, }, 520 520 { 521 521 .enter = NULL } 522 522 }; ··· 528 528 .exit_latency = 2, 529 529 .target_residency = 2, 530 530 .enter = &intel_idle, 531 - .enter_freeze = intel_idle_freeze, }, 531 + .enter_s2idle = intel_idle_s2idle, }, 532 532 { 533 533 .name = "C1E", 534 534 .desc = "MWAIT 0x01", ··· 536 536 .exit_latency = 10, 537 537 .target_residency = 20, 538 538 .enter = &intel_idle, 539 - .enter_freeze = intel_idle_freeze, }, 539 + .enter_s2idle = intel_idle_s2idle, }, 540 540 { 541 541 .name = "C3", 542 542 .desc = "MWAIT 0x10", ··· 544 544 .exit_latency = 40, 545 545 .target_residency = 100, 546 546 .enter = &intel_idle, 547 - .enter_freeze = intel_idle_freeze, }, 547 + .enter_s2idle = intel_idle_s2idle, }, 548 548 { 549 549 .name = "C6", 550 550 .desc = "MWAIT 0x20", ··· 552 552 .exit_latency = 133, 553 553 .target_residency = 400, 554 554 .enter = &intel_idle, 555 - .enter_freeze = intel_idle_freeze, }, 555 + .enter_s2idle = 
intel_idle_s2idle, }, 556 556 { 557 557 .name = "C7s", 558 558 .desc = "MWAIT 0x32", ··· 560 560 .exit_latency = 166, 561 561 .target_residency = 500, 562 562 .enter = &intel_idle, 563 - .enter_freeze = intel_idle_freeze, }, 563 + .enter_s2idle = intel_idle_s2idle, }, 564 564 { 565 565 .name = "C8", 566 566 .desc = "MWAIT 0x40", ··· 568 568 .exit_latency = 300, 569 569 .target_residency = 900, 570 570 .enter = &intel_idle, 571 - .enter_freeze = intel_idle_freeze, }, 571 + .enter_s2idle = intel_idle_s2idle, }, 572 572 { 573 573 .name = "C9", 574 574 .desc = "MWAIT 0x50", ··· 576 576 .exit_latency = 600, 577 577 .target_residency = 1800, 578 578 .enter = &intel_idle, 579 - .enter_freeze = intel_idle_freeze, }, 579 + .enter_s2idle = intel_idle_s2idle, }, 580 580 { 581 581 .name = "C10", 582 582 .desc = "MWAIT 0x60", ··· 584 584 .exit_latency = 2600, 585 585 .target_residency = 7700, 586 586 .enter = &intel_idle, 587 - .enter_freeze = intel_idle_freeze, }, 587 + .enter_s2idle = intel_idle_s2idle, }, 588 588 { 589 589 .enter = NULL } 590 590 }; ··· 597 597 .exit_latency = 2, 598 598 .target_residency = 2, 599 599 .enter = &intel_idle, 600 - .enter_freeze = intel_idle_freeze, }, 600 + .enter_s2idle = intel_idle_s2idle, }, 601 601 { 602 602 .name = "C1E", 603 603 .desc = "MWAIT 0x01", ··· 605 605 .exit_latency = 10, 606 606 .target_residency = 20, 607 607 .enter = &intel_idle, 608 - .enter_freeze = intel_idle_freeze, }, 608 + .enter_s2idle = intel_idle_s2idle, }, 609 609 { 610 610 .name = "C3", 611 611 .desc = "MWAIT 0x10", ··· 613 613 .exit_latency = 70, 614 614 .target_residency = 100, 615 615 .enter = &intel_idle, 616 - .enter_freeze = intel_idle_freeze, }, 616 + .enter_s2idle = intel_idle_s2idle, }, 617 617 { 618 618 .name = "C6", 619 619 .desc = "MWAIT 0x20", ··· 621 621 .exit_latency = 85, 622 622 .target_residency = 200, 623 623 .enter = &intel_idle, 624 - .enter_freeze = intel_idle_freeze, }, 624 + .enter_s2idle = intel_idle_s2idle, }, 625 625 { 626 626 .name = 
"C7s", 627 627 .desc = "MWAIT 0x33", ··· 629 629 .exit_latency = 124, 630 630 .target_residency = 800, 631 631 .enter = &intel_idle, 632 - .enter_freeze = intel_idle_freeze, }, 632 + .enter_s2idle = intel_idle_s2idle, }, 633 633 { 634 634 .name = "C8", 635 635 .desc = "MWAIT 0x40", ··· 637 637 .exit_latency = 200, 638 638 .target_residency = 800, 639 639 .enter = &intel_idle, 640 - .enter_freeze = intel_idle_freeze, }, 640 + .enter_s2idle = intel_idle_s2idle, }, 641 641 { 642 642 .name = "C9", 643 643 .desc = "MWAIT 0x50", ··· 645 645 .exit_latency = 480, 646 646 .target_residency = 5000, 647 647 .enter = &intel_idle, 648 - .enter_freeze = intel_idle_freeze, }, 648 + .enter_s2idle = intel_idle_s2idle, }, 649 649 { 650 650 .name = "C10", 651 651 .desc = "MWAIT 0x60", ··· 653 653 .exit_latency = 890, 654 654 .target_residency = 5000, 655 655 .enter = &intel_idle, 656 - .enter_freeze = intel_idle_freeze, }, 656 + .enter_s2idle = intel_idle_s2idle, }, 657 657 { 658 658 .enter = NULL } 659 659 }; ··· 666 666 .exit_latency = 2, 667 667 .target_residency = 2, 668 668 .enter = &intel_idle, 669 - .enter_freeze = intel_idle_freeze, }, 669 + .enter_s2idle = intel_idle_s2idle, }, 670 670 { 671 671 .name = "C1E", 672 672 .desc = "MWAIT 0x01", ··· 674 674 .exit_latency = 10, 675 675 .target_residency = 20, 676 676 .enter = &intel_idle, 677 - .enter_freeze = intel_idle_freeze, }, 677 + .enter_s2idle = intel_idle_s2idle, }, 678 678 { 679 679 .name = "C6", 680 680 .desc = "MWAIT 0x20", ··· 682 682 .exit_latency = 133, 683 683 .target_residency = 600, 684 684 .enter = &intel_idle, 685 - .enter_freeze = intel_idle_freeze, }, 685 + .enter_s2idle = intel_idle_s2idle, }, 686 686 { 687 687 .enter = NULL } 688 688 }; ··· 695 695 .exit_latency = 10, 696 696 .target_residency = 20, 697 697 .enter = &intel_idle, 698 - .enter_freeze = intel_idle_freeze, }, 698 + .enter_s2idle = intel_idle_s2idle, }, 699 699 { 700 700 .name = "C2", 701 701 .desc = "MWAIT 0x10", ··· 703 703 .exit_latency = 20, 
704 704 .target_residency = 80, 705 705 .enter = &intel_idle, 706 - .enter_freeze = intel_idle_freeze, }, 706 + .enter_s2idle = intel_idle_s2idle, }, 707 707 { 708 708 .name = "C4", 709 709 .desc = "MWAIT 0x30", ··· 711 711 .exit_latency = 100, 712 712 .target_residency = 400, 713 713 .enter = &intel_idle, 714 - .enter_freeze = intel_idle_freeze, }, 714 + .enter_s2idle = intel_idle_s2idle, }, 715 715 { 716 716 .name = "C6", 717 717 .desc = "MWAIT 0x52", ··· 719 719 .exit_latency = 140, 720 720 .target_residency = 560, 721 721 .enter = &intel_idle, 722 - .enter_freeze = intel_idle_freeze, }, 722 + .enter_s2idle = intel_idle_s2idle, }, 723 723 { 724 724 .enter = NULL } 725 725 }; ··· 731 731 .exit_latency = 1, 732 732 .target_residency = 4, 733 733 .enter = &intel_idle, 734 - .enter_freeze = intel_idle_freeze, }, 734 + .enter_s2idle = intel_idle_s2idle, }, 735 735 { 736 736 .name = "C4", 737 737 .desc = "MWAIT 0x30", ··· 739 739 .exit_latency = 100, 740 740 .target_residency = 400, 741 741 .enter = &intel_idle, 742 - .enter_freeze = intel_idle_freeze, }, 742 + .enter_s2idle = intel_idle_s2idle, }, 743 743 { 744 744 .name = "C6", 745 745 .desc = "MWAIT 0x52", ··· 747 747 .exit_latency = 140, 748 748 .target_residency = 560, 749 749 .enter = &intel_idle, 750 - .enter_freeze = intel_idle_freeze, }, 750 + .enter_s2idle = intel_idle_s2idle, }, 751 751 { 752 752 .name = "C7", 753 753 .desc = "MWAIT 0x60", ··· 755 755 .exit_latency = 1200, 756 756 .target_residency = 4000, 757 757 .enter = &intel_idle, 758 - .enter_freeze = intel_idle_freeze, }, 758 + .enter_s2idle = intel_idle_s2idle, }, 759 759 { 760 760 .name = "C9", 761 761 .desc = "MWAIT 0x64", ··· 763 763 .exit_latency = 10000, 764 764 .target_residency = 20000, 765 765 .enter = &intel_idle, 766 - .enter_freeze = intel_idle_freeze, }, 766 + .enter_s2idle = intel_idle_s2idle, }, 767 767 { 768 768 .enter = NULL } 769 769 }; ··· 775 775 .exit_latency = 2, 776 776 .target_residency = 2, 777 777 .enter = &intel_idle, 778 - 
.enter_freeze = intel_idle_freeze, }, 778 + .enter_s2idle = intel_idle_s2idle, }, 779 779 { 780 780 .name = "C6", 781 781 .desc = "MWAIT 0x51", ··· 783 783 .exit_latency = 15, 784 784 .target_residency = 45, 785 785 .enter = &intel_idle, 786 - .enter_freeze = intel_idle_freeze, }, 786 + .enter_s2idle = intel_idle_s2idle, }, 787 787 { 788 788 .enter = NULL } 789 789 }; ··· 795 795 .exit_latency = 1, 796 796 .target_residency = 2, 797 797 .enter = &intel_idle, 798 - .enter_freeze = intel_idle_freeze }, 798 + .enter_s2idle = intel_idle_s2idle }, 799 799 { 800 800 .name = "C6", 801 801 .desc = "MWAIT 0x10", ··· 803 803 .exit_latency = 120, 804 804 .target_residency = 500, 805 805 .enter = &intel_idle, 806 - .enter_freeze = intel_idle_freeze }, 806 + .enter_s2idle = intel_idle_s2idle }, 807 807 { 808 808 .enter = NULL } 809 809 }; ··· 816 816 .exit_latency = 2, 817 817 .target_residency = 2, 818 818 .enter = &intel_idle, 819 - .enter_freeze = intel_idle_freeze, }, 819 + .enter_s2idle = intel_idle_s2idle, }, 820 820 { 821 821 .name = "C1E", 822 822 .desc = "MWAIT 0x01", ··· 824 824 .exit_latency = 10, 825 825 .target_residency = 20, 826 826 .enter = &intel_idle, 827 - .enter_freeze = intel_idle_freeze, }, 827 + .enter_s2idle = intel_idle_s2idle, }, 828 828 { 829 829 .name = "C6", 830 830 .desc = "MWAIT 0x20", ··· 832 832 .exit_latency = 133, 833 833 .target_residency = 133, 834 834 .enter = &intel_idle, 835 - .enter_freeze = intel_idle_freeze, }, 835 + .enter_s2idle = intel_idle_s2idle, }, 836 836 { 837 837 .name = "C7s", 838 838 .desc = "MWAIT 0x31", ··· 840 840 .exit_latency = 155, 841 841 .target_residency = 155, 842 842 .enter = &intel_idle, 843 - .enter_freeze = intel_idle_freeze, }, 843 + .enter_s2idle = intel_idle_s2idle, }, 844 844 { 845 845 .name = "C8", 846 846 .desc = "MWAIT 0x40", ··· 848 848 .exit_latency = 1000, 849 849 .target_residency = 1000, 850 850 .enter = &intel_idle, 851 - .enter_freeze = intel_idle_freeze, }, 851 + .enter_s2idle = 
intel_idle_s2idle, }, 852 852 { 853 853 .name = "C9", 854 854 .desc = "MWAIT 0x50", ··· 856 856 .exit_latency = 2000, 857 857 .target_residency = 2000, 858 858 .enter = &intel_idle, 859 - .enter_freeze = intel_idle_freeze, }, 859 + .enter_s2idle = intel_idle_s2idle, }, 860 860 { 861 861 .name = "C10", 862 862 .desc = "MWAIT 0x60", ··· 864 864 .exit_latency = 10000, 865 865 .target_residency = 10000, 866 866 .enter = &intel_idle, 867 - .enter_freeze = intel_idle_freeze, }, 867 + .enter_s2idle = intel_idle_s2idle, }, 868 868 { 869 869 .enter = NULL } 870 870 }; ··· 877 877 .exit_latency = 2, 878 878 .target_residency = 2, 879 879 .enter = &intel_idle, 880 - .enter_freeze = intel_idle_freeze, }, 880 + .enter_s2idle = intel_idle_s2idle, }, 881 881 { 882 882 .name = "C1E", 883 883 .desc = "MWAIT 0x01", ··· 885 885 .exit_latency = 10, 886 886 .target_residency = 20, 887 887 .enter = &intel_idle, 888 - .enter_freeze = intel_idle_freeze, }, 888 + .enter_s2idle = intel_idle_s2idle, }, 889 889 { 890 890 .name = "C6", 891 891 .desc = "MWAIT 0x20", ··· 893 893 .exit_latency = 50, 894 894 .target_residency = 500, 895 895 .enter = &intel_idle, 896 - .enter_freeze = intel_idle_freeze, }, 896 + .enter_s2idle = intel_idle_s2idle, }, 897 897 { 898 898 .enter = NULL } 899 899 }; ··· 936 936 } 937 937 938 938 /** 939 - * intel_idle_freeze - simplified "enter" callback routine for suspend-to-idle 939 + * intel_idle_s2idle - simplified "enter" callback routine for suspend-to-idle 940 940 * @dev: cpuidle_device 941 941 * @drv: cpuidle driver 942 942 * @index: state index 943 943 */ 944 - static void intel_idle_freeze(struct cpuidle_device *dev, 944 + static void intel_idle_s2idle(struct cpuidle_device *dev, 945 945 struct cpuidle_driver *drv, int index) 946 946 { 947 947 unsigned long ecx = 1; /* break on interrupt flag */ ··· 1338 1338 int num_substates, mwait_hint, mwait_cstate; 1339 1339 1340 1340 if ((cpuidle_state_table[cstate].enter == NULL) && 1341 - 
(cpuidle_state_table[cstate].enter_freeze == NULL)) 1341 + (cpuidle_state_table[cstate].enter_s2idle == NULL)) 1342 1342 break; 1343 1343 1344 1344 if (cstate + 1 > max_cstate) {
+14 -3
drivers/platform/x86/intel-hid.c
··· 203 203 acpi_status status; 204 204 205 205 if (priv->wakeup_mode) { 206 + /* 207 + * Needed for wakeup from suspend-to-idle to work on some 208 + * platforms that don't expose the 5-button array, but still 209 + * send notifies with the power button event code to this 210 + * device object on power button actions while suspended. 211 + */ 212 + if (event == 0xce) 213 + goto wakeup; 214 + 206 215 /* Wake up on 5-button array events only. */ 207 216 if (event == 0xc0 || !priv->array) 208 217 return; 209 218 210 - if (sparse_keymap_entry_from_scancode(priv->array, event)) 211 - pm_wakeup_hard_event(&device->dev); 212 - else 219 + if (!sparse_keymap_entry_from_scancode(priv->array, event)) { 213 220 dev_info(&device->dev, "unknown event 0x%x\n", event); 221 + return; 222 + } 214 223 224 + wakeup: 225 + pm_wakeup_hard_event(&device->dev); 215 226 return; 216 227 } 217 228
+1 -1
drivers/regulator/of_regulator.c
··· 150 150 suspend_state = &constraints->state_disk; 151 151 break; 152 152 case PM_SUSPEND_ON: 153 - case PM_SUSPEND_FREEZE: 153 + case PM_SUSPEND_TO_IDLE: 154 154 case PM_SUSPEND_STANDBY: 155 155 default: 156 156 continue;
+4 -4
include/linux/cpuidle.h
··· 52 52 int (*enter_dead) (struct cpuidle_device *dev, int index); 53 53 54 54 /* 55 - * CPUs execute ->enter_freeze with the local tick or entire timekeeping 55 + * CPUs execute ->enter_s2idle with the local tick or entire timekeeping 56 56 * suspended, so it must not re-enable interrupts at any point (even 57 57 * temporarily) or attempt to change states of clock event devices. 58 58 */ 59 - void (*enter_freeze) (struct cpuidle_device *dev, 59 + void (*enter_s2idle) (struct cpuidle_device *dev, 60 60 struct cpuidle_driver *drv, 61 61 int index); 62 62 }; ··· 198 198 #ifdef CONFIG_CPU_IDLE 199 199 extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv, 200 200 struct cpuidle_device *dev); 201 - extern int cpuidle_enter_freeze(struct cpuidle_driver *drv, 201 + extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv, 202 202 struct cpuidle_device *dev); 203 203 extern void cpuidle_use_deepest_state(bool enable); 204 204 #else 205 205 static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv, 206 206 struct cpuidle_device *dev) 207 207 {return -ENODEV; } 208 - static inline int cpuidle_enter_freeze(struct cpuidle_driver *drv, 208 + static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv, 209 209 struct cpuidle_device *dev) 210 210 {return -ENODEV; } 211 211 static inline void cpuidle_use_deepest_state(bool enable)
+4
include/linux/pm.h
··· 689 689 extern void device_pm_lock(void); 690 690 extern void dpm_resume_start(pm_message_t state); 691 691 extern void dpm_resume_end(pm_message_t state); 692 + extern void dpm_noirq_resume_devices(pm_message_t state); 693 + extern void dpm_noirq_end(void); 692 694 extern void dpm_resume_noirq(pm_message_t state); 693 695 extern void dpm_resume_early(pm_message_t state); 694 696 extern void dpm_resume(pm_message_t state); ··· 699 697 extern void device_pm_unlock(void); 700 698 extern int dpm_suspend_end(pm_message_t state); 701 699 extern int dpm_suspend_start(pm_message_t state); 700 + extern void dpm_noirq_begin(void); 701 + extern int dpm_noirq_suspend_devices(pm_message_t state); 702 702 extern int dpm_suspend_noirq(pm_message_t state); 703 703 extern int dpm_suspend_late(pm_message_t state); 704 704 extern int dpm_suspend(pm_message_t state);
+33 -15
include/linux/suspend.h
··· 33 33 typedef int __bitwise suspend_state_t; 34 34 35 35 #define PM_SUSPEND_ON ((__force suspend_state_t) 0) 36 - #define PM_SUSPEND_FREEZE ((__force suspend_state_t) 1) 36 + #define PM_SUSPEND_TO_IDLE ((__force suspend_state_t) 1) 37 37 #define PM_SUSPEND_STANDBY ((__force suspend_state_t) 2) 38 38 #define PM_SUSPEND_MEM ((__force suspend_state_t) 3) 39 - #define PM_SUSPEND_MIN PM_SUSPEND_FREEZE 39 + #define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE 40 40 #define PM_SUSPEND_MAX ((__force suspend_state_t) 4) 41 41 42 42 enum suspend_stat_step { ··· 186 186 void (*recover)(void); 187 187 }; 188 188 189 - struct platform_freeze_ops { 189 + struct platform_s2idle_ops { 190 190 int (*begin)(void); 191 191 int (*prepare)(void); 192 192 void (*wake)(void); ··· 196 196 }; 197 197 198 198 #ifdef CONFIG_SUSPEND 199 + extern suspend_state_t mem_sleep_current; 200 + extern suspend_state_t mem_sleep_default; 201 + 199 202 /** 200 203 * suspend_set_ops - set platform dependent suspend operations 201 204 * @ops: The new suspend operations to set. ··· 237 234 } 238 235 239 236 /* Suspend-to-idle state machnine. */ 240 - enum freeze_state { 241 - FREEZE_STATE_NONE, /* Not suspended/suspending. */ 242 - FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */ 243 - FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */ 237 + enum s2idle_states { 238 + S2IDLE_STATE_NONE, /* Not suspended/suspending. */ 239 + S2IDLE_STATE_ENTER, /* Enter suspend-to-idle. */ 240 + S2IDLE_STATE_WAKE, /* Wake up from suspend-to-idle. 
*/ 244 241 }; 245 242 246 - extern enum freeze_state __read_mostly suspend_freeze_state; 243 + extern enum s2idle_states __read_mostly s2idle_state; 247 244 248 - static inline bool idle_should_freeze(void) 245 + static inline bool idle_should_enter_s2idle(void) 249 246 { 250 - return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER); 247 + return unlikely(s2idle_state == S2IDLE_STATE_ENTER); 251 248 } 252 249 253 250 extern void __init pm_states_init(void); 254 - extern void freeze_set_ops(const struct platform_freeze_ops *ops); 255 - extern void freeze_wake(void); 251 + extern void s2idle_set_ops(const struct platform_s2idle_ops *ops); 252 + extern void s2idle_wake(void); 256 253 257 254 /** 258 255 * arch_suspend_disable_irqs - disable IRQs for suspend ··· 284 281 285 282 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} 286 283 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } 287 - static inline bool idle_should_freeze(void) { return false; } 284 + static inline bool idle_should_enter_s2idle(void) { return false; } 288 285 static inline void __init pm_states_init(void) {} 289 - static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} 290 - static inline void freeze_wake(void) {} 286 + static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {} 287 + static inline void s2idle_wake(void) {} 291 288 #endif /* !CONFIG_SUSPEND */ 292 289 293 290 /* struct pbe is used for creating lists of pages that should be restored ··· 430 427 /* drivers/base/power/wakeup.c */ 431 428 extern bool events_check_enabled; 432 429 extern unsigned int pm_wakeup_irq; 430 + extern suspend_state_t pm_suspend_target_state; 433 431 434 432 extern bool pm_wakeup_pending(void); 435 433 extern void pm_system_wakeup(void); ··· 495 491 496 492 #ifdef CONFIG_PM_SLEEP_DEBUG 497 493 extern bool pm_print_times_enabled; 494 + extern bool pm_debug_messages_on; 495 + extern __printf(2, 3) void __pm_pr_dbg(bool 
defer, const char *fmt, ...); 498 496 #else 499 497 #define pm_print_times_enabled (false) 498 + #define pm_debug_messages_on (false) 499 + 500 + #include <linux/printk.h> 501 + 502 + #define __pm_pr_dbg(defer, fmt, ...) \ 503 + no_printk(KERN_DEBUG fmt, ##__VA_ARGS__) 500 504 #endif 505 + 506 + #define pm_pr_dbg(fmt, ...) \ 507 + __pm_pr_dbg(false, fmt, ##__VA_ARGS__) 508 + 509 + #define pm_deferred_pr_dbg(fmt, ...) \ 510 + __pm_pr_dbg(true, fmt, ##__VA_ARGS__) 501 511 502 512 #ifdef CONFIG_PM_AUTOSLEEP 503 513
+17 -12
kernel/power/hibernate.c
··· 651 651 int error; 652 652 unsigned int flags; 653 653 654 - pr_debug("Loading hibernation image.\n"); 654 + pm_pr_dbg("Loading hibernation image.\n"); 655 655 656 656 lock_device_hotplug(); 657 657 error = create_basic_memory_bitmaps(); ··· 681 681 bool snapshot_test = false; 682 682 683 683 if (!hibernation_available()) { 684 - pr_debug("Hibernation not available.\n"); 684 + pm_pr_dbg("Hibernation not available.\n"); 685 685 return -EPERM; 686 686 } 687 687 ··· 692 692 goto Unlock; 693 693 } 694 694 695 + pr_info("hibernation entry\n"); 695 696 pm_prepare_console(); 696 697 error = __pm_notifier_call_chain(PM_HIBERNATION_PREPARE, -1, &nr_calls); 697 698 if (error) { ··· 728 727 else 729 728 flags |= SF_CRC32_MODE; 730 729 731 - pr_debug("Writing image.\n"); 730 + pm_pr_dbg("Writing image.\n"); 732 731 error = swsusp_write(flags); 733 732 swsusp_free(); 734 733 if (!error) { ··· 740 739 in_suspend = 0; 741 740 pm_restore_gfp_mask(); 742 741 } else { 743 - pr_debug("Image restored successfully.\n"); 742 + pm_pr_dbg("Image restored successfully.\n"); 744 743 } 745 744 746 745 Free_bitmaps: ··· 748 747 Thaw: 749 748 unlock_device_hotplug(); 750 749 if (snapshot_test) { 751 - pr_debug("Checking hibernation image\n"); 750 + pm_pr_dbg("Checking hibernation image\n"); 752 751 error = swsusp_check(); 753 752 if (!error) 754 753 error = load_image_and_restore(); ··· 763 762 atomic_inc(&snapshot_device_available); 764 763 Unlock: 765 764 unlock_system_sleep(); 765 + pr_info("hibernation exit\n"); 766 + 766 767 return error; 767 768 } 768 769 ··· 814 811 goto Unlock; 815 812 } 816 813 817 - pr_debug("Checking hibernation image partition %s\n", resume_file); 814 + pm_pr_dbg("Checking hibernation image partition %s\n", resume_file); 818 815 819 816 if (resume_delay) { 820 817 pr_info("Waiting %dsec before reading resume device ...\n", ··· 856 853 } 857 854 858 855 Check_image: 859 - pr_debug("Hibernation image partition %d:%d present\n", 856 + pm_pr_dbg("Hibernation image 
partition %d:%d present\n", 860 857 MAJOR(swsusp_resume_device), MINOR(swsusp_resume_device)); 861 858 862 - pr_debug("Looking for hibernation image.\n"); 859 + pm_pr_dbg("Looking for hibernation image.\n"); 863 860 error = swsusp_check(); 864 861 if (error) 865 862 goto Unlock; ··· 871 868 goto Unlock; 872 869 } 873 870 871 + pr_info("resume from hibernation\n"); 874 872 pm_prepare_console(); 875 873 error = __pm_notifier_call_chain(PM_RESTORE_PREPARE, -1, &nr_calls); 876 874 if (error) { ··· 879 875 goto Close_Finish; 880 876 } 881 877 882 - pr_debug("Preparing processes for restore.\n"); 878 + pm_pr_dbg("Preparing processes for restore.\n"); 883 879 error = freeze_processes(); 884 880 if (error) 885 881 goto Close_Finish; ··· 888 884 Finish: 889 885 __pm_notifier_call_chain(PM_POST_RESTORE, nr_calls, NULL); 890 886 pm_restore_console(); 887 + pr_info("resume from hibernation failed (%d)\n", error); 891 888 atomic_inc(&snapshot_device_available); 892 889 /* For success case, the suspend path will release the lock */ 893 890 Unlock: 894 891 mutex_unlock(&pm_mutex); 895 - pr_debug("Hibernation image not present or could not be loaded.\n"); 892 + pm_pr_dbg("Hibernation image not present or could not be loaded.\n"); 896 893 return error; 897 894 Close_Finish: 898 895 swsusp_close(FMODE_READ); ··· 1017 1012 error = -EINVAL; 1018 1013 1019 1014 if (!error) 1020 - pr_debug("Hibernation mode set to '%s'\n", 1021 - hibernation_modes[mode]); 1015 + pm_pr_dbg("Hibernation mode set to '%s'\n", 1016 + hibernation_modes[mode]); 1022 1017 unlock_system_sleep(); 1023 1018 return error ? error : n; 1024 1019 }
+59 -5
kernel/power/main.c
··· 150 150 power_attr(mem_sleep); 151 151 #endif /* CONFIG_SUSPEND */ 152 152 153 - #ifdef CONFIG_PM_DEBUG 153 + #ifdef CONFIG_PM_SLEEP_DEBUG 154 154 int pm_test_level = TEST_NONE; 155 155 156 156 static const char * const pm_tests[__TEST_AFTER_LAST] = { ··· 211 211 } 212 212 213 213 power_attr(pm_test); 214 - #endif /* CONFIG_PM_DEBUG */ 214 + #endif /* CONFIG_PM_SLEEP_DEBUG */ 215 215 216 216 #ifdef CONFIG_DEBUG_FS 217 217 static char *suspend_step_name(enum suspend_stat_step step) ··· 360 360 } 361 361 362 362 power_attr_ro(pm_wakeup_irq); 363 + 364 + bool pm_debug_messages_on __read_mostly; 365 + 366 + static ssize_t pm_debug_messages_show(struct kobject *kobj, 367 + struct kobj_attribute *attr, char *buf) 368 + { 369 + return sprintf(buf, "%d\n", pm_debug_messages_on); 370 + } 371 + 372 + static ssize_t pm_debug_messages_store(struct kobject *kobj, 373 + struct kobj_attribute *attr, 374 + const char *buf, size_t n) 375 + { 376 + unsigned long val; 377 + 378 + if (kstrtoul(buf, 10, &val)) 379 + return -EINVAL; 380 + 381 + if (val > 1) 382 + return -EINVAL; 383 + 384 + pm_debug_messages_on = !!val; 385 + return n; 386 + } 387 + 388 + power_attr(pm_debug_messages); 389 + 390 + /** 391 + * __pm_pr_dbg - Print a suspend debug message to the kernel log. 392 + * @defer: Whether or not to use printk_deferred() to print the message. 393 + * @fmt: Message format. 394 + * 395 + * The message will be emitted if enabled through the pm_debug_messages 396 + * sysfs attribute. 397 + */ 398 + void __pm_pr_dbg(bool defer, const char *fmt, ...) 
399 + { 400 + struct va_format vaf; 401 + va_list args; 402 + 403 + if (!pm_debug_messages_on) 404 + return; 405 + 406 + va_start(args, fmt); 407 + 408 + vaf.fmt = fmt; 409 + vaf.va = &args; 410 + 411 + if (defer) 412 + printk_deferred(KERN_DEBUG "PM: %pV", &vaf); 413 + else 414 + printk(KERN_DEBUG "PM: %pV", &vaf); 415 + 416 + va_end(args); 417 + } 363 418 364 419 #else /* !CONFIG_PM_SLEEP_DEBUG */ 365 420 static inline void pm_print_times_init(void) {} ··· 746 691 &wake_lock_attr.attr, 747 692 &wake_unlock_attr.attr, 748 693 #endif 749 - #ifdef CONFIG_PM_DEBUG 750 - &pm_test_attr.attr, 751 - #endif 752 694 #ifdef CONFIG_PM_SLEEP_DEBUG 695 + &pm_test_attr.attr, 753 696 &pm_print_times_attr.attr, 754 697 &pm_wakeup_irq_attr.attr, 698 + &pm_debug_messages_attr.attr, 755 699 #endif 756 700 #endif 757 701 #ifdef CONFIG_FREEZER
+4 -1
kernel/power/power.h
··· 192 192 extern const char * const pm_labels[]; 193 193 extern const char *pm_states[]; 194 194 extern const char *mem_sleep_states[]; 195 - extern suspend_state_t mem_sleep_current; 196 195 197 196 extern int suspend_devices_and_enter(suspend_state_t state); 198 197 #else /* !CONFIG_SUSPEND */ ··· 244 245 #define TEST_FIRST TEST_NONE 245 246 #define TEST_MAX (__TEST_AFTER_LAST - 1) 246 247 248 + #ifdef CONFIG_PM_SLEEP_DEBUG 247 249 extern int pm_test_level; 250 + #else 251 + #define pm_test_level (TEST_NONE) 252 + #endif 248 253 249 254 #ifdef CONFIG_SUSPEND_FREEZER 250 255 static inline int suspend_freeze_processes(void)
+103 -81
kernel/power/suspend.c
··· 8 8 * This file is released under the GPLv2. 9 9 */ 10 10 11 + #define pr_fmt(fmt) "PM: " fmt 12 + 11 13 #include <linux/string.h> 12 14 #include <linux/delay.h> 13 15 #include <linux/errno.h> ··· 35 33 #include "power.h" 36 34 37 35 const char * const pm_labels[] = { 38 - [PM_SUSPEND_FREEZE] = "freeze", 36 + [PM_SUSPEND_TO_IDLE] = "freeze", 39 37 [PM_SUSPEND_STANDBY] = "standby", 40 38 [PM_SUSPEND_MEM] = "mem", 41 39 }; 42 40 const char *pm_states[PM_SUSPEND_MAX]; 43 41 static const char * const mem_sleep_labels[] = { 44 - [PM_SUSPEND_FREEZE] = "s2idle", 42 + [PM_SUSPEND_TO_IDLE] = "s2idle", 45 43 [PM_SUSPEND_STANDBY] = "shallow", 46 44 [PM_SUSPEND_MEM] = "deep", 47 45 }; 48 46 const char *mem_sleep_states[PM_SUSPEND_MAX]; 49 47 50 - suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE; 51 - static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM; 48 + suspend_state_t mem_sleep_current = PM_SUSPEND_TO_IDLE; 49 + suspend_state_t mem_sleep_default = PM_SUSPEND_MAX; 50 + suspend_state_t pm_suspend_target_state; 51 + EXPORT_SYMBOL_GPL(pm_suspend_target_state); 52 52 53 53 unsigned int pm_suspend_global_flags; 54 54 EXPORT_SYMBOL_GPL(pm_suspend_global_flags); 55 55 56 56 static const struct platform_suspend_ops *suspend_ops; 57 - static const struct platform_freeze_ops *freeze_ops; 58 - static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); 57 + static const struct platform_s2idle_ops *s2idle_ops; 58 + static DECLARE_WAIT_QUEUE_HEAD(s2idle_wait_head); 59 59 60 - enum freeze_state __read_mostly suspend_freeze_state; 61 - static DEFINE_SPINLOCK(suspend_freeze_lock); 60 + enum s2idle_states __read_mostly s2idle_state; 61 + static DEFINE_SPINLOCK(s2idle_lock); 62 62 63 - void freeze_set_ops(const struct platform_freeze_ops *ops) 63 + void s2idle_set_ops(const struct platform_s2idle_ops *ops) 64 64 { 65 65 lock_system_sleep(); 66 - freeze_ops = ops; 66 + s2idle_ops = ops; 67 67 unlock_system_sleep(); 68 68 } 69 69 70 - static void freeze_begin(void) 70 + static 
void s2idle_begin(void) 71 71 { 72 - suspend_freeze_state = FREEZE_STATE_NONE; 72 + s2idle_state = S2IDLE_STATE_NONE; 73 73 } 74 74 75 - static void freeze_enter(void) 75 + static void s2idle_enter(void) 76 76 { 77 - trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, true); 77 + trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, true); 78 78 79 - spin_lock_irq(&suspend_freeze_lock); 79 + spin_lock_irq(&s2idle_lock); 80 80 if (pm_wakeup_pending()) 81 81 goto out; 82 82 83 - suspend_freeze_state = FREEZE_STATE_ENTER; 84 - spin_unlock_irq(&suspend_freeze_lock); 83 + s2idle_state = S2IDLE_STATE_ENTER; 84 + spin_unlock_irq(&s2idle_lock); 85 85 86 86 get_online_cpus(); 87 87 cpuidle_resume(); ··· 91 87 /* Push all the CPUs into the idle loop. */ 92 88 wake_up_all_idle_cpus(); 93 89 /* Make the current CPU wait so it can enter the idle loop too. */ 94 - wait_event(suspend_freeze_wait_head, 95 - suspend_freeze_state == FREEZE_STATE_WAKE); 90 + wait_event(s2idle_wait_head, 91 + s2idle_state == S2IDLE_STATE_WAKE); 96 92 97 93 cpuidle_pause(); 98 94 put_online_cpus(); 99 95 100 - spin_lock_irq(&suspend_freeze_lock); 96 + spin_lock_irq(&s2idle_lock); 101 97 102 98 out: 103 - suspend_freeze_state = FREEZE_STATE_NONE; 104 - spin_unlock_irq(&suspend_freeze_lock); 99 + s2idle_state = S2IDLE_STATE_NONE; 100 + spin_unlock_irq(&s2idle_lock); 105 101 106 - trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_FREEZE, false); 102 + trace_suspend_resume(TPS("machine_suspend"), PM_SUSPEND_TO_IDLE, false); 107 103 } 108 104 109 105 static void s2idle_loop(void) 110 106 { 111 - pr_debug("PM: suspend-to-idle\n"); 107 + pm_pr_dbg("suspend-to-idle\n"); 112 108 113 - do { 114 - freeze_enter(); 109 + for (;;) { 110 + int error; 115 111 116 - if (freeze_ops && freeze_ops->wake) 117 - freeze_ops->wake(); 112 + dpm_noirq_begin(); 118 113 119 - dpm_resume_noirq(PMSG_RESUME); 120 - if (freeze_ops && freeze_ops->sync) 121 - freeze_ops->sync(); 114 + /* 115 + * 
Suspend-to-idle equals 116 + * frozen processes + suspended devices + idle processors. 117 + * Thus s2idle_enter() should be called right after 118 + * all devices have been suspended. 119 + */ 120 + error = dpm_noirq_suspend_devices(PMSG_SUSPEND); 121 + if (!error) 122 + s2idle_enter(); 123 + 124 + dpm_noirq_resume_devices(PMSG_RESUME); 125 + if (error && (error != -EBUSY || !pm_wakeup_pending())) { 126 + dpm_noirq_end(); 127 + break; 128 + } 129 + 130 + if (s2idle_ops && s2idle_ops->wake) 131 + s2idle_ops->wake(); 132 + 133 + dpm_noirq_end(); 134 + 135 + if (s2idle_ops && s2idle_ops->sync) 136 + s2idle_ops->sync(); 122 137 123 138 if (pm_wakeup_pending()) 124 139 break; 125 140 126 141 pm_wakeup_clear(false); 127 - } while (!dpm_suspend_noirq(PMSG_SUSPEND)); 142 + } 128 143 129 - pr_debug("PM: resume from suspend-to-idle\n"); 144 + pm_pr_dbg("resume from suspend-to-idle\n"); 130 145 } 131 146 132 - void freeze_wake(void) 147 + void s2idle_wake(void) 133 148 { 134 149 unsigned long flags; 135 150 136 - spin_lock_irqsave(&suspend_freeze_lock, flags); 137 - if (suspend_freeze_state > FREEZE_STATE_NONE) { 138 - suspend_freeze_state = FREEZE_STATE_WAKE; 139 - wake_up(&suspend_freeze_wait_head); 151 + spin_lock_irqsave(&s2idle_lock, flags); 152 + if (s2idle_state > S2IDLE_STATE_NONE) { 153 + s2idle_state = S2IDLE_STATE_WAKE; 154 + wake_up(&s2idle_wait_head); 140 155 } 141 - spin_unlock_irqrestore(&suspend_freeze_lock, flags); 156 + spin_unlock_irqrestore(&s2idle_lock, flags); 142 157 } 143 - EXPORT_SYMBOL_GPL(freeze_wake); 158 + EXPORT_SYMBOL_GPL(s2idle_wake); 144 159 145 160 static bool valid_state(suspend_state_t state) 146 161 { ··· 175 152 { 176 153 /* "mem" and "freeze" are always present in /sys/power/state. 
*/ 177 154 pm_states[PM_SUSPEND_MEM] = pm_labels[PM_SUSPEND_MEM]; 178 - pm_states[PM_SUSPEND_FREEZE] = pm_labels[PM_SUSPEND_FREEZE]; 155 + pm_states[PM_SUSPEND_TO_IDLE] = pm_labels[PM_SUSPEND_TO_IDLE]; 179 156 /* 180 157 * Suspend-to-idle should be supported even without any suspend_ops, 181 158 * initialize mem_sleep_states[] accordingly here. 182 159 */ 183 - mem_sleep_states[PM_SUSPEND_FREEZE] = mem_sleep_labels[PM_SUSPEND_FREEZE]; 160 + mem_sleep_states[PM_SUSPEND_TO_IDLE] = mem_sleep_labels[PM_SUSPEND_TO_IDLE]; 184 161 } 185 162 186 163 static int __init mem_sleep_default_setup(char *str) 187 164 { 188 165 suspend_state_t state; 189 166 190 - for (state = PM_SUSPEND_FREEZE; state <= PM_SUSPEND_MEM; state++) 167 + for (state = PM_SUSPEND_TO_IDLE; state <= PM_SUSPEND_MEM; state++) 191 168 if (mem_sleep_labels[state] && 192 169 !strcmp(str, mem_sleep_labels[state])) { 193 170 mem_sleep_default = state; ··· 216 193 } 217 194 if (valid_state(PM_SUSPEND_MEM)) { 218 195 mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM]; 219 - if (mem_sleep_default == PM_SUSPEND_MEM) 196 + if (mem_sleep_default >= PM_SUSPEND_MEM) 220 197 mem_sleep_current = PM_SUSPEND_MEM; 221 198 } 222 199 ··· 239 216 240 217 static bool sleep_state_supported(suspend_state_t state) 241 218 { 242 - return state == PM_SUSPEND_FREEZE || (suspend_ops && suspend_ops->enter); 219 + return state == PM_SUSPEND_TO_IDLE || (suspend_ops && suspend_ops->enter); 243 220 } 244 221 245 222 static int platform_suspend_prepare(suspend_state_t state) 246 223 { 247 - return state != PM_SUSPEND_FREEZE && suspend_ops->prepare ? 224 + return state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare ? 248 225 suspend_ops->prepare() : 0; 249 226 } 250 227 251 228 static int platform_suspend_prepare_late(suspend_state_t state) 252 229 { 253 - return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ? 
254 - freeze_ops->prepare() : 0; 230 + return state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->prepare ? 231 + s2idle_ops->prepare() : 0; 255 232 } 256 233 257 234 static int platform_suspend_prepare_noirq(suspend_state_t state) 258 235 { 259 - return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ? 236 + return state != PM_SUSPEND_TO_IDLE && suspend_ops->prepare_late ? 260 237 suspend_ops->prepare_late() : 0; 261 238 } 262 239 263 240 static void platform_resume_noirq(suspend_state_t state) 264 241 { 265 - if (state != PM_SUSPEND_FREEZE && suspend_ops->wake) 242 + if (state != PM_SUSPEND_TO_IDLE && suspend_ops->wake) 266 243 suspend_ops->wake(); 267 244 } 268 245 269 246 static void platform_resume_early(suspend_state_t state) 270 247 { 271 - if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore) 272 - freeze_ops->restore(); 248 + if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->restore) 249 + s2idle_ops->restore(); 273 250 } 274 251 275 252 static void platform_resume_finish(suspend_state_t state) 276 253 { 277 - if (state != PM_SUSPEND_FREEZE && suspend_ops->finish) 254 + if (state != PM_SUSPEND_TO_IDLE && suspend_ops->finish) 278 255 suspend_ops->finish(); 279 256 } 280 257 281 258 static int platform_suspend_begin(suspend_state_t state) 282 259 { 283 - if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) 284 - return freeze_ops->begin(); 260 + if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->begin) 261 + return s2idle_ops->begin(); 285 262 else if (suspend_ops && suspend_ops->begin) 286 263 return suspend_ops->begin(state); 287 264 else ··· 290 267 291 268 static void platform_resume_end(suspend_state_t state) 292 269 { 293 - if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end) 294 - freeze_ops->end(); 270 + if (state == PM_SUSPEND_TO_IDLE && s2idle_ops && s2idle_ops->end) 271 + s2idle_ops->end(); 295 272 else if (suspend_ops && suspend_ops->end) 296 273 suspend_ops->end(); 297 
274 } 298 275 299 276 static void platform_recover(suspend_state_t state) 300 277 { 301 - if (state != PM_SUSPEND_FREEZE && suspend_ops->recover) 278 + if (state != PM_SUSPEND_TO_IDLE && suspend_ops->recover) 302 279 suspend_ops->recover(); 303 280 } 304 281 305 282 static bool platform_suspend_again(suspend_state_t state) 306 283 { 307 - return state != PM_SUSPEND_FREEZE && suspend_ops->suspend_again ? 284 + return state != PM_SUSPEND_TO_IDLE && suspend_ops->suspend_again ? 308 285 suspend_ops->suspend_again() : false; 309 286 } 310 287 ··· 393 370 394 371 error = dpm_suspend_late(PMSG_SUSPEND); 395 372 if (error) { 396 - pr_err("PM: late suspend of devices failed\n"); 373 + pr_err("late suspend of devices failed\n"); 397 374 goto Platform_finish; 398 375 } 399 376 error = platform_suspend_prepare_late(state); 400 377 if (error) 401 378 goto Devices_early_resume; 402 379 380 + if (state == PM_SUSPEND_TO_IDLE && pm_test_level != TEST_PLATFORM) { 381 + s2idle_loop(); 382 + goto Platform_early_resume; 383 + } 384 + 403 385 error = dpm_suspend_noirq(PMSG_SUSPEND); 404 386 if (error) { 405 - pr_err("PM: noirq suspend of devices failed\n"); 387 + pr_err("noirq suspend of devices failed\n"); 406 388 goto Platform_early_resume; 407 389 } 408 390 error = platform_suspend_prepare_noirq(state); ··· 416 388 417 389 if (suspend_test(TEST_PLATFORM)) 418 390 goto Platform_wake; 419 - 420 - /* 421 - * PM_SUSPEND_FREEZE equals 422 - * frozen processes + suspended devices + idle processors. 423 - * Thus we should invoke freeze_enter() soon after 424 - * all the devices are suspended. 
425 - */ 426 - if (state == PM_SUSPEND_FREEZE) { 427 - s2idle_loop(); 428 - goto Platform_early_resume; 429 - } 430 391 431 392 error = disable_nonboot_cpus(); 432 393 if (error || suspend_test(TEST_CPUS)) ··· 473 456 if (!sleep_state_supported(state)) 474 457 return -ENOSYS; 475 458 459 + pm_suspend_target_state = state; 460 + 476 461 error = platform_suspend_begin(state); 477 462 if (error) 478 463 goto Close; ··· 483 464 suspend_test_start(); 484 465 error = dpm_suspend_start(PMSG_SUSPEND); 485 466 if (error) { 486 - pr_err("PM: Some devices failed to suspend, or early wake event detected\n"); 467 + pr_err("Some devices failed to suspend, or early wake event detected\n"); 487 468 goto Recover_platform; 488 469 } 489 470 suspend_test_finish("suspend devices"); ··· 504 485 505 486 Close: 506 487 platform_resume_end(state); 488 + pm_suspend_target_state = PM_SUSPEND_ON; 507 489 return error; 508 490 509 491 Recover_platform: ··· 538 518 int error; 539 519 540 520 trace_suspend_resume(TPS("suspend_enter"), state, true); 541 - if (state == PM_SUSPEND_FREEZE) { 521 + if (state == PM_SUSPEND_TO_IDLE) { 542 522 #ifdef CONFIG_PM_DEBUG 543 523 if (pm_test_level != TEST_NONE && pm_test_level <= TEST_CPUS) { 544 - pr_warn("PM: Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n"); 524 + pr_warn("Unsupported test mode for suspend to idle, please choose none/freezer/devices/platform.\n"); 545 525 return -EAGAIN; 546 526 } 547 527 #endif ··· 551 531 if (!mutex_trylock(&pm_mutex)) 552 532 return -EBUSY; 553 533 554 - if (state == PM_SUSPEND_FREEZE) 555 - freeze_begin(); 534 + if (state == PM_SUSPEND_TO_IDLE) 535 + s2idle_begin(); 556 536 557 537 #ifndef CONFIG_SUSPEND_SKIP_SYNC 558 538 trace_suspend_resume(TPS("sync_filesystems"), 0, true); 559 - pr_info("PM: Syncing filesystems ... "); 539 + pr_info("Syncing filesystems ... 
"); 560 540 sys_sync(); 561 541 pr_cont("done.\n"); 562 542 trace_suspend_resume(TPS("sync_filesystems"), 0, false); 563 543 #endif 564 544 565 - pr_debug("PM: Preparing system for sleep (%s)\n", pm_states[state]); 545 + pm_pr_dbg("Preparing system for sleep (%s)\n", mem_sleep_labels[state]); 566 546 pm_suspend_clear_flags(); 567 547 error = suspend_prepare(state); 568 548 if (error) ··· 572 552 goto Finish; 573 553 574 554 trace_suspend_resume(TPS("suspend_enter"), state, false); 575 - pr_debug("PM: Suspending system (%s)\n", pm_states[state]); 555 + pm_pr_dbg("Suspending system (%s)\n", mem_sleep_labels[state]); 576 556 pm_restrict_gfp_mask(); 577 557 error = suspend_devices_and_enter(state); 578 558 pm_restore_gfp_mask(); 579 559 580 560 Finish: 581 - pr_debug("PM: Finishing wakeup.\n"); 561 + pm_pr_dbg("Finishing wakeup.\n"); 582 562 suspend_finish(); 583 563 Unlock: 584 564 mutex_unlock(&pm_mutex); ··· 599 579 if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) 600 580 return -EINVAL; 601 581 582 + pr_info("suspend entry (%s)\n", mem_sleep_labels[state]); 602 583 error = enter_state(state); 603 584 if (error) { 604 585 suspend_stats.fail++; ··· 607 586 } else { 608 587 suspend_stats.success++; 609 588 } 589 + pr_info("suspend exit\n"); 610 590 return error; 611 591 } 612 592 EXPORT_SYMBOL(pm_suspend);
+2 -2
kernel/power/suspend_test.c
··· 104 104 printk(info_test, pm_states[state]); 105 105 status = pm_suspend(state); 106 106 if (status < 0) 107 - state = PM_SUSPEND_FREEZE; 107 + state = PM_SUSPEND_TO_IDLE; 108 108 } 109 - if (state == PM_SUSPEND_FREEZE) { 109 + if (state == PM_SUSPEND_TO_IDLE) { 110 110 printk(info_test, pm_states[state]); 111 111 status = pm_suspend(state); 112 112 }
+4 -4
kernel/sched/idle.c
··· 158 158 } 159 159 160 160 /* 161 - * Suspend-to-idle ("freeze") is a system state in which all user space 161 + * Suspend-to-idle ("s2idle") is a system state in which all user space 162 162 * has been frozen, all I/O devices have been suspended and the only 163 163 * activity happens here and in interrupts (if any). In that case bypass 164 164 * the cpuidle governor and go straight for the deepest idle state ··· 167 167 * until a proper wakeup interrupt happens. 168 168 */ 169 169 170 - if (idle_should_freeze() || dev->use_deepest_state) { 171 - if (idle_should_freeze()) { 172 - entered_state = cpuidle_enter_freeze(drv, dev); 170 + if (idle_should_enter_s2idle() || dev->use_deepest_state) { 171 + if (idle_should_enter_s2idle()) { 172 + entered_state = cpuidle_enter_s2idle(drv, dev); 173 173 if (entered_state > 0) { 174 174 local_irq_enable(); 175 175 goto exit_idle;
+3 -2
kernel/time/timekeeping_debug.c
··· 19 19 #include <linux/init.h> 20 20 #include <linux/kernel.h> 21 21 #include <linux/seq_file.h> 22 + #include <linux/suspend.h> 22 23 #include <linux/time.h> 23 24 24 25 #include "timekeeping_internal.h" ··· 76 75 int bin = min(fls(t->tv_sec), NUM_BINS-1); 77 76 78 77 sleep_time_bin[bin]++; 79 - printk_deferred(KERN_INFO "Suspended for %lld.%03lu seconds\n", 80 - (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC); 78 + pm_deferred_pr_dbg("Timekeeping suspended for %lld.%03lu seconds\n", 79 + (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC); 81 80 } 82 81