Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'pm-5.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm

Pull power management fixes from Rafael Wysocki:
"Fix three issues related to the handling of wakeup events signaled
through the ACPI SCI while suspended to idle (Rafael Wysocki) and
unexport an internal cpufreq variable (Yangtao Li)"

* tag 'pm-5.6-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm:
ACPI: PM: s2idle: Prevent spurious SCIs from waking up the system
ACPICA: Introduce acpi_any_gpe_status_set()
ACPI: PM: s2idle: Avoid possible race related to the EC GPE
ACPI: EC: Fix flushing of pending work
cpufreq: Make cpufreq_global_kobject static

+177 -42
+2
drivers/acpi/acpica/achware.h
··· 101 101 102 102 acpi_status acpi_hw_enable_all_wakeup_gpes(void); 103 103 104 + u8 acpi_hw_check_all_gpes(void); 105 + 104 106 acpi_status 105 107 acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 106 108 struct acpi_gpe_block_info *gpe_block,
+32
drivers/acpi/acpica/evxfgpe.c
··· 795 795 796 796 ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes) 797 797 798 + /****************************************************************************** 799 + * 800 + * FUNCTION: acpi_any_gpe_status_set 801 + * 802 + * PARAMETERS: None 803 + * 804 + * RETURN: Whether or not the status bit is set for any GPE 805 + * 806 + * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any 807 + * of them is set or FALSE otherwise. 808 + * 809 + ******************************************************************************/ 810 + u32 acpi_any_gpe_status_set(void) 811 + { 812 + acpi_status status; 813 + u8 ret; 814 + 815 + ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set); 816 + 817 + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 818 + if (ACPI_FAILURE(status)) { 819 + return (FALSE); 820 + } 821 + 822 + ret = acpi_hw_check_all_gpes(); 823 + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 824 + 825 + return (ret); 826 + } 827 + 828 + ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set) 829 + 798 830 /******************************************************************************* 799 831 * 800 832 * FUNCTION: acpi_install_gpe_block
+71
drivers/acpi/acpica/hwgpe.c
··· 446 446 447 447 /****************************************************************************** 448 448 * 449 + * FUNCTION: acpi_hw_get_gpe_block_status 450 + * 451 + * PARAMETERS: gpe_xrupt_info - GPE Interrupt info 452 + * gpe_block - Gpe Block info 453 + * 454 + * RETURN: Success 455 + * 456 + * DESCRIPTION: Produce a combined GPE status bits mask for the given block. 457 + * 458 + ******************************************************************************/ 459 + 460 + static acpi_status 461 + acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 462 + struct acpi_gpe_block_info *gpe_block, 463 + void *ret_ptr) 464 + { 465 + struct acpi_gpe_register_info *gpe_register_info; 466 + u64 in_enable, in_status; 467 + acpi_status status; 468 + u8 *ret = ret_ptr; 469 + u32 i; 470 + 471 + /* Examine each GPE Register within the block */ 472 + 473 + for (i = 0; i < gpe_block->register_count; i++) { 474 + gpe_register_info = &gpe_block->register_info[i]; 475 + 476 + status = acpi_hw_read(&in_enable, 477 + &gpe_register_info->enable_address); 478 + if (ACPI_FAILURE(status)) { 479 + continue; 480 + } 481 + 482 + status = acpi_hw_read(&in_status, 483 + &gpe_register_info->status_address); 484 + if (ACPI_FAILURE(status)) { 485 + continue; 486 + } 487 + 488 + *ret |= in_enable & in_status; 489 + } 490 + 491 + return (AE_OK); 492 + } 493 + 494 + /****************************************************************************** 495 + * 449 496 * FUNCTION: acpi_hw_disable_all_gpes 450 497 * 451 498 * PARAMETERS: None ··· 555 508 556 509 status = acpi_ev_walk_gpe_list(acpi_hw_enable_wakeup_gpe_block, NULL); 557 510 return_ACPI_STATUS(status); 511 + } 512 + 513 + /****************************************************************************** 514 + * 515 + * FUNCTION: acpi_hw_check_all_gpes 516 + * 517 + * PARAMETERS: None 518 + * 519 + * RETURN: Combined status of all GPEs 520 + * 521 + * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if 
the 522 + * status bit is set for at least one of them or FALSE otherwise. 523 + * 524 + ******************************************************************************/ 525 + 526 + u8 acpi_hw_check_all_gpes(void) 527 + { 528 + u8 ret = 0; 529 + 530 + ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes); 531 + 532 + (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret); 533 + 534 + return (ret != 0); 558 535 } 559 536 560 537 #endif /* !ACPI_REDUCED_HARDWARE */
+26 -18
drivers/acpi/ec.c
··· 179 179 180 180 static struct acpi_ec *boot_ec; 181 181 static bool boot_ec_is_ecdt = false; 182 + static struct workqueue_struct *ec_wq; 182 183 static struct workqueue_struct *ec_query_wq; 183 184 184 185 static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */ ··· 470 469 ec_dbg_evt("Command(%s) submitted/blocked", 471 470 acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY)); 472 471 ec->nr_pending_queries++; 473 - schedule_work(&ec->work); 472 + queue_work(ec_wq, &ec->work); 474 473 } 475 474 } 476 475 ··· 536 535 #ifdef CONFIG_PM_SLEEP 537 536 static void __acpi_ec_flush_work(void) 538 537 { 539 - flush_scheduled_work(); /* flush ec->work */ 538 + drain_workqueue(ec_wq); /* flush ec->work */ 540 539 flush_workqueue(ec_query_wq); /* flush queries */ 541 540 } 542 541 ··· 557 556 558 557 void acpi_ec_flush_work(void) 559 558 { 560 - /* Without ec_query_wq there is nothing to flush. */ 561 - if (!ec_query_wq) 559 + /* Without ec_wq there is nothing to flush. */ 560 + if (!ec_wq) 562 561 return; 563 562 564 563 __acpi_ec_flush_work(); ··· 2108 2107 .drv.pm = &acpi_ec_pm, 2109 2108 }; 2110 2109 2111 - static inline int acpi_ec_query_init(void) 2110 + static void acpi_ec_destroy_workqueues(void) 2112 2111 { 2113 - if (!ec_query_wq) { 2114 - ec_query_wq = alloc_workqueue("kec_query", 0, 2115 - ec_max_queries); 2116 - if (!ec_query_wq) 2117 - return -ENODEV; 2112 + if (ec_wq) { 2113 + destroy_workqueue(ec_wq); 2114 + ec_wq = NULL; 2118 2115 } 2119 - return 0; 2120 - } 2121 - 2122 - static inline void acpi_ec_query_exit(void) 2123 - { 2124 2116 if (ec_query_wq) { 2125 2117 destroy_workqueue(ec_query_wq); 2126 2118 ec_query_wq = NULL; 2127 2119 } 2120 + } 2121 + 2122 + static int acpi_ec_init_workqueues(void) 2123 + { 2124 + if (!ec_wq) 2125 + ec_wq = alloc_ordered_workqueue("kec", 0); 2126 + 2127 + if (!ec_query_wq) 2128 + ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries); 2129 + 2130 + if (!ec_wq || !ec_query_wq) { 2131 + 
acpi_ec_destroy_workqueues(); 2132 + return -ENODEV; 2133 + } 2134 + return 0; 2128 2135 } 2129 2136 2130 2137 static const struct dmi_system_id acpi_ec_no_wakeup[] = { ··· 2165 2156 int result; 2166 2157 int ecdt_fail, dsdt_fail; 2167 2158 2168 - /* register workqueue for _Qxx evaluations */ 2169 - result = acpi_ec_query_init(); 2159 + result = acpi_ec_init_workqueues(); 2170 2160 if (result) 2171 2161 return result; 2172 2162 ··· 2196 2188 { 2197 2189 2198 2190 acpi_bus_unregister_driver(&acpi_ec_driver); 2199 - acpi_ec_query_exit(); 2191 + acpi_ec_destroy_workqueues(); 2200 2192 } 2201 2193 #endif /* 0 */
+37 -13
drivers/acpi/sleep.c
··· 990 990 acpi_os_wait_events_complete(); /* synchronize Notify handling */ 991 991 } 992 992 993 - static void acpi_s2idle_wake(void) 993 + static bool acpi_s2idle_wake(void) 994 994 { 995 - /* 996 - * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has 997 - * not triggered while suspended, so bail out. 998 - */ 999 - if (!acpi_sci_irq_valid() || 1000 - irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) 1001 - return; 995 + if (!acpi_sci_irq_valid()) 996 + return pm_wakeup_pending(); 1002 997 1003 - /* 1004 - * If there are EC events to process, the wakeup may be a spurious one 1005 - * coming from the EC. 1006 - */ 1007 - if (acpi_ec_dispatch_gpe()) { 998 + while (pm_wakeup_pending()) { 999 + /* 1000 + * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the 1001 + * SCI has not triggered while suspended, so bail out (the 1002 + * wakeup is pending anyway and the SCI is not the source of 1003 + * it). 1004 + */ 1005 + if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq))) 1006 + return true; 1007 + 1008 + /* 1009 + * If there are no EC events to process and at least one of the 1010 + * other enabled GPEs is active, the wakeup is regarded as a 1011 + * genuine one. 1012 + * 1013 + * Note that the checks below must be carried out in this order 1014 + * to avoid returning prematurely due to a change of the EC GPE 1015 + * status bit from unset to set between the checks with the 1016 + * status bits of all the other GPEs unset. 1017 + */ 1018 + if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe()) 1019 + return true; 1020 + 1008 1021 /* 1009 1022 * Cancel the wakeup and process all pending events in case 1010 1023 * there are any wakeup ones in there. 
··· 1030 1017 1031 1018 acpi_s2idle_sync(); 1032 1019 1020 + /* 1021 + * The SCI is in the "suspended" state now and it cannot produce 1022 + * new wakeup events till the rearming below, so if any of them 1023 + * are pending here, they must be resulting from the processing 1024 + * of EC events above or coming from somewhere else. 1025 + */ 1026 + if (pm_wakeup_pending()) 1027 + return true; 1028 + 1033 1029 rearm_wake_irq(acpi_sci_irq); 1034 1030 } 1031 + 1032 + return false; 1035 1033 } 1036 1034 1037 1035 static void acpi_s2idle_restore_early(void)
+2 -3
drivers/cpufreq/cpufreq.c
··· 105 105 } 106 106 EXPORT_SYMBOL_GPL(have_governor_per_policy); 107 107 108 + static struct kobject *cpufreq_global_kobject; 109 + 108 110 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy) 109 111 { 110 112 if (have_governor_per_policy()) ··· 2746 2744 return 0; 2747 2745 } 2748 2746 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver); 2749 - 2750 - struct kobject *cpufreq_global_kobject; 2751 - EXPORT_SYMBOL(cpufreq_global_kobject); 2752 2747 2753 2748 static int __init cpufreq_core_init(void) 2754 2749 {
+1
include/acpi/acpixf.h
··· 752 752 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void)) 753 753 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void)) 754 754 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void)) 755 + ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void)) 755 756 756 757 ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status 757 758 acpi_get_gpe_device(u32 gpe_index,
-3
include/linux/cpufreq.h
··· 201 201 return cpumask_weight(policy->cpus) > 1; 202 202 } 203 203 204 - /* /sys/devices/system/cpu/cpufreq: entry point for global variables */ 205 - extern struct kobject *cpufreq_global_kobject; 206 - 207 204 #ifdef CONFIG_CPU_FREQ 208 205 unsigned int cpufreq_get(unsigned int cpu); 209 206 unsigned int cpufreq_quick_get(unsigned int cpu);
+1 -1
include/linux/suspend.h
··· 191 191 int (*begin)(void); 192 192 int (*prepare)(void); 193 193 int (*prepare_late)(void); 194 - void (*wake)(void); 194 + bool (*wake)(void); 195 195 void (*restore_early)(void); 196 196 void (*restore)(void); 197 197 void (*end)(void);
+5 -4
kernel/power/suspend.c
··· 131 131 * to avoid them upfront. 132 132 */ 133 133 for (;;) { 134 - if (s2idle_ops && s2idle_ops->wake) 135 - s2idle_ops->wake(); 136 - 137 - if (pm_wakeup_pending()) 134 + if (s2idle_ops && s2idle_ops->wake) { 135 + if (s2idle_ops->wake()) 136 + break; 137 + } else if (pm_wakeup_pending()) { 138 138 break; 139 + } 139 140 140 141 pm_wakeup_clear(false); 141 142