Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (59 commits)
ACPI / PM: Fix build problems for !CONFIG_ACPI related to NVS rework
ACPI: fix resource check message
ACPI / Battery: Update information on info notification and resume
ACPI: Drop device flag wake_capable
ACPI: Always check if _PRW is present before trying to evaluate it
ACPI / PM: Check status of power resources under mutexes
ACPI / PM: Rename acpi_power_off_device()
ACPI / PM: Drop acpi_power_nocheck
ACPI / PM: Drop acpi_bus_get_power()
Platform / x86: Make fujitsu_laptop use acpi_bus_update_power()
ACPI / Fan: Rework the handling of power resources
ACPI / PM: Register power resource devices as soon as they are needed
ACPI / PM: Register acpi_power_driver early
ACPI / PM: Add function for updating device power state consistently
ACPI / PM: Add function for device power state initialization
ACPI / PM: Introduce __acpi_bus_get_power()
ACPI / PM: Introduce function for refcounting device power resources
ACPI / PM: Add functions for manipulating lists of power resources
ACPI / PM: Prevent acpi_power_get_inferred_state() from making changes
ACPICA: Update version to 20101209
...

+3374 -1612
+4
Documentation/ABI/stable/thermal-notification
··· 1 + What: A notification mechanism for thermal-related events 2 + Description: 3 + This interface enables notification for thermal-related events. 4 + The notification is in the form of a netlink event.
+27
Documentation/IPMI.txt
··· 533 533 Other Pieces 534 534 ------------ 535 535 536 + Get the detailed info related to the IPMI device 537 + -------------------------------------------------- 538 + 539 + Some users need more detailed information about a device, like where 540 + the address came from or the raw base device for the IPMI interface. 541 + You can use the IPMI smi_watcher to catch the IPMI interfaces as they 542 + come or go, and to grab the information, you can use the function 543 + ipmi_get_smi_info(), which returns the following structure: 544 + 545 + struct ipmi_smi_info { 546 + enum ipmi_addr_src addr_src; 547 + struct device *dev; 548 + union { 549 + struct { 550 + void *acpi_handle; 551 + } acpi_info; 552 + } addr_info; 553 + }; 554 + 555 + Currently, special info is returned only for SI_ACPI address 556 + sources; others may be added as necessary. 557 + 558 + Note that the dev pointer is included in the above structure, and 559 + assuming ipmi_get_smi_info() returns success, you must call put_device 560 + on the dev pointer. 561 + 562 + 536 563 Watchdog 537 564 -------- 538 565
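A minimal sketch of the watcher pattern described above, modeled on the acpi_ipmi.c driver added later in this merge (the example_* names and the pr_info() messages are illustrative, not part of the patch):

    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/ipmi.h>

    static void example_new_smi(int iface, struct device *dev)
    {
            struct ipmi_smi_info info;

            if (ipmi_get_smi_info(iface, &info))
                    return;

            if (info.addr_src == SI_ACPI)
                    pr_info("IPMI interface %d was described by ACPI\n", iface);

            /* ipmi_get_smi_info() took a reference on info.dev; drop it */
            put_device(info.dev);
    }

    static void example_smi_gone(int iface)
    {
            pr_info("IPMI interface %d went away\n", iface);
    }

    static struct ipmi_smi_watcher example_watcher = {
            .owner = THIS_MODULE,
            .new_smi = example_new_smi,
            .smi_gone = example_smi_gone,
    };

    /* registered once at init time with:
     * ipmi_smi_watcher_register(&example_watcher);
     */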
+122
Documentation/acpi/apei/output_format.txt
··· 1 + APEI output format 2 + ~~~~~~~~~~~~~~~~~~ 3 + 4 + APEI uses printk as its hardware error reporting interface; the output 5 + format is as follows. 6 + 7 + <error record> := 8 + APEI generic hardware error status 9 + severity: <integer>, <severity string> 10 + section: <integer>, severity: <integer>, <severity string> 11 + flags: <integer> 12 + <section flags strings> 13 + fru_id: <uuid string> 14 + fru_text: <string> 15 + section_type: <section type string> 16 + <section data> 17 + 18 + <severity string>* := recoverable | fatal | corrected | info 19 + 20 + <section flags strings># := 21 + [primary][, containment warning][, reset][, threshold exceeded]\ 22 + [, resource not accessible][, latent error] 23 + 24 + <section type string> := generic processor error | memory error | \ 25 + PCIe error | unknown, <uuid string> 26 + 27 + <section data> := 28 + <generic processor section data> | <memory section data> | \ 29 + <pcie section data> | <null> 30 + 31 + <generic processor section data> := 32 + [processor_type: <integer>, <proc type string>] 33 + [processor_isa: <integer>, <proc isa string>] 34 + [error_type: <integer> 35 + <proc error type strings>] 36 + [operation: <integer>, <proc operation string>] 37 + [flags: <integer> 38 + <proc flags strings>] 39 + [level: <integer>] 40 + [version_info: <integer>] 41 + [processor_id: <integer>] 42 + [target_address: <integer>] 43 + [requestor_id: <integer>] 44 + [responder_id: <integer>] 45 + [IP: <integer>] 46 + 47 + <proc type string>* := IA32/X64 | IA64 48 + 49 + <proc isa string>* := IA32 | IA64 | X64 50 + 51 + <proc error type strings># := 52 + [cache error][, TLB error][, bus error][, micro-architectural error] 53 + 54 + <proc operation string>* := unknown or generic | data read | data write | \ 55 + instruction execution 56 + 57 + <proc flags strings># := 58 + [restartable][, precise IP][, overflow][, corrected] 59 + 60 + <memory section data> := 61 + [error_status: <integer>] 62 + [physical_address: <integer>] 63 + [physical_address_mask: <integer>] 64 + [node: <integer>] 65 + [card: <integer>] 66 + [module: <integer>] 67 + [bank: <integer>] 68 + [device: <integer>] 69 + [row: <integer>] 70 + [column: <integer>] 71 + [bit_position: <integer>] 72 + [requestor_id: <integer>] 73 + [responder_id: <integer>] 74 + [target_id: <integer>] 75 + [error_type: <integer>, <mem error type string>] 76 + 77 + <mem error type string>* := 78 + unknown | no error | single-bit ECC | multi-bit ECC | \ 79 + single-symbol chipkill ECC | multi-symbol chipkill ECC | master abort | \ 80 + target abort | parity error | watchdog timeout | invalid address | \ 81 + mirror Broken | memory sparing | scrub corrected error | \ 82 + scrub uncorrected error 83 + 84 + <pcie section data> := 85 + [port_type: <integer>, <pcie port type string>] 86 + [version: <integer>.<integer>] 87 + [command: <integer>, status: <integer>] 88 + [device_id: <integer>:<integer>:<integer>.<integer> 89 + slot: <integer> 90 + secondary_bus: <integer> 91 + vendor_id: <integer>, device_id: <integer> 92 + class_code: <integer>] 93 + [serial number: <integer>, <integer>] 94 + [bridge: secondary_status: <integer>, control: <integer>] 95 + 96 + <pcie port type string>* := PCIe end point | legacy PCI end point | \ 97 + unknown | unknown | root port | upstream switch port | \ 98 + downstream switch port | PCIe to PCI/PCI-X bridge | \ 99 + PCI/PCI-X to PCIe bridge | root complex integrated endpoint device | \ 100 + root complex event collector 101 + 102 + Where [] designates that the corresponding content is optional 103 + 104 + All <field string> descriptions marked with * have the following format: 105 + 106 + field: <integer>, <field string> 107 + 108 + Where the value of <integer> should be the position of "string" in the <field 109 + string> description. Otherwise, <field string> will be "unknown". 110 + 111 + All <field strings> descriptions marked with # have the following format: 112 + 113 + field: <integer> 114 + <field strings> 115 + 116 + Where each string in <field strings> corresponds to one set bit of 117 + <integer>. The bit position is the position of "string" in the <field 118 + strings> description. 119 + 120 + For a more detailed explanation of every field, please refer to the UEFI 121 + specification, version 2.3 or later, Appendix N: Common 122 + Platform Error Record.
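As an illustration of the grammar above, a corrected single-bit ECC memory error could be reported along these lines (all values are hypothetical):

    APEI generic hardware error status
    severity: 2, corrected
    section: 0, severity: 2, corrected
    flags: 0x1
    primary
    fru_id: 00000000-0000-0000-0000-000000000000
    fru_text: DIMM A1
    section_type: memory error
    node: 0
    card: 0
    module: 1
    error_type: 2, single-bit ECC

Here "corrected" is at position 2 of the <severity string> list, "single-bit ECC" at position 2 of <mem error type string>, and flags bit 0 maps to "primary", per the rules below.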
+11
Documentation/feature-removal-schedule.txt
··· 248 248 249 249 --------------------------- 250 250 251 + What: CONFIG_ACPI_PROCFS_POWER 252 + When: 2.6.39 253 + Why: sysfs I/F for ACPI power devices, including AC and Battery, 254 + has been working in the upstream kernel since 2.6.24, Sep 2007. 255 + In 2.6.37, we made the sysfs I/F always built in and disabled 256 + this option by default. 257 + Remove this option and the ACPI power procfs interface in 2.6.39. 258 + Who: Zhang Rui <rui.zhang@intel.com> 259 + 260 + --------------------------- 261 + 251 262 What: /proc/acpi/button 252 263 When: August 2007 253 264 Why: /proc/acpi/button has been replaced by events to the input layer
-5
Documentation/kernel-parameters.txt
··· 199 199 unusable. The "log_buf_len" parameter may be useful 200 200 if you need to capture more output. 201 201 202 - acpi_display_output= [HW,ACPI] 203 - acpi_display_output=vendor 204 - acpi_display_output=video 205 - See above. 206 - 207 202 acpi_irq_balance [HW,ACPI] 208 203 ACPI will balance active IRQs 209 204 default in APIC mode
+12
Documentation/thermal/sysfs-api.txt
··· 278 278 |---name: acpitz 279 279 |---temp1_input: 37000 280 280 |---temp1_crit: 100000 281 + 282 + 4. Event Notification 283 + 284 + The framework includes a simple notification mechanism, in the form of a 285 + netlink event. Netlink socket initialization is done during the _init_ 286 + of the framework. Drivers that intend to use the notification mechanism 287 + just need to call generate_netlink_event() with two arguments, 288 + (originator, event). Typically the originator will be an integer assigned 289 + to a thermal_zone_device when it registers itself with the framework. The 290 + event will be one of: {THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL, 291 + THERMAL_DEV_FAULT}. A notification can be sent when the current temperature 292 + crosses any of the configured thresholds.
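A minimal sketch of how a zone driver might emit such an event (the example_* helper and the threshold bookkeeping are illustrative; generate_netlink_event() and the event names are from the text above):

    #include <linux/thermal.h>

    /* hypothetical helper, called after the zone's temperature is re-read */
    static void example_notify_if_critical(struct thermal_zone_device *tz,
                                           long temp, long crit)
    {
            if (temp >= crit)
                    /* tz->id identifies the zone originating the event */
                    generate_netlink_event(tz->id, THERMAL_CRITICAL);
    }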
+5
arch/ia64/include/asm/io.h
··· 426 426 extern void iounmap (volatile void __iomem *addr); 427 427 extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); 428 428 extern void early_iounmap (volatile void __iomem *addr, unsigned long size); 429 + static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size) 430 + { 431 + return ioremap(phys_addr, size); 432 + } 433 + 429 434 430 435 /* 431 436 * String version of IO memory access ops:
+1
arch/x86/kernel/acpi/boot.c
··· 509 509 510 510 return 0; 511 511 } 512 + EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); 512 513 513 514 int acpi_isa_irq_to_gsi(unsigned isa_irq, u32 *gsi) 514 515 {
+1
arch/x86/kernel/dumpstack.c
··· 234 234 bust_spinlocks(1); 235 235 return flags; 236 236 } 237 + EXPORT_SYMBOL_GPL(oops_begin); 237 238 238 239 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) 239 240 {
+1
arch/x86/kernel/e820.c
··· 14 14 #include <linux/bootmem.h> 15 15 #include <linux/pfn.h> 16 16 #include <linux/suspend.h> 17 + #include <linux/acpi.h> 17 18 #include <linux/firmware-map.h> 18 19 #include <linux/memblock.h> 19 20
+13 -5
drivers/acpi/Kconfig
··· 51 51 For backwards compatibility, this option allows 52 52 deprecated /proc/acpi/ files to exist, even when 53 53 they have been replaced by functions in /sys. 54 - The deprecated files (and their replacements) include: 55 54 56 - /proc/acpi/processor/*/throttling (/sys/class/thermal/ 57 - cooling_device*/*) 58 - /proc/acpi/video/*/brightness (/sys/class/backlight/) 59 - /proc/acpi/thermal_zone/*/* (/sys/class/thermal/) 60 55 This option has no effect on /proc/acpi/ files 61 56 and functions which do not yet exist in /sys. 62 57 ··· 69 74 /proc/acpi/ac_adapter/* (sys/class/power_supply/*) 70 75 This option has no effect on /proc/acpi/ directories 71 76 and functions, which do not yet exist in /sys 77 + This option, together with the proc directories, will be 78 + deleted in 2.6.39. 72 79 73 80 Say N to delete power /proc/acpi/ directories that have moved to /sys/ 74 81 ··· 206 209 207 210 To compile this driver as a module, choose M here: 208 211 the module will be called processor. 212 + config ACPI_IPMI 213 + tristate "IPMI" 214 + depends on EXPERIMENTAL && IPMI_SI && IPMI_HANDLER 215 + default n 216 + help 217 + This driver enables ACPI to access the BMC controller. It 218 + uses the IPMI request/response messages to communicate with the 219 + BMC controller, which can be found on the server. 220 + 221 + To compile this driver as a module, choose M here: 222 + the module will be called acpi_ipmi. 209 223 210 224 config ACPI_HOTPLUG_CPU 211 225 bool
+2 -1
drivers/acpi/Makefile
··· 24 24 # sleep related files 25 25 acpi-y += wakeup.o 26 26 acpi-y += sleep.o 27 - acpi-$(CONFIG_ACPI_SLEEP) += proc.o 27 + acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o 28 28 29 29 30 30 # ··· 69 69 processor-$(CONFIG_CPU_FREQ) += processor_perflib.o 70 70 71 71 obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o 72 + obj-$(CONFIG_ACPI_IPMI) += acpi_ipmi.o 72 73 73 74 obj-$(CONFIG_ACPI_APEI) += apei/
+2 -1
drivers/acpi/ac.c
··· 197 197 { 198 198 struct proc_dir_entry *entry = NULL; 199 199 200 - 200 + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded," 201 + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); 201 202 if (!acpi_device_dir(device)) { 202 203 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), 203 204 acpi_ac_dir);
+525
drivers/acpi/acpi_ipmi.c
··· 1 + /* 2 + * acpi_ipmi.c - ACPI IPMI opregion 3 + * 4 + * Copyright (C) 2010 Intel Corporation 5 + * Copyright (C) 2010 Zhao Yakui <yakui.zhao@intel.com> 6 + * 7 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 8 + * 9 + * This program is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License as published by 11 + * the Free Software Foundation; either version 2 of the License, or (at 12 + * your option) any later version. 13 + * 14 + * This program is distributed in the hope that it will be useful, but 15 + * WITHOUT ANY WARRANTY; without even the implied warranty of 16 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 17 + * General Public License for more details. 18 + * 19 + * You should have received a copy of the GNU General Public License along 20 + * with this program; if not, write to the Free Software Foundation, Inc., 21 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 22 + * 23 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 24 + */ 25 + 26 + #include <linux/kernel.h> 27 + #include <linux/module.h> 28 + #include <linux/init.h> 29 + #include <linux/types.h> 30 + #include <linux/delay.h> 31 + #include <linux/proc_fs.h> 32 + #include <linux/seq_file.h> 33 + #include <linux/interrupt.h> 34 + #include <linux/list.h> 35 + #include <linux/spinlock.h> 36 + #include <linux/io.h> 37 + #include <acpi/acpi_bus.h> 38 + #include <acpi/acpi_drivers.h> 39 + #include <linux/ipmi.h> 40 + #include <linux/device.h> 41 + #include <linux/pnp.h> 42 + 43 + MODULE_AUTHOR("Zhao Yakui"); 44 + MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); 45 + MODULE_LICENSE("GPL"); 46 + 47 + #define IPMI_FLAGS_HANDLER_INSTALL 0 48 + 49 + #define ACPI_IPMI_OK 0 50 + #define ACPI_IPMI_TIMEOUT 0x10 51 + #define ACPI_IPMI_UNKNOWN 0x07 52 + /* the IPMI timeout is 5s */ 53 + #define IPMI_TIMEOUT (5 * HZ) 54 + 55 + struct acpi_ipmi_device { 56 + /* the device list attached to driver_data.ipmi_devices */ 57 + struct list_head head; 58 + /* the IPMI request message list */ 59 + struct list_head tx_msg_list; 60 + struct mutex tx_msg_lock; 61 + acpi_handle handle; 62 + struct pnp_dev *pnp_dev; 63 + ipmi_user_t user_interface; 64 + int ipmi_ifnum; /* IPMI interface number */ 65 + long curr_msgid; 66 + unsigned long flags; 67 + struct ipmi_smi_info smi_data; 68 + }; 69 + 70 + struct ipmi_driver_data { 71 + struct list_head ipmi_devices; 72 + struct ipmi_smi_watcher bmc_events; 73 + struct ipmi_user_hndl ipmi_hndlrs; 74 + struct mutex ipmi_lock; 75 + }; 76 + 77 + struct acpi_ipmi_msg { 78 + struct list_head head; 79 + /* 80 + * General speaking the addr type should be SI_ADDR_TYPE. And 81 + * the addr channel should be BMC. 82 + * In fact it can also be IPMB type. But we will have to 83 + * parse it from the Netfn command buffer. It is so complex 84 + * that it is skipped. 85 + */ 86 + struct ipmi_addr addr; 87 + long tx_msgid; 88 + /* it is used to track whether the IPMI message is finished */ 89 + struct completion tx_complete; 90 + struct kernel_ipmi_msg tx_message; 91 + int msg_done; 92 + /* tx data . 
And copy it from ACPI object buffer */ 93 + u8 tx_data[64]; 94 + int tx_len; 95 + u8 rx_data[64]; 96 + int rx_len; 97 + struct acpi_ipmi_device *device; 98 + }; 99 + 100 + /* IPMI request/response buffer per ACPI 4.0, sec 5.5.2.4.3.2 */ 101 + struct acpi_ipmi_buffer { 102 + u8 status; 103 + u8 length; 104 + u8 data[64]; 105 + }; 106 + 107 + static void ipmi_register_bmc(int iface, struct device *dev); 108 + static void ipmi_bmc_gone(int iface); 109 + static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data); 110 + static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device); 111 + static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device); 112 + 113 + static struct ipmi_driver_data driver_data = { 114 + .ipmi_devices = LIST_HEAD_INIT(driver_data.ipmi_devices), 115 + .bmc_events = { 116 + .owner = THIS_MODULE, 117 + .new_smi = ipmi_register_bmc, 118 + .smi_gone = ipmi_bmc_gone, 119 + }, 120 + .ipmi_hndlrs = { 121 + .ipmi_recv_hndl = ipmi_msg_handler, 122 + }, 123 + }; 124 + 125 + static struct acpi_ipmi_msg *acpi_alloc_ipmi_msg(struct acpi_ipmi_device *ipmi) 126 + { 127 + struct acpi_ipmi_msg *ipmi_msg; 128 + struct pnp_dev *pnp_dev = ipmi->pnp_dev; 129 + 130 + ipmi_msg = kzalloc(sizeof(struct acpi_ipmi_msg), GFP_KERNEL); 131 + if (!ipmi_msg) { 132 + dev_warn(&pnp_dev->dev, "Can't allocate memory for ipmi_msg\n"); 133 + return NULL; 134 + } 135 + init_completion(&ipmi_msg->tx_complete); 136 + INIT_LIST_HEAD(&ipmi_msg->head); 137 + ipmi_msg->device = ipmi; 138 + return ipmi_msg; 139 + } 140 + 141 + #define IPMI_OP_RGN_NETFN(offset) ((offset >> 8) & 0xff) 142 + #define IPMI_OP_RGN_CMD(offset) (offset & 0xff) 143 + static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, 144 + acpi_physical_address address, 145 + acpi_integer *value) 146 + { 147 + struct kernel_ipmi_msg *msg; 148 + struct acpi_ipmi_buffer *buffer; 149 + struct acpi_ipmi_device *device; 150 + 151 + msg = &tx_msg->tx_message; 152 + /* 153 + * IPMI network function and command are encoded in the address 154 + * within the IPMI OpRegion; see ACPI 4.0, sec 5.5.2.4.3. 155 + */ 156 + msg->netfn = IPMI_OP_RGN_NETFN(address); 157 + msg->cmd = IPMI_OP_RGN_CMD(address); 158 + msg->data = tx_msg->tx_data; 159 + /* 160 + * value is the parameter passed by the IPMI opregion space handler. 161 + * It points to the IPMI request message buffer 162 + */ 163 + buffer = (struct acpi_ipmi_buffer *)value; 164 + /* copy the tx message data */ 165 + msg->data_len = buffer->length; 166 + memcpy(tx_msg->tx_data, buffer->data, msg->data_len); 167 + /* 168 + * now the default type is SYSTEM_INTERFACE and channel type is BMC. 169 + * If the netfn is APP_REQUEST and the cmd is SEND_MESSAGE, 170 + * the addr type should be changed to IPMB. Then we will have to parse 171 + * the IPMI request message buffer to get the IPMB address. 172 + * If so, please fix me. 173 + */ 174 + tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; 175 + tx_msg->addr.channel = IPMI_BMC_CHANNEL; 176 + tx_msg->addr.data[0] = 0; 177 + 178 + /* Get the msgid */ 179 + device = tx_msg->device; 180 + mutex_lock(&device->tx_msg_lock); 181 + device->curr_msgid++; 182 + tx_msg->tx_msgid = device->curr_msgid; 183 + mutex_unlock(&device->tx_msg_lock); 184 + } 185 + 186 + static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, 187 + acpi_integer *value, int rem_time) 188 + { 189 + struct acpi_ipmi_buffer *buffer; 190 + 191 + /* 192 + * value is also used as output parameter. 
It represents the response 193 + * IPMI message returned by IPMI command. 194 + */ 195 + buffer = (struct acpi_ipmi_buffer *)value; 196 + if (!rem_time && !msg->msg_done) { 197 + buffer->status = ACPI_IPMI_TIMEOUT; 198 + return; 199 + } 200 + /* 201 + * If the flag of msg_done is not set or the recv length is zero, it 202 + * means that the IPMI command is not executed correctly. 203 + * The status code will be ACPI_IPMI_UNKNOWN. 204 + */ 205 + if (!msg->msg_done || !msg->rx_len) { 206 + buffer->status = ACPI_IPMI_UNKNOWN; 207 + return; 208 + } 209 + /* 210 + * If the IPMI response message is obtained correctly, the status code 211 + * will be ACPI_IPMI_OK 212 + */ 213 + buffer->status = ACPI_IPMI_OK; 214 + buffer->length = msg->rx_len; 215 + memcpy(buffer->data, msg->rx_data, msg->rx_len); 216 + } 217 + 218 + static void ipmi_flush_tx_msg(struct acpi_ipmi_device *ipmi) 219 + { 220 + struct acpi_ipmi_msg *tx_msg, *temp; 221 + int count = HZ / 10; 222 + struct pnp_dev *pnp_dev = ipmi->pnp_dev; 223 + 224 + list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { 225 + /* wake up the sleep thread on the Tx msg */ 226 + complete(&tx_msg->tx_complete); 227 + } 228 + 229 + /* wait for about 100ms to flush the tx message list */ 230 + while (count--) { 231 + if (list_empty(&ipmi->tx_msg_list)) 232 + break; 233 + schedule_timeout(1); 234 + } 235 + if (!list_empty(&ipmi->tx_msg_list)) 236 + dev_warn(&pnp_dev->dev, "tx msg list is not NULL\n"); 237 + } 238 + 239 + static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) 240 + { 241 + struct acpi_ipmi_device *ipmi_device = user_msg_data; 242 + int msg_found = 0; 243 + struct acpi_ipmi_msg *tx_msg; 244 + struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; 245 + 246 + if (msg->user != ipmi_device->user_interface) { 247 + dev_warn(&pnp_dev->dev, "Unexpected response is returned. " 248 + "returned user %p, expected user %p\n", 249 + msg->user, ipmi_device->user_interface); 250 + ipmi_free_recv_msg(msg); 251 + return; 252 + } 253 + mutex_lock(&ipmi_device->tx_msg_lock); 254 + list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { 255 + if (msg->msgid == tx_msg->tx_msgid) { 256 + msg_found = 1; 257 + break; 258 + } 259 + } 260 + 261 + mutex_unlock(&ipmi_device->tx_msg_lock); 262 + if (!msg_found) { 263 + dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " 264 + "returned.\n", msg->msgid); 265 + ipmi_free_recv_msg(msg); 266 + return; 267 + } 268 + 269 + if (msg->msg.data_len) { 270 + /* copy the response data to Rx_data buffer */ 271 + memcpy(tx_msg->rx_data, msg->msg_data, msg->msg.data_len); 272 + tx_msg->rx_len = msg->msg.data_len; 273 + tx_msg->msg_done = 1; 274 + } 275 + complete(&tx_msg->tx_complete); 276 + ipmi_free_recv_msg(msg); 277 + }; 278 + 279 + static void ipmi_register_bmc(int iface, struct device *dev) 280 + { 281 + struct acpi_ipmi_device *ipmi_device, *temp; 282 + struct pnp_dev *pnp_dev; 283 + ipmi_user_t user; 284 + int err; 285 + struct ipmi_smi_info smi_data; 286 + acpi_handle handle; 287 + 288 + err = ipmi_get_smi_info(iface, &smi_data); 289 + 290 + if (err) 291 + return; 292 + 293 + if (smi_data.addr_src != SI_ACPI) { 294 + put_device(smi_data.dev); 295 + return; 296 + } 297 + 298 + handle = smi_data.addr_info.acpi_info.acpi_handle; 299 + 300 + mutex_lock(&driver_data.ipmi_lock); 301 + list_for_each_entry(temp, &driver_data.ipmi_devices, head) { 302 + /* 303 + * if the corresponding ACPI handle is already added 304 + * to the device list, don't add it again. 
305 + */ 306 + if (temp->handle == handle) 307 + goto out; 308 + } 309 + 310 + ipmi_device = kzalloc(sizeof(*ipmi_device), GFP_KERNEL); 311 + 312 + if (!ipmi_device) 313 + goto out; 314 + 315 + pnp_dev = to_pnp_dev(smi_data.dev); 316 + ipmi_device->handle = handle; 317 + ipmi_device->pnp_dev = pnp_dev; 318 + 319 + err = ipmi_create_user(iface, &driver_data.ipmi_hndlrs, 320 + ipmi_device, &user); 321 + if (err) { 322 + dev_warn(&pnp_dev->dev, "Can't create IPMI user interface\n"); 323 + kfree(ipmi_device); 324 + goto out; 325 + } 326 + acpi_add_ipmi_device(ipmi_device); 327 + ipmi_device->user_interface = user; 328 + ipmi_device->ipmi_ifnum = iface; 329 + mutex_unlock(&driver_data.ipmi_lock); 330 + memcpy(&ipmi_device->smi_data, &smi_data, sizeof(struct ipmi_smi_info)); 331 + return; 332 + 333 + out: 334 + mutex_unlock(&driver_data.ipmi_lock); 335 + put_device(smi_data.dev); 336 + return; 337 + } 338 + 339 + static void ipmi_bmc_gone(int iface) 340 + { 341 + struct acpi_ipmi_device *ipmi_device, *temp; 342 + 343 + mutex_lock(&driver_data.ipmi_lock); 344 + list_for_each_entry_safe(ipmi_device, temp, 345 + &driver_data.ipmi_devices, head) { 346 + if (ipmi_device->ipmi_ifnum != iface) 347 + continue; 348 + 349 + acpi_remove_ipmi_device(ipmi_device); 350 + put_device(ipmi_device->smi_data.dev); 351 + kfree(ipmi_device); 352 + break; 353 + } 354 + mutex_unlock(&driver_data.ipmi_lock); 355 + } 356 + /* -------------------------------------------------------------------------- 357 + * Address Space Management 358 + * -------------------------------------------------------------------------- */ 359 + /* 360 + * This is the IPMI opregion space handler. 361 + * @function: indicates the read/write. In fact as the IPMI message is driven 362 + * by command, only write is meaningful. 363 + * @address: This contains the netfn/command of IPMI request message. 364 + * @bits : not used. 365 + * @value : it is an in/out parameter. It points to the IPMI message buffer. 366 + * Before the IPMI message is sent, it represents the actual request 367 + * IPMI message. After the IPMI message is finished, it represents 368 + * the response IPMI message returned by IPMI command. 369 + * @handler_context: IPMI device context. 370 + */ 371 + 372 + static acpi_status 373 + acpi_ipmi_space_handler(u32 function, acpi_physical_address address, 374 + u32 bits, acpi_integer *value, 375 + void *handler_context, void *region_context) 376 + { 377 + struct acpi_ipmi_msg *tx_msg; 378 + struct acpi_ipmi_device *ipmi_device = handler_context; 379 + int err, rem_time; 380 + acpi_status status; 381 + /* 382 + * IPMI opregion message. 383 + * IPMI message is firstly written to the BMC and system software 384 + * can get the respsonse. So it is unmeaningful for the read access 385 + * of IPMI opregion. 
386 + */ 387 + if ((function & ACPI_IO_MASK) == ACPI_READ) 388 + return AE_TYPE; 389 + 390 + if (!ipmi_device->user_interface) 391 + return AE_NOT_EXIST; 392 + 393 + tx_msg = acpi_alloc_ipmi_msg(ipmi_device); 394 + if (!tx_msg) 395 + return AE_NO_MEMORY; 396 + 397 + acpi_format_ipmi_msg(tx_msg, address, value); 398 + mutex_lock(&ipmi_device->tx_msg_lock); 399 + list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); 400 + mutex_unlock(&ipmi_device->tx_msg_lock); 401 + err = ipmi_request_settime(ipmi_device->user_interface, 402 + &tx_msg->addr, 403 + tx_msg->tx_msgid, 404 + &tx_msg->tx_message, 405 + NULL, 0, 0, 0); 406 + if (err) { 407 + status = AE_ERROR; 408 + goto end_label; 409 + } 410 + rem_time = wait_for_completion_timeout(&tx_msg->tx_complete, 411 + IPMI_TIMEOUT); 412 + acpi_format_ipmi_response(tx_msg, value, rem_time); 413 + status = AE_OK; 414 + 415 + end_label: 416 + mutex_lock(&ipmi_device->tx_msg_lock); 417 + list_del(&tx_msg->head); 418 + mutex_unlock(&ipmi_device->tx_msg_lock); 419 + kfree(tx_msg); 420 + return status; 421 + } 422 + 423 + static void ipmi_remove_space_handler(struct acpi_ipmi_device *ipmi) 424 + { 425 + if (!test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) 426 + return; 427 + 428 + acpi_remove_address_space_handler(ipmi->handle, 429 + ACPI_ADR_SPACE_IPMI, &acpi_ipmi_space_handler); 430 + 431 + clear_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); 432 + } 433 + 434 + static int ipmi_install_space_handler(struct acpi_ipmi_device *ipmi) 435 + { 436 + acpi_status status; 437 + 438 + if (test_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags)) 439 + return 0; 440 + 441 + status = acpi_install_address_space_handler(ipmi->handle, 442 + ACPI_ADR_SPACE_IPMI, 443 + &acpi_ipmi_space_handler, 444 + NULL, ipmi); 445 + if (ACPI_FAILURE(status)) { 446 + struct pnp_dev *pnp_dev = ipmi->pnp_dev; 447 + dev_warn(&pnp_dev->dev, "Can't register IPMI opregion space " 448 + "handle\n"); 449 + return -EINVAL; 450 + } 451 + set_bit(IPMI_FLAGS_HANDLER_INSTALL, &ipmi->flags); 452 + return 0; 453 + } 454 + 455 + static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device) 456 + { 457 + 458 + INIT_LIST_HEAD(&ipmi_device->head); 459 + 460 + mutex_init(&ipmi_device->tx_msg_lock); 461 + INIT_LIST_HEAD(&ipmi_device->tx_msg_list); 462 + ipmi_install_space_handler(ipmi_device); 463 + 464 + list_add_tail(&ipmi_device->head, &driver_data.ipmi_devices); 465 + } 466 + 467 + static void acpi_remove_ipmi_device(struct acpi_ipmi_device *ipmi_device) 468 + { 469 + /* 470 + * If the IPMI user interface is created, it should be 471 + * destroyed. 
472 + */ 473 + if (ipmi_device->user_interface) { 474 + ipmi_destroy_user(ipmi_device->user_interface); 475 + ipmi_device->user_interface = NULL; 476 + } 477 + /* flush the Tx_msg list */ 478 + if (!list_empty(&ipmi_device->tx_msg_list)) 479 + ipmi_flush_tx_msg(ipmi_device); 480 + 481 + list_del(&ipmi_device->head); 482 + ipmi_remove_space_handler(ipmi_device); 483 + } 484 + 485 + static int __init acpi_ipmi_init(void) 486 + { 487 + int result = 0; 488 + 489 + if (acpi_disabled) 490 + return result; 491 + 492 + mutex_init(&driver_data.ipmi_lock); 493 + 494 + result = ipmi_smi_watcher_register(&driver_data.bmc_events); 495 + 496 + return result; 497 + } 498 + 499 + static void __exit acpi_ipmi_exit(void) 500 + { 501 + struct acpi_ipmi_device *ipmi_device, *temp; 502 + 503 + if (acpi_disabled) 504 + return; 505 + 506 + ipmi_smi_watcher_unregister(&driver_data.bmc_events); 507 + 508 + /* 509 + * When one smi_watcher is unregistered, it is only deleted 510 + * from the smi_watcher list; the smi_gone callback function 511 + * is not called. So explicitly uninstall the ACPI IPMI opregion 512 + * handler and free it. 513 + */ 514 + mutex_lock(&driver_data.ipmi_lock); 515 + list_for_each_entry_safe(ipmi_device, temp, 516 + &driver_data.ipmi_devices, head) { 517 + acpi_remove_ipmi_device(ipmi_device); 518 + put_device(ipmi_device->smi_data.dev); 519 + kfree(ipmi_device); 520 + } 521 + mutex_unlock(&driver_data.ipmi_lock); 522 + } 523 + 524 + module_init(acpi_ipmi_init); 525 + module_exit(acpi_ipmi_exit);
+1 -1
drivers/acpi/acpica/Makefile
··· 14 14 15 15 acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ 16 16 evmisc.o evrgnini.o evxface.o evxfregn.o \ 17 - evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o 17 + evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o 18 18 19 19 acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ 20 20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
+11 -10
drivers/acpi/acpica/acevents.h
··· 51 51 52 52 acpi_status acpi_ev_install_xrupt_handlers(void); 53 53 54 - acpi_status acpi_ev_install_fadt_gpes(void); 55 - 56 54 u32 acpi_ev_fixed_event_detect(void); 57 55 58 56 /* ··· 80 82 81 83 acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); 82 84 83 - acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info); 85 + acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); 84 86 85 - acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info); 87 + acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info); 86 88 87 89 struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, 88 90 u32 gpe_number); ··· 90 92 struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number, 91 93 struct acpi_gpe_block_info 92 94 *gpe_block); 95 + 96 + acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info); 93 97 94 98 /* 95 99 * evgpeblk - Upper-level GPE block support ··· 107 107 acpi_status 108 108 acpi_ev_initialize_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 109 109 struct acpi_gpe_block_info *gpe_block, 110 - void *ignored); 110 + void *context); 111 111 112 112 acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block); 113 113 114 114 u32 115 - acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, 115 + acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, 116 + struct acpi_gpe_event_info *gpe_event_info, 116 117 u32 gpe_number); 117 118 118 119 /* ··· 127 126 acpi_ev_match_gpe_method(acpi_handle obj_handle, 128 127 u32 level, void *context, void **return_value); 129 128 130 - acpi_status 131 - acpi_ev_match_prw_and_gpe(acpi_handle obj_handle, 132 - u32 level, void *context, void **return_value); 133 - 134 129 /* 135 130 * evgpeutil - GPE utilities 136 131 */ ··· 134 137 acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context); 135 138 136 139 u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info); 140 + 141 + acpi_status 142 + acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 143 + struct acpi_gpe_block_info *gpe_block, void *context); 137 144 138 145 struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number); 139 146
+6 -1
drivers/acpi/acpica/acglobal.h
··· 146 146 147 147 extern u32 acpi_gbl_nesting_level; 148 148 149 + ACPI_EXTERN u32 acpi_gpe_count; 150 + ACPI_EXTERN u32 acpi_fixed_event_count[ACPI_NUM_FIXED_EVENTS]; 151 + 149 152 /* Support for dynamic control method tracing mechanism */ 150 153 151 154 ACPI_EXTERN u32 acpi_gbl_original_dbg_level; ··· 373 370 ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; 374 371 ACPI_EXTERN struct acpi_gpe_block_info 375 372 *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; 376 - ACPI_EXTERN u8 acpi_all_gpes_initialized; 373 + ACPI_EXTERN u8 acpi_gbl_all_gpes_initialized; 374 + ACPI_EXTERN ACPI_GBL_EVENT_HANDLER acpi_gbl_global_event_handler; 375 + ACPI_EXTERN void *acpi_gbl_global_event_handler_context; 377 376 378 377 /***************************************************************************** 379 378 *
+1 -1
drivers/acpi/acpica/achware.h
··· 94 94 struct acpi_gpe_register_info *gpe_register_info); 95 95 96 96 acpi_status 97 - acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action); 97 + acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action); 98 98 99 99 acpi_status 100 100 acpi_hw_disable_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+7 -6
drivers/acpi/acpica/aclocal.h
··· 408 408 409 409 /* Dispatch info for each GPE -- either a method or handler, cannot be both */ 410 410 411 - struct acpi_handler_info { 412 - acpi_event_handler address; /* Address of handler, if any */ 411 + struct acpi_gpe_handler_info { 412 + acpi_gpe_handler address; /* Address of handler, if any */ 413 413 void *context; /* Context to be passed to handler */ 414 414 struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ 415 - u8 orig_flags; /* Original misc info about this GPE */ 416 - u8 orig_enabled; /* Set if the GPE was originally enabled */ 415 + u8 original_flags; /* Original (pre-handler) GPE info */ 416 + u8 originally_enabled; /* True if GPE was originally enabled */ 417 417 }; 418 418 419 419 union acpi_gpe_dispatch_info { 420 420 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 421 - struct acpi_handler_info *handler; 421 + struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ 422 + struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ 422 423 }; 423 424 424 425 /* ··· 459 458 u32 register_count; /* Number of register pairs in block */ 460 459 u16 gpe_count; /* Number of individual GPEs in block */ 461 460 u8 block_base_number; /* Base GPE number for this block */ 462 - u8 initialized; /* If set, the GPE block has been initialized */ 461 + u8 initialized; /* TRUE if this block is initialized */ 463 462 }; 464 463 465 464 /* Information about GPE interrupt handlers, one per each interrupt level used for GPEs */
+10 -2
drivers/acpi/acpica/evevent.c
··· 217 217 status_bit_mask) 218 218 && (fixed_enable & acpi_gbl_fixed_event_info[i]. 219 219 enable_bit_mask)) { 220 + /* 221 + * Found an active (signalled) event. Invoke global event 222 + * handler if present. 223 + */ 224 + acpi_fixed_event_count[i]++; 225 + if (acpi_gbl_global_event_handler) { 226 + acpi_gbl_global_event_handler 227 + (ACPI_EVENT_TYPE_FIXED, NULL, i, 228 + acpi_gbl_global_event_handler_context); 229 + } 220 230 221 - /* Found an active (signalled) event */ 222 - acpi_os_fixed_event_count(i); 223 231 int_status |= acpi_ev_fixed_event_dispatch(i); 224 232 } 225 233 }
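This hunk (together with the matching call in evgpe.c below) funnels every fixed event through a single optional global callback. A sketch of such a callback on the host side, assuming the acpi_install_global_event_handler() interface that accompanies these globals (not shown in this hunk; the example_* names and the counting are illustrative):

    static u32 example_event_count;

    static void example_global_event_handler(u32 event_type, acpi_handle device,
                                             u32 event_number, void *context)
    {
            /* invoked at event level for both fixed events and GPEs */
            if (event_type == ACPI_EVENT_TYPE_FIXED ||
                event_type == ACPI_EVENT_TYPE_GPE)
                    example_event_count++;
    }

    /* installed once with:
     * acpi_install_global_event_handler(example_global_event_handler, NULL);
     */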
+174 -91
drivers/acpi/acpica/evgpe.c
··· 52 52 /* Local prototypes */ 53 53 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context); 54 54 55 + static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context); 56 + 55 57 /******************************************************************************* 56 58 * 57 59 * FUNCTION: acpi_ev_update_gpe_enable_mask ··· 104 102 * 105 103 * RETURN: Status 106 104 * 107 - * DESCRIPTION: Clear the given GPE from stale events and enable it. 105 + * DESCRIPTION: Clear a GPE of stale events and enable it. 108 106 * 109 107 ******************************************************************************/ 110 108 acpi_status ··· 115 113 ACPI_FUNCTION_TRACE(ev_enable_gpe); 116 114 117 115 /* 118 - * We will only allow a GPE to be enabled if it has either an 119 - * associated method (_Lxx/_Exx) or a handler. Otherwise, the 120 - * GPE will be immediately disabled by acpi_ev_gpe_dispatch the 121 - * first time it fires. 116 + * We will only allow a GPE to be enabled if it has either an associated 117 + * method (_Lxx/_Exx) or a handler, or is using the implicit notify 118 + * feature. Otherwise, the GPE will be immediately disabled by 119 + * acpi_ev_gpe_dispatch the first time it fires. 122 120 */ 123 - if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) { 121 + if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 122 + ACPI_GPE_DISPATCH_NONE) { 124 123 return_ACPI_STATUS(AE_NO_HANDLER); 125 124 } 126 125 ··· 140 137 141 138 /******************************************************************************* 142 139 * 143 - * FUNCTION: acpi_raw_enable_gpe 140 + * FUNCTION: acpi_ev_add_gpe_reference 144 141 * 145 - * PARAMETERS: gpe_event_info - GPE to enable 142 + * PARAMETERS: gpe_event_info - Add a reference to this GPE 146 143 * 147 144 * RETURN: Status 148 145 * ··· 151 148 * 152 149 ******************************************************************************/ 153 150 154 - acpi_status acpi_raw_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) 151 + acpi_status acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) 155 152 { 156 153 acpi_status status = AE_OK; 154 + 155 + ACPI_FUNCTION_TRACE(ev_add_gpe_reference); 157 156 158 157 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) { 159 158 return_ACPI_STATUS(AE_LIMIT); ··· 163 158 164 159 gpe_event_info->runtime_count++; 165 160 if (gpe_event_info->runtime_count == 1) { 161 + 162 + /* Enable on first reference */ 163 + 166 164 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 167 165 if (ACPI_SUCCESS(status)) { 168 166 status = acpi_ev_enable_gpe(gpe_event_info); ··· 181 173 182 174 /******************************************************************************* 183 175 * 184 - * FUNCTION: acpi_raw_disable_gpe 176 + * FUNCTION: acpi_ev_remove_gpe_reference 185 177 * 186 - * PARAMETERS: gpe_event_info - GPE to disable 178 + * PARAMETERS: gpe_event_info - Remove a reference to this GPE 187 179 * 188 180 * RETURN: Status 189 181 * ··· 192 184 * 193 185 ******************************************************************************/ 194 186 195 - acpi_status acpi_raw_disable_gpe(struct acpi_gpe_event_info *gpe_event_info) 187 + acpi_status acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info) 196 188 { 197 189 acpi_status status = AE_OK; 190 + 191 + ACPI_FUNCTION_TRACE(ev_remove_gpe_reference); 198 192 199 193 if (!gpe_event_info->runtime_count) { 200 194 return_ACPI_STATUS(AE_LIMIT); ··· 204 194 205 195 gpe_event_info->runtime_count--; 206 196 if 
(!gpe_event_info->runtime_count) { 197 + 198 + /* Disable on last reference */ 199 + 207 200 status = acpi_ev_update_gpe_enable_mask(gpe_event_info); 208 201 if (ACPI_SUCCESS(status)) { 209 202 status = acpi_hw_low_set_gpe(gpe_event_info, ··· 392 379 } 393 380 394 381 ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS, 395 - "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n", 382 + "Read GPE Register at GPE%02X: Status=%02X, Enable=%02X\n", 396 383 gpe_register_info->base_gpe_number, 397 384 status_reg, enable_reg)); 398 385 ··· 418 405 * or method. 419 406 */ 420 407 int_status |= 421 - acpi_ev_gpe_dispatch(&gpe_block-> 408 + acpi_ev_gpe_dispatch(gpe_block-> 409 + node, 410 + &gpe_block-> 422 411 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); 423 412 } 424 413 } ··· 450 435 * an interrupt handler. 451 436 * 452 437 ******************************************************************************/ 453 - static void acpi_ev_asynch_enable_gpe(void *context); 454 438 455 439 static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) 456 440 { 457 - struct acpi_gpe_event_info *gpe_event_info = (void *)context; 441 + struct acpi_gpe_event_info *gpe_event_info = context; 458 442 acpi_status status; 459 - struct acpi_gpe_event_info local_gpe_event_info; 443 + struct acpi_gpe_event_info *local_gpe_event_info; 460 444 struct acpi_evaluate_info *info; 461 445 462 446 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 447 + 448 + /* Allocate a local GPE block */ 449 + 450 + local_gpe_event_info = 451 + ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info)); 452 + if (!local_gpe_event_info) { 453 + ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY, "while handling a GPE")); 454 + return_VOID; 455 + } 463 456 464 457 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 465 458 if (ACPI_FAILURE(status)) { ··· 485 462 * Take a snapshot of the GPE info for this level - we copy the info to 486 463 * prevent a race condition with remove_handler/remove_block. 487 464 */ 488 - ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info, 465 + ACPI_MEMCPY(local_gpe_event_info, gpe_event_info, 489 466 sizeof(struct acpi_gpe_event_info)); 490 467 491 468 status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); ··· 493 470 return_VOID; 494 471 } 495 472 496 - /* 497 - * Must check for control method type dispatch one more time to avoid a 498 - * race with ev_gpe_install_handler 499 - */ 500 - if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) == 501 - ACPI_GPE_DISPATCH_METHOD) { 473 + /* Do the correct dispatch - normal method or implicit notify */ 474 + 475 + switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 476 + case ACPI_GPE_DISPATCH_NOTIFY: 477 + 478 + /* 479 + * Implicit notify. 480 + * Dispatch a DEVICE_WAKE notify to the appropriate handler. 481 + * NOTE: the request is queued for execution after this method 482 + * completes. The notify handlers are NOT invoked synchronously 483 + * from this thread -- because handlers may in turn run other 484 + * control methods. 485 + */ 486 + status = 487 + acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. 
488 + device_node, 489 + ACPI_NOTIFY_DEVICE_WAKE); 490 + break; 491 + 492 + case ACPI_GPE_DISPATCH_METHOD: 502 493 503 494 /* Allocate the evaluation information block */ 504 495 ··· 525 488 * control method that corresponds to this GPE 526 489 */ 527 490 info->prefix_node = 528 - local_gpe_event_info.dispatch.method_node; 491 + local_gpe_event_info->dispatch.method_node; 529 492 info->flags = ACPI_IGNORE_RETURN_VALUE; 530 493 531 494 status = acpi_ns_evaluate(info); ··· 536 499 ACPI_EXCEPTION((AE_INFO, status, 537 500 "while evaluating GPE method [%4.4s]", 538 501 acpi_ut_get_node_name 539 - (local_gpe_event_info.dispatch. 502 + (local_gpe_event_info->dispatch. 540 503 method_node))); 541 504 } 505 + 506 + break; 507 + 508 + default: 509 + return_VOID; /* Should never happen */ 542 510 } 511 + 543 512 /* Defer enabling of GPE until all notify handlers are done */ 544 - acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe, 545 - gpe_event_info); 513 + 514 + status = acpi_os_execute(OSL_NOTIFY_HANDLER, 515 + acpi_ev_asynch_enable_gpe, 516 + local_gpe_event_info); 517 + if (ACPI_FAILURE(status)) { 518 + ACPI_FREE(local_gpe_event_info); 519 + } 546 520 return_VOID; 547 521 } 548 522 549 - static void acpi_ev_asynch_enable_gpe(void *context) 523 + 524 + /******************************************************************************* 525 + * 526 + * FUNCTION: acpi_ev_asynch_enable_gpe 527 + * 528 + * PARAMETERS: Context (gpe_event_info) - Info for this GPE 529 + * Callback from acpi_os_execute 530 + * 531 + * RETURN: None 532 + * 533 + * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to 534 + * complete (i.e., finish execution of Notify) 535 + * 536 + ******************************************************************************/ 537 + 538 + static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context) 550 539 { 551 540 struct acpi_gpe_event_info *gpe_event_info = context; 541 + 542 + (void)acpi_ev_finish_gpe(gpe_event_info); 543 + 544 + ACPI_FREE(gpe_event_info); 545 + return; 546 + } 547 + 548 + 549 + /******************************************************************************* 550 + * 551 + * FUNCTION: acpi_ev_finish_gpe 552 + * 553 + * PARAMETERS: gpe_event_info - Info for this GPE 554 + * 555 + * RETURN: Status 556 + * 557 + * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution 558 + * of a GPE method or a synchronous or asynchronous GPE handler. 559 + * 560 + ******************************************************************************/ 561 + 562 + acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) 563 + { 552 564 acpi_status status; 565 + 553 566 if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 554 567 ACPI_GPE_LEVEL_TRIGGERED) { 555 568 /* 556 - * GPE is level-triggered, we clear the GPE status bit after handling 557 - * the event. 569 + * GPE is level-triggered, we clear the GPE status bit after 570 + * handling the event. 558 571 */ 559 572 status = acpi_hw_clear_gpe(gpe_event_info); 560 573 if (ACPI_FAILURE(status)) { 561 - return_VOID; 574 + return (status); 562 575 } 563 576 } 564 577 565 578 /* 566 - * Enable this GPE, conditionally. This means that the GPE will only be 567 - * physically enabled if the enable_for_run bit is set in the event_info 579 + * Enable this GPE, conditionally. This means that the GPE will 580 + * only be physically enabled if the enable_for_run bit is set 581 + * in the event_info. 
568 582 */ 569 - (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_COND_ENABLE); 570 - 571 - return_VOID; 583 + (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE); 584 + return (AE_OK); 572 585 } 586 + 573 587 574 588 /******************************************************************************* 575 589 * 576 590 * FUNCTION: acpi_ev_gpe_dispatch 577 591 * 578 - * PARAMETERS: gpe_event_info - Info for this GPE 592 + * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 593 + * gpe_event_info - Info for this GPE 579 594 * gpe_number - Number relative to the parent GPE block 580 595 * 581 596 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED ··· 640 551 ******************************************************************************/ 641 552 642 553 u32 643 - acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) 554 + acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, 555 + struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) 644 556 { 645 557 acpi_status status; 558 + u32 return_value; 646 559 647 560 ACPI_FUNCTION_TRACE(ev_gpe_dispatch); 648 561 649 - acpi_os_gpe_count(gpe_number); 562 + /* Invoke global event handler if present */ 563 + 564 + acpi_gpe_count++; 565 + if (acpi_gbl_global_event_handler) { 566 + acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device, 567 + gpe_number, 568 + acpi_gbl_global_event_handler_context); 569 + } 650 570 651 571 /* 652 572 * If edge-triggered, clear the GPE status bit now. Note that ··· 666 568 status = acpi_hw_clear_gpe(gpe_event_info); 667 569 if (ACPI_FAILURE(status)) { 668 570 ACPI_EXCEPTION((AE_INFO, status, 669 - "Unable to clear GPE[0x%2X]", 670 - gpe_number)); 571 + "Unable to clear GPE%02X", gpe_number)); 671 572 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 672 573 } 673 574 } 674 575 675 576 /* 676 - * Dispatch the GPE to either an installed handler, or the control method 677 - * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke 678 - * it and do not attempt to run the method. If there is neither a handler 679 - * nor a method, we disable this GPE to prevent further such pointless 680 - * events from firing. 577 + * Always disable the GPE so that it does not keep firing before 578 + * any asynchronous activity completes (either from the execution 579 + * of a GPE method or an asynchronous GPE handler.) 580 + * 581 + * If there is no handler or method to run, just disable the 582 + * GPE and leave it disabled permanently to prevent further such 583 + * pointless events from firing. 584 + */ 585 + status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); 586 + if (ACPI_FAILURE(status)) { 587 + ACPI_EXCEPTION((AE_INFO, status, 588 + "Unable to disable GPE%02X", gpe_number)); 589 + return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 590 + } 591 + 592 + /* 593 + * Dispatch the GPE to either an installed handler or the control 594 + * method associated with this GPE (_Lxx or _Exx). If a handler 595 + * exists, we invoke it and do not attempt to run the method. 596 + * If there is neither a handler nor a method, leave the GPE 597 + * disabled. 681 598 */ 682 599 switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { 683 600 case ACPI_GPE_DISPATCH_HANDLER: 684 601 685 - /* 686 - * Invoke the installed handler (at interrupt level) 687 - * Ignore return status for now. 688 - * TBD: leave GPE disabled on error? 689 - */ 690 - (void)gpe_event_info->dispatch.handler->address(gpe_event_info-> 691 - dispatch. 
692 - handler-> 693 - context); 602 + /* Invoke the installed handler (at interrupt level) */ 694 603 695 - /* It is now safe to clear level-triggered events. */ 604 + return_value = 605 + gpe_event_info->dispatch.handler->address(gpe_device, 606 + gpe_number, 607 + gpe_event_info-> 608 + dispatch.handler-> 609 + context); 696 610 697 - if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) == 698 - ACPI_GPE_LEVEL_TRIGGERED) { 699 - status = acpi_hw_clear_gpe(gpe_event_info); 700 - if (ACPI_FAILURE(status)) { 701 - ACPI_EXCEPTION((AE_INFO, status, 702 - "Unable to clear GPE[0x%2X]", 703 - gpe_number)); 704 - return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 705 - } 611 + /* If requested, clear (if level-triggered) and reenable the GPE */ 612 + 613 + if (return_value & ACPI_REENABLE_GPE) { 614 + (void)acpi_ev_finish_gpe(gpe_event_info); 706 615 } 707 616 break; 708 617 709 618 case ACPI_GPE_DISPATCH_METHOD: 710 - 711 - /* 712 - * Disable the GPE, so it doesn't keep firing before the method has a 713 - * chance to run (it runs asynchronously with interrupts enabled). 714 - */ 715 - status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); 716 - if (ACPI_FAILURE(status)) { 717 - ACPI_EXCEPTION((AE_INFO, status, 718 - "Unable to disable GPE[0x%2X]", 719 - gpe_number)); 720 - return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 721 - } 619 + case ACPI_GPE_DISPATCH_NOTIFY: 722 620 723 621 /* 724 622 * Execute the method associated with the GPE ··· 725 631 gpe_event_info); 726 632 if (ACPI_FAILURE(status)) { 727 633 ACPI_EXCEPTION((AE_INFO, status, 728 - "Unable to queue handler for GPE[0x%2X] - event disabled", 634 + "Unable to queue handler for GPE%2X - event disabled", 729 635 gpe_number)); 730 636 } 731 637 break; ··· 738 644 * a GPE to be enabled if it has no handler or method. 739 645 */ 740 646 ACPI_ERROR((AE_INFO, 741 - "No handler or method for GPE[0x%2X], disabling event", 647 + "No handler or method for GPE%02X, disabling event", 742 648 gpe_number)); 743 649 744 - /* 745 - * Disable the GPE. The GPE will remain disabled a handler 746 - * is installed or ACPICA is restarted. 747 - */ 748 - status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); 749 - if (ACPI_FAILURE(status)) { 750 - ACPI_EXCEPTION((AE_INFO, status, 751 - "Unable to disable GPE[0x%2X]", 752 - gpe_number)); 753 - return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 754 - } 755 650 break; 756 651 } 757 652
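Under the reworked dispatch above, an installed GPE handler now receives the GPE device and number, and it asks ACPICA to clear and re-enable the GPE through its return value instead of doing so itself. A sketch of a handler against the new acpi_gpe_handler signature (the example_* name and the empty body are illustrative):

    static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
                                   void *context)
    {
            /* ... service the event source here ... */

            /*
             * ACPI_REENABLE_GPE makes acpi_ev_gpe_dispatch() call
             * acpi_ev_finish_gpe(): clear the status bit if the GPE is
             * level-triggered, then conditionally re-enable it.
             */
            return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
    }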
+18 -15
drivers/acpi/acpica/evgpeblk.c
··· 361 361 362 362 gpe_block->node = gpe_device; 363 363 gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH); 364 + gpe_block->initialized = FALSE; 364 365 gpe_block->register_count = register_count; 365 366 gpe_block->block_base_number = gpe_block_base_number; 366 - gpe_block->initialized = FALSE; 367 367 368 368 ACPI_MEMCPY(&gpe_block->block_address, gpe_block_address, 369 369 sizeof(struct acpi_generic_address)); ··· 386 386 return_ACPI_STATUS(status); 387 387 } 388 388 389 - acpi_all_gpes_initialized = FALSE; 389 + acpi_gbl_all_gpes_initialized = FALSE; 390 390 391 391 /* Find all GPE methods (_Lxx or_Exx) for this block */ 392 392 ··· 423 423 * 424 424 * FUNCTION: acpi_ev_initialize_gpe_block 425 425 * 426 - * PARAMETERS: gpe_device - Handle to the parent GPE block 427 - * gpe_block - Gpe Block info 426 + * PARAMETERS: acpi_gpe_callback 428 427 * 429 428 * RETURN: Status 430 429 * 431 - * DESCRIPTION: Initialize and enable a GPE block. First find and run any 432 - * _PRT methods associated with the block, then enable the 433 - * appropriate GPEs. 430 + * DESCRIPTION: Initialize and enable a GPE block. Enable GPEs that have 431 + * associated methods. 434 432 * Note: Assumes namespace is locked. 435 433 * 436 434 ******************************************************************************/ ··· 448 450 ACPI_FUNCTION_TRACE(ev_initialize_gpe_block); 449 451 450 452 /* 451 - * Ignore a null GPE block (e.g., if no GPE block 1 exists) and 452 - * GPE blocks that have been initialized already. 453 + * Ignore a null GPE block (e.g., if no GPE block 1 exists), and 454 + * any GPE blocks that have been initialized already. 453 455 */ 454 456 if (!gpe_block || gpe_block->initialized) { 455 457 return_ACPI_STATUS(AE_OK); ··· 457 459 458 460 /* 459 461 * Enable all GPEs that have a corresponding method and have the 460 - * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block must 461 - * be enabled via the acpi_enable_gpe() interface. 462 + * ACPI_GPE_CAN_WAKE flag unset. Any other GPEs within this block 463 + * must be enabled via the acpi_enable_gpe() interface. 462 464 */ 463 465 gpe_enabled_count = 0; 464 466 ··· 470 472 gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j; 471 473 gpe_event_info = &gpe_block->event_info[gpe_index]; 472 474 473 - /* Ignore GPEs that have no corresponding _Lxx/_Exx method */ 474 - 475 - if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD) 475 + /* 476 + * Ignore GPEs that have no corresponding _Lxx/_Exx method 477 + * and GPEs that are used to wake the system 478 + */ 479 + if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 480 + ACPI_GPE_DISPATCH_NONE) 481 + || ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) 482 + == ACPI_GPE_DISPATCH_HANDLER) 476 483 || (gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 477 484 continue; 478 485 } 479 486 480 - status = acpi_raw_enable_gpe(gpe_event_info); 487 + status = acpi_ev_add_gpe_reference(gpe_event_info); 481 488 if (ACPI_FAILURE(status)) { 482 489 ACPI_EXCEPTION((AE_INFO, status, 483 490 "Could not enable GPE 0x%02X",
+21 -4
drivers/acpi/acpica/evgpeinit.c
··· 45 45 #include "accommon.h" 46 46 #include "acevents.h" 47 47 #include "acnamesp.h" 48 - #include "acinterp.h" 49 48 50 49 #define _COMPONENT ACPI_EVENTS 51 50 ACPI_MODULE_NAME("evgpeinit") 51 + 52 + /* 53 + * Note: History of _PRW support in ACPICA 54 + * 55 + * Originally (2000 - 2010), the GPE initialization code performed a walk of 56 + * the entire namespace to execute the _PRW methods and detect all GPEs 57 + * capable of waking the system. 58 + * 59 + * As of 10/2010, the _PRW method execution has been removed since it is 60 + * actually unnecessary. The host OS must in fact execute all _PRW methods 61 + * in order to identify the device/power-resource dependencies. We now put 62 + * the onus on the host OS to identify the wake GPEs as part of this process 63 + * and to inform ACPICA of these GPEs via the acpi_setup_gpe_for_wake interface. This 64 + * not only reduces the complexity of the ACPICA initialization code, but in 65 + * some cases (on systems with very large namespaces) it should reduce the 66 + * kernel boot time as well. 67 + */ 52 68 53 69 /******************************************************************************* 54 70 * ··· 238 222 acpi_status status = AE_OK; 239 223 240 224 /* 241 - * 2) Find any _Lxx/_Exx GPE methods that have just been loaded. 225 + * Find any _Lxx/_Exx GPE methods that have just been loaded. 242 226 * 243 227 * Any GPEs that correspond to new _Lxx/_Exx methods are immediately 244 228 * enabled. ··· 251 235 return; 252 236 } 253 237 238 + walk_info.count = 0; 254 239 walk_info.owner_id = table_owner_id; 255 240 walk_info.execute_by_owner_id = TRUE; 256 - walk_info.count = 0; 257 241 258 242 /* Walk the interrupt level descriptor list */ 259 243 ··· 314 298 * xx - is the GPE number [in HEX] 315 299 * 316 300 * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods 317 - * with that owner. 301 + * with that owner. 318 302 * 319 303 ******************************************************************************/ 320 304 ··· 431 415 * Add the GPE information from above to the gpe_event_info block for 432 416 * use during dispatch of this GPE. 433 417 */ 418 + gpe_event_info->flags &= ~(ACPI_GPE_DISPATCH_MASK); 434 419 gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD); 435 420 gpe_event_info->dispatch.method_node = method_node; 436 421
+39
drivers/acpi/acpica/evgpeutil.c
··· 154 154 155 155 /******************************************************************************* 156 156 * 157 + * FUNCTION: acpi_ev_get_gpe_device 158 + * 159 + * PARAMETERS: GPE_WALK_CALLBACK 160 + * 161 + * RETURN: Status 162 + * 163 + * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE 164 + * block device. NULL if the GPE is one of the FADT-defined GPEs. 165 + * 166 + ******************************************************************************/ 167 + 168 + acpi_status 169 + acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 170 + struct acpi_gpe_block_info *gpe_block, void *context) 171 + { 172 + struct acpi_gpe_device_info *info = context; 173 + 174 + /* Increment Index by the number of GPEs in this block */ 175 + 176 + info->next_block_base_index += gpe_block->gpe_count; 177 + 178 + if (info->index < info->next_block_base_index) { 179 + /* 180 + * The GPE index is within this block, get the node. Leave the node 181 + * NULL for the FADT-defined GPEs 182 + */ 183 + if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { 184 + info->gpe_device = gpe_block->node; 185 + } 186 + 187 + info->status = AE_OK; 188 + return (AE_CTRL_END); 189 + } 190 + 191 + return (AE_OK); 192 + } 193 + 194 + /******************************************************************************* 195 + * 157 196 * FUNCTION: acpi_ev_get_gpe_xrupt_block 158 197 * 159 198 * PARAMETERS: interrupt_number - Interrupt for a GPE block
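
acpi_ev_get_gpe_device() follows the GPE_WALK_CALLBACK convention: the walker hands it every (interrupt block, GPE block) pair, and returning AE_CTRL_END terminates the walk early. A minimal hypothetical callback using the same convention:

	/* Sum the GPE count of every installed block */
	static acpi_status count_gpes(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
				      struct acpi_gpe_block_info *gpe_block,
				      void *context)
	{
		u32 *total = context;

		*total += gpe_block->gpe_count;
		return (AE_OK);	/* AE_CTRL_END would stop the walk */
	}

	/* Usage: u32 total = 0; acpi_ev_walk_gpe_list(count_gpes, &total); */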
+57 -41
drivers/acpi/acpica/evmisc.c
··· 284 284 * RETURN: ACPI_INTERRUPT_HANDLED 285 285 * 286 286 * DESCRIPTION: Invoked directly from the SCI handler when a global lock 287 - * release interrupt occurs. Attempt to acquire the global lock, 288 - * if successful, signal the thread waiting for the lock. 287 + * release interrupt occurs. If there's a thread waiting for 288 + * the global lock, signal it. 289 289 * 290 290 * NOTE: Assumes that the semaphore can be signaled from interrupt level. If 291 291 * this is not possible for some reason, a separate thread will have to be 292 292 * scheduled to do this. 293 293 * 294 294 ******************************************************************************/ 295 + static u8 acpi_ev_global_lock_pending; 296 + static spinlock_t _acpi_ev_global_lock_pending_lock; 297 + #define acpi_ev_global_lock_pending_lock &_acpi_ev_global_lock_pending_lock 295 298 296 299 static u32 acpi_ev_global_lock_handler(void *context) 297 300 { 298 - u8 acquired = FALSE; 301 + acpi_status status; 302 + acpi_cpu_flags flags; 299 303 300 - /* 301 - * Attempt to get the lock. 302 - * 303 - * If we don't get it now, it will be marked pending and we will 304 - * take another interrupt when it becomes free. 305 - */ 306 - ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); 307 - if (acquired) { 304 + flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); 308 305 309 - /* Got the lock, now wake all threads waiting for it */ 310 - 311 - acpi_gbl_global_lock_acquired = TRUE; 312 - /* Send a unit to the semaphore */ 313 - 314 - if (ACPI_FAILURE 315 - (acpi_os_signal_semaphore 316 - (acpi_gbl_global_lock_semaphore, 1))) { 317 - ACPI_ERROR((AE_INFO, 318 - "Could not signal Global Lock semaphore")); 319 - } 306 + if (!acpi_ev_global_lock_pending) { 307 + goto out; 320 308 } 309 + 310 + /* Send a unit to the semaphore */ 311 + 312 + status = acpi_os_signal_semaphore(acpi_gbl_global_lock_semaphore, 1); 313 + if (ACPI_FAILURE(status)) { 314 + ACPI_ERROR((AE_INFO, "Could not signal Global Lock semaphore")); 315 + } 316 + 317 + acpi_ev_global_lock_pending = FALSE; 318 + 319 + out: 320 + acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); 321 321 322 322 return (ACPI_INTERRUPT_HANDLED); 323 323 } ··· 415 415 416 416 acpi_status acpi_ev_acquire_global_lock(u16 timeout) 417 417 { 418 + acpi_cpu_flags flags; 418 419 acpi_status status = AE_OK; 419 420 u8 acquired = FALSE; 420 421 ··· 468 467 return_ACPI_STATUS(AE_OK); 469 468 } 470 469 471 - /* Attempt to acquire the actual hardware lock */ 470 + flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); 472 471 473 - ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); 474 - if (acquired) { 472 + do { 475 473 476 - /* We got the lock */ 474 + /* Attempt to acquire the actual hardware lock */ 477 475 476 + ACPI_ACQUIRE_GLOBAL_LOCK(acpi_gbl_FACS, acquired); 477 + if (acquired) { 478 + acpi_gbl_global_lock_acquired = TRUE; 479 + 480 + ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 481 + "Acquired hardware Global Lock\n")); 482 + break; 483 + } 484 + 485 + acpi_ev_global_lock_pending = TRUE; 486 + 487 + acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); 488 + 489 + /* 490 + * Did not get the lock. The pending bit was set above, and we 491 + * must wait until we get the global lock released interrupt. 
492 + */ 478 493 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, 479 - "Acquired hardware Global Lock\n")); 494 + "Waiting for hardware Global Lock\n")); 480 495 481 - acpi_gbl_global_lock_acquired = TRUE; 482 - return_ACPI_STATUS(AE_OK); 483 - } 496 + /* 497 + * Wait for handshake with the global lock interrupt handler. 498 + * This interface releases the interpreter if we must wait. 499 + */ 500 + status = acpi_ex_system_wait_semaphore( 501 + acpi_gbl_global_lock_semaphore, 502 + ACPI_WAIT_FOREVER); 484 503 485 - /* 486 - * Did not get the lock. The pending bit was set above, and we must now 487 - * wait until we get the global lock released interrupt. 488 - */ 489 - ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Waiting for hardware Global Lock\n")); 504 + flags = acpi_os_acquire_lock(acpi_ev_global_lock_pending_lock); 490 505 491 - /* 492 - * Wait for handshake with the global lock interrupt handler. 493 - * This interface releases the interpreter if we must wait. 494 - */ 495 - status = acpi_ex_system_wait_semaphore(acpi_gbl_global_lock_semaphore, 496 - ACPI_WAIT_FOREVER); 506 + } while (ACPI_SUCCESS(status)); 507 + 508 + acpi_ev_global_lock_pending = FALSE; 509 + 510 + acpi_os_release_lock(acpi_ev_global_lock_pending_lock, flags); 497 511 498 512 return_ACPI_STATUS(status); 499 513 }
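
The pending-flag handshake above stays invisible to callers, who keep using the long-standing public pair of interfaces. A minimal usage sketch (the function name is hypothetical; the two ACPICA interfaces are real):

	static acpi_status touch_firmware_shared_hw(void)
	{
		u32 lock_handle;
		acpi_status status;

		status = acpi_acquire_global_lock(ACPI_WAIT_FOREVER,
						  &lock_handle);
		if (ACPI_FAILURE(status))
			return (status);

		/* ... access hardware shared with SMM firmware ... */

		return (acpi_release_global_lock(lock_handle));
	}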
+64 -13
drivers/acpi/acpica/evxface.c
··· 92 92 93 93 ACPI_EXPORT_SYMBOL(acpi_install_exception_handler) 94 94 #endif /* ACPI_FUTURE_USAGE */ 95 + 96 + /******************************************************************************* 97 + * 98 + * FUNCTION: acpi_install_global_event_handler 99 + * 100 + * PARAMETERS: Handler - Pointer to the global event handler function 101 + * Context - Value passed to the handler on each event 102 + * 103 + * RETURN: Status 104 + * 105 + * DESCRIPTION: Saves the pointer to the handler function. The global handler 106 + * is invoked upon each incoming GPE and Fixed Event. It is 107 + * invoked at interrupt level at the time of the event dispatch. 108 + * Can be used to update event counters, etc. 109 + * 110 + ******************************************************************************/ 111 + acpi_status 112 + acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, void *context) 113 + { 114 + acpi_status status; 115 + 116 + ACPI_FUNCTION_TRACE(acpi_install_global_event_handler); 117 + 118 + /* Parameter validation */ 119 + 120 + if (!handler) { 121 + return_ACPI_STATUS(AE_BAD_PARAMETER); 122 + } 123 + 124 + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 125 + if (ACPI_FAILURE(status)) { 126 + return_ACPI_STATUS(status); 127 + } 128 + 129 + /* Don't allow two handlers. */ 130 + 131 + if (acpi_gbl_global_event_handler) { 132 + status = AE_ALREADY_EXISTS; 133 + goto cleanup; 134 + } 135 + 136 + acpi_gbl_global_event_handler = handler; 137 + acpi_gbl_global_event_handler_context = context; 138 + 139 + cleanup: 140 + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 141 + return_ACPI_STATUS(status); 142 + } 143 + 144 + ACPI_EXPORT_SYMBOL(acpi_install_global_event_handler) 145 + 95 146 /******************************************************************************* 96 147 * 97 148 * FUNCTION: acpi_install_fixed_event_handler ··· 722 671 acpi_status 723 672 acpi_install_gpe_handler(acpi_handle gpe_device, 724 673 u32 gpe_number, 725 - u32 type, acpi_event_handler address, void *context) 674 + u32 type, acpi_gpe_handler address, void *context) 726 675 { 727 676 struct acpi_gpe_event_info *gpe_event_info; 728 - struct acpi_handler_info *handler; 677 + struct acpi_gpe_handler_info *handler; 729 678 acpi_status status; 730 679 acpi_cpu_flags flags; 731 680 ··· 744 693 745 694 /* Allocate memory for the handler object */ 746 695 747 - handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_handler_info)); 696 + handler = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_handler_info)); 748 697 if (!handler) { 749 698 status = AE_NO_MEMORY; 750 699 goto unlock_and_exit; ··· 773 722 handler->address = address; 774 723 handler->context = context; 775 724 handler->method_node = gpe_event_info->dispatch.method_node; 776 - handler->orig_flags = gpe_event_info->flags & 725 + handler->original_flags = gpe_event_info->flags & 777 726 (ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 778 727 779 728 /* ··· 782 731 * disabled now to avoid spurious execution of the handler. 
783 732 */ 784 733 785 - if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) 734 + if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) 786 735 && gpe_event_info->runtime_count) { 787 - handler->orig_enabled = 1; 788 - (void)acpi_raw_disable_gpe(gpe_event_info); 736 + handler->originally_enabled = 1; 737 + (void)acpi_ev_remove_gpe_reference(gpe_event_info); 789 738 } 790 739 791 740 /* Install the handler */ ··· 828 777 ******************************************************************************/ 829 778 acpi_status 830 779 acpi_remove_gpe_handler(acpi_handle gpe_device, 831 - u32 gpe_number, acpi_event_handler address) 780 + u32 gpe_number, acpi_gpe_handler address) 832 781 { 833 782 struct acpi_gpe_event_info *gpe_event_info; 834 - struct acpi_handler_info *handler; 783 + struct acpi_gpe_handler_info *handler; 835 784 acpi_status status; 836 785 acpi_cpu_flags flags; 837 786 ··· 886 835 gpe_event_info->dispatch.method_node = handler->method_node; 887 836 gpe_event_info->flags &= 888 837 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 889 - gpe_event_info->flags |= handler->orig_flags; 838 + gpe_event_info->flags |= handler->original_flags; 890 839 891 840 /* 892 841 * If the GPE was previously associated with a method and it was ··· 894 843 * post-initialization configuration. 895 844 */ 896 845 897 - if ((handler->orig_flags & ACPI_GPE_DISPATCH_METHOD) 898 - && handler->orig_enabled) 899 - (void)acpi_raw_enable_gpe(gpe_event_info); 846 + if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) 847 + && handler->originally_enabled) 848 + (void)acpi_ev_add_gpe_reference(gpe_event_info); 900 849 901 850 /* Now we can free the handler object */ 902 851
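
A hedged sketch of a client of the new global event handler. The handler body is hypothetical, and the four-argument signature (event type, device, event number, context) is assumed from the ACPI_GBL_EVENT_HANDLER type used above:

	/* Invoked at interrupt level for every GPE and Fixed Event, so
	 * the work done here must stay minimal (e.g. event counters) */
	static void count_events_handler(u32 event_type, acpi_handle device,
					 u32 event_number, void *context)
	{
		u32 *counter = context;

		(*counter)++;
	}

	/* Installed once during host initialization:
	 * acpi_install_global_event_handler(count_events_handler, &count); */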
-600
drivers/acpi/acpica/evxfevnt.c
··· 43 43 44 44 #include <acpi/acpi.h> 45 45 #include "accommon.h" 46 - #include "acevents.h" 47 - #include "acnamesp.h" 48 46 #include "actables.h" 49 47 50 48 #define _COMPONENT ACPI_EVENTS 51 49 ACPI_MODULE_NAME("evxfevnt") 52 - 53 - /* Local prototypes */ 54 - static acpi_status 55 - acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 56 - struct acpi_gpe_block_info *gpe_block, void *context); 57 50 58 51 /******************************************************************************* 59 52 * ··· 206 213 207 214 /******************************************************************************* 208 215 * 209 - * FUNCTION: acpi_gpe_wakeup 210 - * 211 - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 212 - * gpe_number - GPE level within the GPE block 213 - * Action - Enable or Disable 214 - * 215 - * RETURN: Status 216 - * 217 - * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. 218 - * 219 - ******************************************************************************/ 220 - acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action) 221 - { 222 - acpi_status status = AE_OK; 223 - struct acpi_gpe_event_info *gpe_event_info; 224 - struct acpi_gpe_register_info *gpe_register_info; 225 - acpi_cpu_flags flags; 226 - u32 register_bit; 227 - 228 - ACPI_FUNCTION_TRACE(acpi_gpe_wakeup); 229 - 230 - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 231 - 232 - /* Ensure that we have a valid GPE number */ 233 - 234 - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 235 - if (!gpe_event_info || !(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 236 - status = AE_BAD_PARAMETER; 237 - goto unlock_and_exit; 238 - } 239 - 240 - gpe_register_info = gpe_event_info->register_info; 241 - if (!gpe_register_info) { 242 - status = AE_NOT_EXIST; 243 - goto unlock_and_exit; 244 - } 245 - 246 - register_bit = 247 - acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); 248 - 249 - /* Perform the action */ 250 - 251 - switch (action) { 252 - case ACPI_GPE_ENABLE: 253 - ACPI_SET_BIT(gpe_register_info->enable_for_wake, 254 - (u8)register_bit); 255 - break; 256 - 257 - case ACPI_GPE_DISABLE: 258 - ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, 259 - (u8)register_bit); 260 - break; 261 - 262 - default: 263 - ACPI_ERROR((AE_INFO, "%u, Invalid action", action)); 264 - status = AE_BAD_PARAMETER; 265 - break; 266 - } 267 - 268 - unlock_and_exit: 269 - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 270 - return_ACPI_STATUS(status); 271 - } 272 - 273 - ACPI_EXPORT_SYMBOL(acpi_gpe_wakeup) 274 - 275 - /******************************************************************************* 276 - * 277 - * FUNCTION: acpi_enable_gpe 278 - * 279 - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 280 - * gpe_number - GPE level within the GPE block 281 - * 282 - * RETURN: Status 283 - * 284 - * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is 285 - * hardware-enabled. 
286 - * 287 - ******************************************************************************/ 288 - acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) 289 - { 290 - acpi_status status = AE_BAD_PARAMETER; 291 - struct acpi_gpe_event_info *gpe_event_info; 292 - acpi_cpu_flags flags; 293 - 294 - ACPI_FUNCTION_TRACE(acpi_enable_gpe); 295 - 296 - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 297 - 298 - /* Ensure that we have a valid GPE number */ 299 - 300 - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 301 - if (gpe_event_info) { 302 - status = acpi_raw_enable_gpe(gpe_event_info); 303 - } 304 - 305 - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 306 - return_ACPI_STATUS(status); 307 - } 308 - ACPI_EXPORT_SYMBOL(acpi_enable_gpe) 309 - 310 - /******************************************************************************* 311 - * 312 - * FUNCTION: acpi_disable_gpe 313 - * 314 - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 315 - * gpe_number - GPE level within the GPE block 316 - * 317 - * RETURN: Status 318 - * 319 - * DESCRIPTION: Remove a reference to a GPE. When the last reference is 320 - * removed, only then is the GPE disabled (for runtime GPEs), or 321 - * the GPE mask bit disabled (for wake GPEs) 322 - * 323 - ******************************************************************************/ 324 - acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) 325 - { 326 - acpi_status status = AE_BAD_PARAMETER; 327 - struct acpi_gpe_event_info *gpe_event_info; 328 - acpi_cpu_flags flags; 329 - 330 - ACPI_FUNCTION_TRACE(acpi_disable_gpe); 331 - 332 - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 333 - 334 - /* Ensure that we have a valid GPE number */ 335 - 336 - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 337 - if (gpe_event_info) { 338 - status = acpi_raw_disable_gpe(gpe_event_info) ; 339 - } 340 - 341 - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 342 - return_ACPI_STATUS(status); 343 - } 344 - ACPI_EXPORT_SYMBOL(acpi_disable_gpe) 345 - 346 - /******************************************************************************* 347 - * 348 - * FUNCTION: acpi_gpe_can_wake 349 - * 350 - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 351 - * gpe_number - GPE level within the GPE block 352 - * 353 - * RETURN: Status 354 - * 355 - * DESCRIPTION: Set the ACPI_GPE_CAN_WAKE flag for the given GPE. If the GPE 356 - * has a corresponding method and is currently enabled, disable it 357 - * (GPEs with corresponding methods are enabled unconditionally 358 - * during initialization, but GPEs that can wake up are expected 359 - * to be initially disabled). 
360 - * 361 - ******************************************************************************/ 362 - acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number) 363 - { 364 - acpi_status status = AE_OK; 365 - struct acpi_gpe_event_info *gpe_event_info; 366 - acpi_cpu_flags flags; 367 - 368 - ACPI_FUNCTION_TRACE(acpi_gpe_can_wake); 369 - 370 - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 371 - 372 - /* Ensure that we have a valid GPE number */ 373 - 374 - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 375 - if (gpe_event_info) { 376 - gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 377 - } else { 378 - status = AE_BAD_PARAMETER; 379 - } 380 - 381 - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 382 - return_ACPI_STATUS(status); 383 - } 384 - ACPI_EXPORT_SYMBOL(acpi_gpe_can_wake) 385 - 386 - /******************************************************************************* 387 - * 388 216 * FUNCTION: acpi_disable_event 389 217 * 390 218 * PARAMETERS: Event - The fixed eventto be enabled ··· 297 483 298 484 /******************************************************************************* 299 485 * 300 - * FUNCTION: acpi_clear_gpe 301 - * 302 - * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 303 - * gpe_number - GPE level within the GPE block 304 - * 305 - * RETURN: Status 306 - * 307 - * DESCRIPTION: Clear an ACPI event (general purpose) 308 - * 309 - ******************************************************************************/ 310 - acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) 311 - { 312 - acpi_status status = AE_OK; 313 - struct acpi_gpe_event_info *gpe_event_info; 314 - acpi_cpu_flags flags; 315 - 316 - ACPI_FUNCTION_TRACE(acpi_clear_gpe); 317 - 318 - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 319 - 320 - /* Ensure that we have a valid GPE number */ 321 - 322 - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 323 - if (!gpe_event_info) { 324 - status = AE_BAD_PARAMETER; 325 - goto unlock_and_exit; 326 - } 327 - 328 - status = acpi_hw_clear_gpe(gpe_event_info); 329 - 330 - unlock_and_exit: 331 - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 332 - return_ACPI_STATUS(status); 333 - } 334 - 335 - ACPI_EXPORT_SYMBOL(acpi_clear_gpe) 336 - /******************************************************************************* 337 - * 338 486 * FUNCTION: acpi_get_event_status 339 487 * 340 488 * PARAMETERS: Event - The fixed event ··· 351 575 } 352 576 353 577 ACPI_EXPORT_SYMBOL(acpi_get_event_status) 354 - 355 - /******************************************************************************* 356 - * 357 - * FUNCTION: acpi_get_gpe_status 358 - * 359 - * PARAMETERS: gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 360 - * gpe_number - GPE level within the GPE block 361 - * event_status - Where the current status of the event will 362 - * be returned 363 - * 364 - * RETURN: Status 365 - * 366 - * DESCRIPTION: Get status of an event (general purpose) 367 - * 368 - ******************************************************************************/ 369 - acpi_status 370 - acpi_get_gpe_status(acpi_handle gpe_device, 371 - u32 gpe_number, acpi_event_status *event_status) 372 - { 373 - acpi_status status = AE_OK; 374 - struct acpi_gpe_event_info *gpe_event_info; 375 - acpi_cpu_flags flags; 376 - 377 - ACPI_FUNCTION_TRACE(acpi_get_gpe_status); 378 - 379 - flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 380 - 381 - /* Ensure that we have a valid GPE number */ 382 - 383 - gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 384 - if (!gpe_event_info) { 385 - status = AE_BAD_PARAMETER; 386 - goto unlock_and_exit; 387 - } 388 - 389 - /* Obtain status on the requested GPE number */ 390 - 391 - status = acpi_hw_get_gpe_status(gpe_event_info, event_status); 392 - 393 - if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) 394 - *event_status |= ACPI_EVENT_FLAG_HANDLE; 395 - 396 - unlock_and_exit: 397 - acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 398 - return_ACPI_STATUS(status); 399 - } 400 - 401 - ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) 402 - /******************************************************************************* 403 - * 404 - * FUNCTION: acpi_install_gpe_block 405 - * 406 - * PARAMETERS: gpe_device - Handle to the parent GPE Block Device 407 - * gpe_block_address - Address and space_iD 408 - * register_count - Number of GPE register pairs in the block 409 - * interrupt_number - H/W interrupt for the block 410 - * 411 - * RETURN: Status 412 - * 413 - * DESCRIPTION: Create and Install a block of GPE registers 414 - * 415 - ******************************************************************************/ 416 - acpi_status 417 - acpi_install_gpe_block(acpi_handle gpe_device, 418 - struct acpi_generic_address *gpe_block_address, 419 - u32 register_count, u32 interrupt_number) 420 - { 421 - acpi_status status = AE_OK; 422 - union acpi_operand_object *obj_desc; 423 - struct acpi_namespace_node *node; 424 - struct acpi_gpe_block_info *gpe_block; 425 - 426 - ACPI_FUNCTION_TRACE(acpi_install_gpe_block); 427 - 428 - if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { 429 - return_ACPI_STATUS(AE_BAD_PARAMETER); 430 - } 431 - 432 - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 433 - if (ACPI_FAILURE(status)) { 434 - return (status); 435 - } 436 - 437 - node = acpi_ns_validate_handle(gpe_device); 438 - if (!node) { 439 - status = AE_BAD_PARAMETER; 440 - goto unlock_and_exit; 441 - } 442 - 443 - /* 444 - * For user-installed GPE Block Devices, the gpe_block_base_number 445 - * is always zero 446 - */ 447 - status = 448 - acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, 449 - interrupt_number, &gpe_block); 450 - if (ACPI_FAILURE(status)) { 451 - goto unlock_and_exit; 452 - } 453 - 454 - /* Install block in the device_object attached to the node */ 455 - 456 - obj_desc = acpi_ns_get_attached_object(node); 457 - if (!obj_desc) { 458 - 459 - /* 460 - * No object, create a new one (Device nodes do not always have 461 - * an attached object) 462 - */ 463 - obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); 464 - if (!obj_desc) { 465 - status = AE_NO_MEMORY; 466 - goto unlock_and_exit; 467 - } 468 - 469 - status = 470 - 
acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); 471 - 472 - /* Remove local reference to the object */ 473 - 474 - acpi_ut_remove_reference(obj_desc); 475 - 476 - if (ACPI_FAILURE(status)) { 477 - goto unlock_and_exit; 478 - } 479 - } 480 - 481 - /* Now install the GPE block in the device_object */ 482 - 483 - obj_desc->device.gpe_block = gpe_block; 484 - 485 - unlock_and_exit: 486 - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 487 - return_ACPI_STATUS(status); 488 - } 489 - 490 - ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) 491 - 492 - /******************************************************************************* 493 - * 494 - * FUNCTION: acpi_remove_gpe_block 495 - * 496 - * PARAMETERS: gpe_device - Handle to the parent GPE Block Device 497 - * 498 - * RETURN: Status 499 - * 500 - * DESCRIPTION: Remove a previously installed block of GPE registers 501 - * 502 - ******************************************************************************/ 503 - acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) 504 - { 505 - union acpi_operand_object *obj_desc; 506 - acpi_status status; 507 - struct acpi_namespace_node *node; 508 - 509 - ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); 510 - 511 - if (!gpe_device) { 512 - return_ACPI_STATUS(AE_BAD_PARAMETER); 513 - } 514 - 515 - status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 516 - if (ACPI_FAILURE(status)) { 517 - return (status); 518 - } 519 - 520 - node = acpi_ns_validate_handle(gpe_device); 521 - if (!node) { 522 - status = AE_BAD_PARAMETER; 523 - goto unlock_and_exit; 524 - } 525 - 526 - /* Get the device_object attached to the node */ 527 - 528 - obj_desc = acpi_ns_get_attached_object(node); 529 - if (!obj_desc || !obj_desc->device.gpe_block) { 530 - return_ACPI_STATUS(AE_NULL_OBJECT); 531 - } 532 - 533 - /* Delete the GPE block (but not the device_object) */ 534 - 535 - status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); 536 - if (ACPI_SUCCESS(status)) { 537 - obj_desc->device.gpe_block = NULL; 538 - } 539 - 540 - unlock_and_exit: 541 - (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 542 - return_ACPI_STATUS(status); 543 - } 544 - 545 - ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) 546 - 547 - /******************************************************************************* 548 - * 549 - * FUNCTION: acpi_get_gpe_device 550 - * 551 - * PARAMETERS: Index - System GPE index (0-current_gpe_count) 552 - * gpe_device - Where the parent GPE Device is returned 553 - * 554 - * RETURN: Status 555 - * 556 - * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL 557 - * gpe device indicates that the gpe number is contained in one of 558 - * the FADT-defined gpe blocks. Otherwise, the GPE block device. 
559 - * 560 - ******************************************************************************/ 561 - acpi_status 562 - acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) 563 - { 564 - struct acpi_gpe_device_info info; 565 - acpi_status status; 566 - 567 - ACPI_FUNCTION_TRACE(acpi_get_gpe_device); 568 - 569 - if (!gpe_device) { 570 - return_ACPI_STATUS(AE_BAD_PARAMETER); 571 - } 572 - 573 - if (index >= acpi_current_gpe_count) { 574 - return_ACPI_STATUS(AE_NOT_EXIST); 575 - } 576 - 577 - /* Setup and walk the GPE list */ 578 - 579 - info.index = index; 580 - info.status = AE_NOT_EXIST; 581 - info.gpe_device = NULL; 582 - info.next_block_base_index = 0; 583 - 584 - status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); 585 - if (ACPI_FAILURE(status)) { 586 - return_ACPI_STATUS(status); 587 - } 588 - 589 - *gpe_device = info.gpe_device; 590 - return_ACPI_STATUS(info.status); 591 - } 592 - 593 - ACPI_EXPORT_SYMBOL(acpi_get_gpe_device) 594 - 595 - /******************************************************************************* 596 - * 597 - * FUNCTION: acpi_ev_get_gpe_device 598 - * 599 - * PARAMETERS: GPE_WALK_CALLBACK 600 - * 601 - * RETURN: Status 602 - * 603 - * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE 604 - * block device. NULL if the GPE is one of the FADT-defined GPEs. 605 - * 606 - ******************************************************************************/ 607 - static acpi_status 608 - acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info, 609 - struct acpi_gpe_block_info *gpe_block, void *context) 610 - { 611 - struct acpi_gpe_device_info *info = context; 612 - 613 - /* Increment Index by the number of GPEs in this block */ 614 - 615 - info->next_block_base_index += gpe_block->gpe_count; 616 - 617 - if (info->index < info->next_block_base_index) { 618 - /* 619 - * The GPE index is within this block, get the node. 
Leave the node 620 - * NULL for the FADT-defined GPEs 621 - */ 622 - if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) { 623 - info->gpe_device = gpe_block->node; 624 - } 625 - 626 - info->status = AE_OK; 627 - return (AE_CTRL_END); 628 - } 629 - 630 - return (AE_OK); 631 - } 632 - 633 - /****************************************************************************** 634 - * 635 - * FUNCTION: acpi_disable_all_gpes 636 - * 637 - * PARAMETERS: None 638 - * 639 - * RETURN: Status 640 - * 641 - * DESCRIPTION: Disable and clear all GPEs in all GPE blocks 642 - * 643 - ******************************************************************************/ 644 - 645 - acpi_status acpi_disable_all_gpes(void) 646 - { 647 - acpi_status status; 648 - 649 - ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); 650 - 651 - status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 652 - if (ACPI_FAILURE(status)) { 653 - return_ACPI_STATUS(status); 654 - } 655 - 656 - status = acpi_hw_disable_all_gpes(); 657 - (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 658 - 659 - return_ACPI_STATUS(status); 660 - } 661 - 662 - /****************************************************************************** 663 - * 664 - * FUNCTION: acpi_enable_all_runtime_gpes 665 - * 666 - * PARAMETERS: None 667 - * 668 - * RETURN: Status 669 - * 670 - * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks 671 - * 672 - ******************************************************************************/ 673 - 674 - acpi_status acpi_enable_all_runtime_gpes(void) 675 - { 676 - acpi_status status; 677 - 678 - ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); 679 - 680 - status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 681 - if (ACPI_FAILURE(status)) { 682 - return_ACPI_STATUS(status); 683 - } 684 - 685 - status = acpi_hw_enable_all_runtime_gpes(); 686 - (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 687 - 688 - return_ACPI_STATUS(status); 689 - } 690 - 691 - /****************************************************************************** 692 - * 693 - * FUNCTION: acpi_update_gpes 694 - * 695 - * PARAMETERS: None 696 - * 697 - * RETURN: None 698 - * 699 - * DESCRIPTION: Enable all GPEs that have associated _Lxx or _Exx methods and 700 - * are not pointed to by any device _PRW methods indicating that 701 - * these GPEs are generally intended for system or device wakeup 702 - * (such GPEs have to be enabled directly when the devices whose 703 - * _PRW methods point to them are set up for wakeup signaling). 704 - * 705 - ******************************************************************************/ 706 - 707 - acpi_status acpi_update_gpes(void) 708 - { 709 - acpi_status status; 710 - 711 - ACPI_FUNCTION_TRACE(acpi_update_gpes); 712 - 713 - status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 714 - if (ACPI_FAILURE(status)) { 715 - return_ACPI_STATUS(status); 716 - } else if (acpi_all_gpes_initialized) { 717 - goto unlock; 718 - } 719 - 720 - status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL); 721 - if (ACPI_SUCCESS(status)) { 722 - acpi_all_gpes_initialized = TRUE; 723 - } 724 - 725 - unlock: 726 - (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 727 - 728 - return_ACPI_STATUS(status); 729 - }
+669
drivers/acpi/acpica/evxfgpe.c
··· 1 + /****************************************************************************** 2 + * 3 + * Module Name: evxfgpe - External Interfaces for General Purpose Events (GPEs) 4 + * 5 + *****************************************************************************/ 6 + 7 + /* 8 + * Copyright (C) 2000 - 2010, Intel Corp. 9 + * All rights reserved. 10 + * 11 + * Redistribution and use in source and binary forms, with or without 12 + * modification, are permitted provided that the following conditions 13 + * are met: 14 + * 1. Redistributions of source code must retain the above copyright 15 + * notice, this list of conditions, and the following disclaimer, 16 + * without modification. 17 + * 2. Redistributions in binary form must reproduce at minimum a disclaimer 18 + * substantially similar to the "NO WARRANTY" disclaimer below 19 + * ("Disclaimer") and any redistribution must be conditioned upon 20 + * including a substantially similar Disclaimer requirement for further 21 + * binary redistribution. 22 + * 3. Neither the names of the above-listed copyright holders nor the names 23 + * of any contributors may be used to endorse or promote products derived 24 + * from this software without specific prior written permission. 25 + * 26 + * Alternatively, this software may be distributed under the terms of the 27 + * GNU General Public License ("GPL") version 2 as published by the Free 28 + * Software Foundation. 29 + * 30 + * NO WARRANTY 31 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 32 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 33 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR 34 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 35 + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 36 + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 37 + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 38 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, 39 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 40 + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 41 + * POSSIBILITY OF SUCH DAMAGES. 42 + */ 43 + 44 + #include <acpi/acpi.h> 45 + #include "accommon.h" 46 + #include "acevents.h" 47 + #include "acnamesp.h" 48 + 49 + #define _COMPONENT ACPI_EVENTS 50 + ACPI_MODULE_NAME("evxfgpe") 51 + 52 + /****************************************************************************** 53 + * 54 + * FUNCTION: acpi_update_all_gpes 55 + * 56 + * PARAMETERS: None 57 + * 58 + * RETURN: Status 59 + * 60 + * DESCRIPTION: Complete GPE initialization and enable all GPEs that have 61 + * associated _Lxx or _Exx methods and are not pointed to by any 62 + * device _PRW methods (this indicates that these GPEs are 63 + * generally intended for system or device wakeup. Such GPEs 64 + * have to be enabled directly when the devices whose _PRW 65 + * methods point to them are set up for wakeup signaling.) 66 + * 67 + * NOTE: Should be called after any GPEs are added to the system. Primarily, 68 + * after the system _PRW methods have been run, but also after a GPE Block 69 + * Device has been added or if any new GPE methods have been added via a 70 + * dynamic table load. 
71 + * 72 + ******************************************************************************/ 73 + 74 + acpi_status acpi_update_all_gpes(void) 75 + { 76 + acpi_status status; 77 + 78 + ACPI_FUNCTION_TRACE(acpi_update_all_gpes); 79 + 80 + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 81 + if (ACPI_FAILURE(status)) { 82 + return_ACPI_STATUS(status); 83 + } 84 + 85 + if (acpi_gbl_all_gpes_initialized) { 86 + goto unlock_and_exit; 87 + } 88 + 89 + status = acpi_ev_walk_gpe_list(acpi_ev_initialize_gpe_block, NULL); 90 + if (ACPI_SUCCESS(status)) { 91 + acpi_gbl_all_gpes_initialized = TRUE; 92 + } 93 + 94 + unlock_and_exit: 95 + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 96 + 97 + return_ACPI_STATUS(status); 98 + } 99 + 100 + ACPI_EXPORT_SYMBOL(acpi_update_all_gpes) 101 + 102 + /******************************************************************************* 103 + * 104 + * FUNCTION: acpi_enable_gpe 105 + * 106 + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 107 + * gpe_number - GPE level within the GPE block 108 + * 109 + * RETURN: Status 110 + * 111 + * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is 112 + * hardware-enabled. 113 + * 114 + ******************************************************************************/ 115 + 116 + acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) 117 + { 118 + acpi_status status = AE_BAD_PARAMETER; 119 + struct acpi_gpe_event_info *gpe_event_info; 120 + acpi_cpu_flags flags; 121 + 122 + ACPI_FUNCTION_TRACE(acpi_enable_gpe); 123 + 124 + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 125 + 126 + /* Ensure that we have a valid GPE number */ 127 + 128 + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 129 + if (gpe_event_info) { 130 + status = acpi_ev_add_gpe_reference(gpe_event_info); 131 + } 132 + 133 + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 134 + return_ACPI_STATUS(status); 135 + } 136 + ACPI_EXPORT_SYMBOL(acpi_enable_gpe) 137 + 138 + /******************************************************************************* 139 + * 140 + * FUNCTION: acpi_disable_gpe 141 + * 142 + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 143 + * gpe_number - GPE level within the GPE block 144 + * 145 + * RETURN: Status 146 + * 147 + * DESCRIPTION: Remove a reference to a GPE. When the last reference is 148 + * removed, only then is the GPE disabled (for runtime GPEs), or 149 + * the GPE mask bit disabled (for wake GPEs) 150 + * 151 + ******************************************************************************/ 152 + 153 + acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number) 154 + { 155 + acpi_status status = AE_BAD_PARAMETER; 156 + struct acpi_gpe_event_info *gpe_event_info; 157 + acpi_cpu_flags flags; 158 + 159 + ACPI_FUNCTION_TRACE(acpi_disable_gpe); 160 + 161 + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 162 + 163 + /* Ensure that we have a valid GPE number */ 164 + 165 + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 166 + if (gpe_event_info) { 167 + status = acpi_ev_remove_gpe_reference(gpe_event_info) ; 168 + } 169 + 170 + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 171 + return_ACPI_STATUS(status); 172 + } 173 + ACPI_EXPORT_SYMBOL(acpi_disable_gpe) 174 + 175 + 176 + /******************************************************************************* 177 + * 178 + * FUNCTION: acpi_setup_gpe_for_wake 179 + * 180 + * PARAMETERS: wake_device - Device associated with the GPE (via _PRW) 181 + * gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 182 + * gpe_number - GPE level within the GPE block 183 + * 184 + * RETURN: Status 185 + * 186 + * DESCRIPTION: Mark a GPE as having the ability to wake the system. This 187 + * interface is intended to be used as the host executes the 188 + * _PRW methods (Power Resources for Wake) in the system tables. 189 + * Each _PRW appears under a Device Object (The wake_device), and 190 + * contains the info for the wake GPE associated with the 191 + * wake_device. 192 + * 193 + ******************************************************************************/ 194 + acpi_status 195 + acpi_setup_gpe_for_wake(acpi_handle wake_device, 196 + acpi_handle gpe_device, u32 gpe_number) 197 + { 198 + acpi_status status = AE_BAD_PARAMETER; 199 + struct acpi_gpe_event_info *gpe_event_info; 200 + struct acpi_namespace_node *device_node; 201 + acpi_cpu_flags flags; 202 + 203 + ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); 204 + 205 + /* Parameter Validation */ 206 + 207 + if (!wake_device) { 208 + /* 209 + * By forcing wake_device to be valid, we automatically enable the 210 + * implicit notify feature on all hosts. 211 + */ 212 + return_ACPI_STATUS(AE_BAD_PARAMETER); 213 + } 214 + 215 + /* Validate wake_device is of type Device */ 216 + 217 + device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); 218 + if (device_node->type != ACPI_TYPE_DEVICE) { 219 + return_ACPI_STATUS(AE_BAD_PARAMETER); 220 + } 221 + 222 + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 223 + 224 + /* Ensure that we have a valid GPE number */ 225 + 226 + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 227 + if (gpe_event_info) { 228 + /* 229 + * If there is no method or handler for this GPE, then the 230 + * wake_device will be notified whenever this GPE fires (aka 231 + * "implicit notify") Note: The GPE is assumed to be 232 + * level-triggered (for windows compatibility). 233 + */ 234 + if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == 235 + ACPI_GPE_DISPATCH_NONE) { 236 + gpe_event_info->flags = 237 + (ACPI_GPE_DISPATCH_NOTIFY | 238 + ACPI_GPE_LEVEL_TRIGGERED); 239 + gpe_event_info->dispatch.device_node = device_node; 240 + } 241 + 242 + gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 243 + status = AE_OK; 244 + } 245 + 246 + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 247 + return_ACPI_STATUS(status); 248 + } 249 + ACPI_EXPORT_SYMBOL(acpi_setup_gpe_for_wake) 250 + 251 + /******************************************************************************* 252 + * 253 + * FUNCTION: acpi_set_gpe_wake_mask 254 + * 255 + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 256 + * gpe_number - GPE level within the GPE block 257 + * Action - Enable or Disable 258 + * 259 + * RETURN: Status 260 + * 261 + * DESCRIPTION: Set or clear the GPE's wakeup enable mask bit. The GPE must 262 + * already be marked as a WAKE GPE. 
263 + * 264 + ******************************************************************************/ 265 + 266 + acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action) 267 + { 268 + acpi_status status = AE_OK; 269 + struct acpi_gpe_event_info *gpe_event_info; 270 + struct acpi_gpe_register_info *gpe_register_info; 271 + acpi_cpu_flags flags; 272 + u32 register_bit; 273 + 274 + ACPI_FUNCTION_TRACE(acpi_set_gpe_wake_mask); 275 + 276 + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 277 + 278 + /* 279 + * Ensure that we have a valid GPE number and that this GPE is in 280 + * fact a wake GPE 281 + */ 282 + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 283 + if (!gpe_event_info) { 284 + status = AE_BAD_PARAMETER; 285 + goto unlock_and_exit; 286 + } 287 + 288 + if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 289 + status = AE_TYPE; 290 + goto unlock_and_exit; 291 + } 292 + 293 + gpe_register_info = gpe_event_info->register_info; 294 + if (!gpe_register_info) { 295 + status = AE_NOT_EXIST; 296 + goto unlock_and_exit; 297 + } 298 + 299 + register_bit = 300 + acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); 301 + 302 + /* Perform the action */ 303 + 304 + switch (action) { 305 + case ACPI_GPE_ENABLE: 306 + ACPI_SET_BIT(gpe_register_info->enable_for_wake, 307 + (u8)register_bit); 308 + break; 309 + 310 + case ACPI_GPE_DISABLE: 311 + ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, 312 + (u8)register_bit); 313 + break; 314 + 315 + default: 316 + ACPI_ERROR((AE_INFO, "%u, Invalid action", action)); 317 + status = AE_BAD_PARAMETER; 318 + break; 319 + } 320 + 321 + unlock_and_exit: 322 + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 323 + return_ACPI_STATUS(status); 324 + } 325 + 326 + ACPI_EXPORT_SYMBOL(acpi_set_gpe_wake_mask) 327 + 328 + /******************************************************************************* 329 + * 330 + * FUNCTION: acpi_clear_gpe 331 + * 332 + * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 333 + * gpe_number - GPE level within the GPE block 334 + * 335 + * RETURN: Status 336 + * 337 + * DESCRIPTION: Clear an ACPI event (general purpose) 338 + * 339 + ******************************************************************************/ 340 + acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number) 341 + { 342 + acpi_status status = AE_OK; 343 + struct acpi_gpe_event_info *gpe_event_info; 344 + acpi_cpu_flags flags; 345 + 346 + ACPI_FUNCTION_TRACE(acpi_clear_gpe); 347 + 348 + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 349 + 350 + /* Ensure that we have a valid GPE number */ 351 + 352 + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 353 + if (!gpe_event_info) { 354 + status = AE_BAD_PARAMETER; 355 + goto unlock_and_exit; 356 + } 357 + 358 + status = acpi_hw_clear_gpe(gpe_event_info); 359 + 360 + unlock_and_exit: 361 + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 362 + return_ACPI_STATUS(status); 363 + } 364 + 365 + ACPI_EXPORT_SYMBOL(acpi_clear_gpe) 366 + 367 + /******************************************************************************* 368 + * 369 + * FUNCTION: acpi_get_gpe_status 370 + * 371 + * PARAMETERS: gpe_device - Parent GPE Device. 
NULL for GPE0/GPE1 372 + * gpe_number - GPE level within the GPE block 373 + * event_status - Where the current status of the event will 374 + * be returned 375 + * 376 + * RETURN: Status 377 + * 378 + * DESCRIPTION: Get the current status of a GPE (signalled/not_signalled) 379 + * 380 + ******************************************************************************/ 381 + acpi_status 382 + acpi_get_gpe_status(acpi_handle gpe_device, 383 + u32 gpe_number, acpi_event_status *event_status) 384 + { 385 + acpi_status status = AE_OK; 386 + struct acpi_gpe_event_info *gpe_event_info; 387 + acpi_cpu_flags flags; 388 + 389 + ACPI_FUNCTION_TRACE(acpi_get_gpe_status); 390 + 391 + flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 392 + 393 + /* Ensure that we have a valid GPE number */ 394 + 395 + gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 396 + if (!gpe_event_info) { 397 + status = AE_BAD_PARAMETER; 398 + goto unlock_and_exit; 399 + } 400 + 401 + /* Obtain status on the requested GPE number */ 402 + 403 + status = acpi_hw_get_gpe_status(gpe_event_info, event_status); 404 + 405 + if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) 406 + *event_status |= ACPI_EVENT_FLAG_HANDLE; 407 + 408 + unlock_and_exit: 409 + acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 410 + return_ACPI_STATUS(status); 411 + } 412 + 413 + ACPI_EXPORT_SYMBOL(acpi_get_gpe_status) 414 + 415 + /****************************************************************************** 416 + * 417 + * FUNCTION: acpi_disable_all_gpes 418 + * 419 + * PARAMETERS: None 420 + * 421 + * RETURN: Status 422 + * 423 + * DESCRIPTION: Disable and clear all GPEs in all GPE blocks 424 + * 425 + ******************************************************************************/ 426 + 427 + acpi_status acpi_disable_all_gpes(void) 428 + { 429 + acpi_status status; 430 + 431 + ACPI_FUNCTION_TRACE(acpi_disable_all_gpes); 432 + 433 + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 434 + if (ACPI_FAILURE(status)) { 435 + return_ACPI_STATUS(status); 436 + } 437 + 438 + status = acpi_hw_disable_all_gpes(); 439 + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 440 + 441 + return_ACPI_STATUS(status); 442 + } 443 + 444 + ACPI_EXPORT_SYMBOL(acpi_disable_all_gpes) 445 + 446 + /****************************************************************************** 447 + * 448 + * FUNCTION: acpi_enable_all_runtime_gpes 449 + * 450 + * PARAMETERS: None 451 + * 452 + * RETURN: Status 453 + * 454 + * DESCRIPTION: Enable all "runtime" GPEs, in all GPE blocks 455 + * 456 + ******************************************************************************/ 457 + 458 + acpi_status acpi_enable_all_runtime_gpes(void) 459 + { 460 + acpi_status status; 461 + 462 + ACPI_FUNCTION_TRACE(acpi_enable_all_runtime_gpes); 463 + 464 + status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 465 + if (ACPI_FAILURE(status)) { 466 + return_ACPI_STATUS(status); 467 + } 468 + 469 + status = acpi_hw_enable_all_runtime_gpes(); 470 + (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 471 + 472 + return_ACPI_STATUS(status); 473 + } 474 + 475 + ACPI_EXPORT_SYMBOL(acpi_enable_all_runtime_gpes) 476 + 477 + /******************************************************************************* 478 + * 479 + * FUNCTION: acpi_install_gpe_block 480 + * 481 + * PARAMETERS: gpe_device - Handle to the parent GPE Block Device 482 + * gpe_block_address - Address and space_iD 483 + * register_count - Number of GPE register pairs in the block 484 + * interrupt_number - H/W interrupt for the block 485 + * 486 + * RETURN: Status 
487 + * 488 + * DESCRIPTION: Create and Install a block of GPE registers. The GPEs are not 489 + * enabled here. 490 + * 491 + ******************************************************************************/ 492 + acpi_status 493 + acpi_install_gpe_block(acpi_handle gpe_device, 494 + struct acpi_generic_address *gpe_block_address, 495 + u32 register_count, u32 interrupt_number) 496 + { 497 + acpi_status status; 498 + union acpi_operand_object *obj_desc; 499 + struct acpi_namespace_node *node; 500 + struct acpi_gpe_block_info *gpe_block; 501 + 502 + ACPI_FUNCTION_TRACE(acpi_install_gpe_block); 503 + 504 + if ((!gpe_device) || (!gpe_block_address) || (!register_count)) { 505 + return_ACPI_STATUS(AE_BAD_PARAMETER); 506 + } 507 + 508 + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 509 + if (ACPI_FAILURE(status)) { 510 + return (status); 511 + } 512 + 513 + node = acpi_ns_validate_handle(gpe_device); 514 + if (!node) { 515 + status = AE_BAD_PARAMETER; 516 + goto unlock_and_exit; 517 + } 518 + 519 + /* 520 + * For user-installed GPE Block Devices, the gpe_block_base_number 521 + * is always zero 522 + */ 523 + status = 524 + acpi_ev_create_gpe_block(node, gpe_block_address, register_count, 0, 525 + interrupt_number, &gpe_block); 526 + if (ACPI_FAILURE(status)) { 527 + goto unlock_and_exit; 528 + } 529 + 530 + /* Install block in the device_object attached to the node */ 531 + 532 + obj_desc = acpi_ns_get_attached_object(node); 533 + if (!obj_desc) { 534 + 535 + /* 536 + * No object, create a new one (Device nodes do not always have 537 + * an attached object) 538 + */ 539 + obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); 540 + if (!obj_desc) { 541 + status = AE_NO_MEMORY; 542 + goto unlock_and_exit; 543 + } 544 + 545 + status = 546 + acpi_ns_attach_object(node, obj_desc, ACPI_TYPE_DEVICE); 547 + 548 + /* Remove local reference to the object */ 549 + 550 + acpi_ut_remove_reference(obj_desc); 551 + 552 + if (ACPI_FAILURE(status)) { 553 + goto unlock_and_exit; 554 + } 555 + } 556 + 557 + /* Now install the GPE block in the device_object */ 558 + 559 + obj_desc->device.gpe_block = gpe_block; 560 + 561 + unlock_and_exit: 562 + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 563 + return_ACPI_STATUS(status); 564 + } 565 + 566 + ACPI_EXPORT_SYMBOL(acpi_install_gpe_block) 567 + 568 + /******************************************************************************* 569 + * 570 + * FUNCTION: acpi_remove_gpe_block 571 + * 572 + * PARAMETERS: gpe_device - Handle to the parent GPE Block Device 573 + * 574 + * RETURN: Status 575 + * 576 + * DESCRIPTION: Remove a previously installed block of GPE registers 577 + * 578 + ******************************************************************************/ 579 + acpi_status acpi_remove_gpe_block(acpi_handle gpe_device) 580 + { 581 + union acpi_operand_object *obj_desc; 582 + acpi_status status; 583 + struct acpi_namespace_node *node; 584 + 585 + ACPI_FUNCTION_TRACE(acpi_remove_gpe_block); 586 + 587 + if (!gpe_device) { 588 + return_ACPI_STATUS(AE_BAD_PARAMETER); 589 + } 590 + 591 + status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); 592 + if (ACPI_FAILURE(status)) { 593 + return (status); 594 + } 595 + 596 + node = acpi_ns_validate_handle(gpe_device); 597 + if (!node) { 598 + status = AE_BAD_PARAMETER; 599 + goto unlock_and_exit; 600 + } 601 + 602 + /* Get the device_object attached to the node */ 603 + 604 + obj_desc = acpi_ns_get_attached_object(node); 605 + if (!obj_desc || !obj_desc->device.gpe_block) { 606 + return_ACPI_STATUS(AE_NULL_OBJECT); 
607 + } 608 + 609 + /* Delete the GPE block (but not the device_object) */ 610 + 611 + status = acpi_ev_delete_gpe_block(obj_desc->device.gpe_block); 612 + if (ACPI_SUCCESS(status)) { 613 + obj_desc->device.gpe_block = NULL; 614 + } 615 + 616 + unlock_and_exit: 617 + (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 618 + return_ACPI_STATUS(status); 619 + } 620 + 621 + ACPI_EXPORT_SYMBOL(acpi_remove_gpe_block) 622 + 623 + /******************************************************************************* 624 + * 625 + * FUNCTION: acpi_get_gpe_device 626 + * 627 + * PARAMETERS: Index - System GPE index (0-current_gpe_count) 628 + * gpe_device - Where the parent GPE Device is returned 629 + * 630 + * RETURN: Status 631 + * 632 + * DESCRIPTION: Obtain the GPE device associated with the input index. A NULL 633 + * gpe device indicates that the gpe number is contained in one of 634 + * the FADT-defined gpe blocks. Otherwise, the GPE block device. 635 + * 636 + ******************************************************************************/ 637 + acpi_status 638 + acpi_get_gpe_device(u32 index, acpi_handle *gpe_device) 639 + { 640 + struct acpi_gpe_device_info info; 641 + acpi_status status; 642 + 643 + ACPI_FUNCTION_TRACE(acpi_get_gpe_device); 644 + 645 + if (!gpe_device) { 646 + return_ACPI_STATUS(AE_BAD_PARAMETER); 647 + } 648 + 649 + if (index >= acpi_current_gpe_count) { 650 + return_ACPI_STATUS(AE_NOT_EXIST); 651 + } 652 + 653 + /* Setup and walk the GPE list */ 654 + 655 + info.index = index; 656 + info.status = AE_NOT_EXIST; 657 + info.gpe_device = NULL; 658 + info.next_block_base_index = 0; 659 + 660 + status = acpi_ev_walk_gpe_list(acpi_ev_get_gpe_device, &info); 661 + if (ACPI_FAILURE(status)) { 662 + return_ACPI_STATUS(status); 663 + } 664 + 665 + *gpe_device = ACPI_CAST_PTR(acpi_handle, info.gpe_device); 666 + return_ACPI_STATUS(info.status); 667 + } 668 + 669 + ACPI_EXPORT_SYMBOL(acpi_get_gpe_device)
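
Taken together, the interfaces above suggest roughly the following calling sequence for a wake-capable GPE. The steps happen at different times in a real host; they are collapsed into one hypothetical function purely for illustration:

	static acpi_status wake_gpe_sequence(acpi_handle wake_device,
					     acpi_handle gpe_device,
					     u32 gpe_number)
	{
		acpi_status status;

		/* 1) While the host executes the device's _PRW method */
		status = acpi_setup_gpe_for_wake(wake_device, gpe_device,
						 gpe_number);
		if (ACPI_FAILURE(status))
			return (status);

		/* 2) Once all _PRW methods have been run (system-wide,
		 *    one-time pass; also after a GPE Block Device is added
		 *    or a dynamic table load brings in new GPE methods) */
		status = acpi_update_all_gpes();
		if (ACPI_FAILURE(status))
			return (status);

		/* 3) When the device is actually armed for wakeup */
		status = acpi_set_gpe_wake_mask(gpe_device, gpe_number,
						ACPI_GPE_ENABLE);
		if (ACPI_FAILURE(status))
			return (status);

		return (acpi_enable_gpe(gpe_device, gpe_number));
	}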
+19 -13
drivers/acpi/acpica/hwgpe.c
··· 62 62 * PARAMETERS: gpe_event_info - Info block for the GPE 63 63 * gpe_register_info - Info block for the GPE register 64 64 * 65 - * RETURN: Status 65 + * RETURN: Register mask with a one in the GPE bit position 66 66 * 67 - * DESCRIPTION: Compute GPE enable mask with one bit corresponding to the given 68 - * GPE set. 67 + * DESCRIPTION: Compute the register mask for this GPE. One bit is set in the 68 + * correct position for the input GPE. 69 69 * 70 70 ******************************************************************************/ 71 71 ··· 85 85 * 86 86 * RETURN: Status 87 87 * 88 - * DESCRIPTION: Enable or disable a single GPE in its enable register. 88 + * DESCRIPTION: Enable or disable a single GPE in the parent enable register. 89 89 * 90 90 ******************************************************************************/ 91 91 92 92 acpi_status 93 - acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u8 action) 93 + acpi_hw_low_set_gpe(struct acpi_gpe_event_info *gpe_event_info, u32 action) 94 94 { 95 95 struct acpi_gpe_register_info *gpe_register_info; 96 96 acpi_status status; ··· 113 113 return (status); 114 114 } 115 115 116 - /* Set ot clear just the bit that corresponds to this GPE */ 116 + /* Set or clear just the bit that corresponds to this GPE */ 117 117 118 118 register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, 119 119 gpe_register_info); 120 120 switch (action) { 121 - case ACPI_GPE_COND_ENABLE: 122 - if (!(register_bit & gpe_register_info->enable_for_run)) 121 + case ACPI_GPE_CONDITIONAL_ENABLE: 122 + 123 + /* Only enable if the enable_for_run bit is set */ 124 + 125 + if (!(register_bit & gpe_register_info->enable_for_run)) { 123 126 return (AE_BAD_PARAMETER); 127 + } 128 + 129 + /*lint -fallthrough */ 124 130 125 131 case ACPI_GPE_ENABLE: 126 132 ACPI_SET_BIT(enable_mask, register_bit); ··· 137 131 break; 138 132 139 133 default: 140 - ACPI_ERROR((AE_INFO, "Invalid action\n")); 134 + ACPI_ERROR((AE_INFO, "Invalid GPE Action, %u\n", action)); 141 135 return (AE_BAD_PARAMETER); 142 136 } 143 137 ··· 174 168 return (AE_NOT_EXIST); 175 169 } 176 170 177 - register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info, 178 - gpe_register_info); 179 - 180 171 /* 181 172 * Write a one to the appropriate bit in the status register to 182 173 * clear this GPE. 183 174 */ 175 + register_bit = 176 + acpi_hw_get_gpe_register_bit(gpe_event_info, gpe_register_info); 177 + 184 178 status = acpi_hw_write(register_bit, 185 179 &gpe_register_info->status_address); 186 180 ··· 207 201 u32 in_byte; 208 202 u32 register_bit; 209 203 struct acpi_gpe_register_info *gpe_register_info; 210 - acpi_status status; 211 204 acpi_event_status local_event_status = 0; 205 + acpi_status status; 212 206 213 207 ACPI_FUNCTION_ENTRY(); 214 208
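
Both the enable path and the clear path above obtain the per-GPE mask from acpi_hw_get_gpe_register_bit(). Conceptually the computation reduces to shifting a one into the GPE's offset within its register; a sketch, assuming base_gpe_number holds the first GPE number of the register:

	static u32 gpe_register_bit(struct acpi_gpe_event_info *gpe_event_info,
				    struct acpi_gpe_register_info *gpe_register_info)
	{
		/* e.g. GPE 0x13 in the register starting at GPE 0x10
		 * yields 1 << 3 == 0x08 */
		return ((u32)1 << (gpe_event_info->gpe_number -
				   gpe_register_info->base_gpe_number));
	}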
+2 -1
drivers/acpi/acpica/utglobal.c
··· 768 768 acpi_gbl_gpe_fadt_blocks[0] = NULL; 769 769 acpi_gbl_gpe_fadt_blocks[1] = NULL; 770 770 acpi_current_gpe_count = 0; 771 - acpi_all_gpes_initialized = FALSE; 771 + acpi_gbl_all_gpes_initialized = FALSE; 772 772 773 773 /* Global handlers */ 774 774 ··· 778 778 acpi_gbl_init_handler = NULL; 779 779 acpi_gbl_table_handler = NULL; 780 780 acpi_gbl_interface_handler = NULL; 781 + acpi_gbl_global_event_handler = NULL; 781 782 782 783 /* Global Lock support */ 783 784
+2
drivers/acpi/apei/apei-internal.h
··· 109 109 return sizeof(*estatus) + estatus->data_length; 110 110 } 111 111 112 + void apei_estatus_print(const char *pfx, 113 + const struct acpi_hest_generic_status *estatus); 112 114 int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus); 113 115 int apei_estatus_check(const struct acpi_hest_generic_status *estatus); 114 116 #endif
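
A hypothetical caller of the newly exported printer, pairing it with the existing checker (the function name and prefix are illustrative; apei_estatus_check() returns zero for a well-formed block):

	static void report_estatus(const struct acpi_hest_generic_status *estatus)
	{
		/* Reject malformed blocks before printing anything */
		if (apei_estatus_check(estatus))
			return;

		/* The prefix is prepended verbatim to every output line */
		apei_estatus_print(KERN_ERR "APEI: ", estatus);
	}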
+311
drivers/acpi/apei/cper.c
··· 46 46 }
47 47 EXPORT_SYMBOL_GPL(cper_next_record_id);
48 48
49 + static const char *cper_severity_strs[] = {
50 + "recoverable",
51 + "fatal",
52 + "corrected",
53 + "info",
54 + };
55 +
56 + static const char *cper_severity_str(unsigned int severity)
57 + {
58 + return severity < ARRAY_SIZE(cper_severity_strs) ?
59 + cper_severity_strs[severity] : "unknown";
60 + }
61 +
62 + /*
63 + * cper_print_bits - print strings for set bits
64 + * @pfx: prefix for each line, including log level and prefix string
65 + * @bits: bit mask
66 + * @strs: string array, indexed by bit position
67 + * @strs_size: size of the string array @strs
68 + *
69 + * For each set bit in @bits, print the corresponding string in @strs.
70 + * If the output length is longer than 80, multiple lines will be
71 + * printed, with @pfx printed at the beginning of each line.
72 + */
73 + static void cper_print_bits(const char *pfx, unsigned int bits,
74 + const char *strs[], unsigned int strs_size)
75 + {
76 + int i, len = 0;
77 + const char *str;
78 + char buf[84];
79 +
80 + for (i = 0; i < strs_size; i++) {
81 + if (!(bits & (1U << i)))
82 + continue;
83 + str = strs[i];
84 + if (len && len + strlen(str) + 2 > 80) {
85 + printk("%s\n", buf);
86 + len = 0;
87 + }
88 + if (!len)
89 + len = snprintf(buf, sizeof(buf), "%s%s", pfx, str);
90 + else
91 + len += snprintf(buf+len, sizeof(buf)-len, ", %s", str);
92 + }
93 + if (len)
94 + printk("%s\n", buf);
95 + }
96 +
97 + static const char *cper_proc_type_strs[] = {
98 + "IA32/X64",
99 + "IA64",
100 + };
101 +
102 + static const char *cper_proc_isa_strs[] = {
103 + "IA32",
104 + "IA64",
105 + "X64",
106 + };
107 +
108 + static const char *cper_proc_error_type_strs[] = {
109 + "cache error",
110 + "TLB error",
111 + "bus error",
112 + "micro-architectural error",
113 + };
114 +
115 + static const char *cper_proc_op_strs[] = {
116 + "unknown or generic",
117 + "data read",
118 + "data write",
119 + "instruction execution",
120 + };
121 +
122 + static const char *cper_proc_flag_strs[] = {
123 + "restartable",
124 + "precise IP",
125 + "overflow",
126 + "corrected",
127 + };
128 +
129 + static void cper_print_proc_generic(const char *pfx,
130 + const struct cper_sec_proc_generic *proc)
131 + {
132 + if (proc->validation_bits & CPER_PROC_VALID_TYPE)
133 + printk("%s""processor_type: %d, %s\n", pfx, proc->proc_type,
134 + proc->proc_type < ARRAY_SIZE(cper_proc_type_strs) ?
135 + cper_proc_type_strs[proc->proc_type] : "unknown");
136 + if (proc->validation_bits & CPER_PROC_VALID_ISA)
137 + printk("%s""processor_isa: %d, %s\n", pfx, proc->proc_isa,
138 + proc->proc_isa < ARRAY_SIZE(cper_proc_isa_strs) ?
139 + cper_proc_isa_strs[proc->proc_isa] : "unknown");
140 + if (proc->validation_bits & CPER_PROC_VALID_ERROR_TYPE) {
141 + printk("%s""error_type: 0x%02x\n", pfx, proc->proc_error_type);
142 + cper_print_bits(pfx, proc->proc_error_type,
143 + cper_proc_error_type_strs,
144 + ARRAY_SIZE(cper_proc_error_type_strs));
145 + }
146 + if (proc->validation_bits & CPER_PROC_VALID_OPERATION)
147 + printk("%s""operation: %d, %s\n", pfx, proc->operation,
148 + proc->operation < ARRAY_SIZE(cper_proc_op_strs) ? 
149 + cper_proc_op_strs[proc->operation] : "unknown"); 150 + if (proc->validation_bits & CPER_PROC_VALID_FLAGS) { 151 + printk("%s""flags: 0x%02x\n", pfx, proc->flags); 152 + cper_print_bits(pfx, proc->flags, cper_proc_flag_strs, 153 + ARRAY_SIZE(cper_proc_flag_strs)); 154 + } 155 + if (proc->validation_bits & CPER_PROC_VALID_LEVEL) 156 + printk("%s""level: %d\n", pfx, proc->level); 157 + if (proc->validation_bits & CPER_PROC_VALID_VERSION) 158 + printk("%s""version_info: 0x%016llx\n", pfx, proc->cpu_version); 159 + if (proc->validation_bits & CPER_PROC_VALID_ID) 160 + printk("%s""processor_id: 0x%016llx\n", pfx, proc->proc_id); 161 + if (proc->validation_bits & CPER_PROC_VALID_TARGET_ADDRESS) 162 + printk("%s""target_address: 0x%016llx\n", 163 + pfx, proc->target_addr); 164 + if (proc->validation_bits & CPER_PROC_VALID_REQUESTOR_ID) 165 + printk("%s""requestor_id: 0x%016llx\n", 166 + pfx, proc->requestor_id); 167 + if (proc->validation_bits & CPER_PROC_VALID_RESPONDER_ID) 168 + printk("%s""responder_id: 0x%016llx\n", 169 + pfx, proc->responder_id); 170 + if (proc->validation_bits & CPER_PROC_VALID_IP) 171 + printk("%s""IP: 0x%016llx\n", pfx, proc->ip); 172 + } 173 + 174 + static const char *cper_mem_err_type_strs[] = { 175 + "unknown", 176 + "no error", 177 + "single-bit ECC", 178 + "multi-bit ECC", 179 + "single-symbol chipkill ECC", 180 + "multi-symbol chipkill ECC", 181 + "master abort", 182 + "target abort", 183 + "parity error", 184 + "watchdog timeout", 185 + "invalid address", 186 + "mirror Broken", 187 + "memory sparing", 188 + "scrub corrected error", 189 + "scrub uncorrected error", 190 + }; 191 + 192 + static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) 193 + { 194 + if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) 195 + printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); 196 + if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS) 197 + printk("%s""physical_address: 0x%016llx\n", 198 + pfx, mem->physical_addr); 199 + if (mem->validation_bits & CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK) 200 + printk("%s""physical_address_mask: 0x%016llx\n", 201 + pfx, mem->physical_addr_mask); 202 + if (mem->validation_bits & CPER_MEM_VALID_NODE) 203 + printk("%s""node: %d\n", pfx, mem->node); 204 + if (mem->validation_bits & CPER_MEM_VALID_CARD) 205 + printk("%s""card: %d\n", pfx, mem->card); 206 + if (mem->validation_bits & CPER_MEM_VALID_MODULE) 207 + printk("%s""module: %d\n", pfx, mem->module); 208 + if (mem->validation_bits & CPER_MEM_VALID_BANK) 209 + printk("%s""bank: %d\n", pfx, mem->bank); 210 + if (mem->validation_bits & CPER_MEM_VALID_DEVICE) 211 + printk("%s""device: %d\n", pfx, mem->device); 212 + if (mem->validation_bits & CPER_MEM_VALID_ROW) 213 + printk("%s""row: %d\n", pfx, mem->row); 214 + if (mem->validation_bits & CPER_MEM_VALID_COLUMN) 215 + printk("%s""column: %d\n", pfx, mem->column); 216 + if (mem->validation_bits & CPER_MEM_VALID_BIT_POSITION) 217 + printk("%s""bit_position: %d\n", pfx, mem->bit_pos); 218 + if (mem->validation_bits & CPER_MEM_VALID_REQUESTOR_ID) 219 + printk("%s""requestor_id: 0x%016llx\n", pfx, mem->requestor_id); 220 + if (mem->validation_bits & CPER_MEM_VALID_RESPONDER_ID) 221 + printk("%s""responder_id: 0x%016llx\n", pfx, mem->responder_id); 222 + if (mem->validation_bits & CPER_MEM_VALID_TARGET_ID) 223 + printk("%s""target_id: 0x%016llx\n", pfx, mem->target_id); 224 + if (mem->validation_bits & CPER_MEM_VALID_ERROR_TYPE) { 225 + u8 etype = mem->error_type; 226 + printk("%s""error_type: %d, %s\n", 
pfx, etype, 227 + etype < ARRAY_SIZE(cper_mem_err_type_strs) ? 228 + cper_mem_err_type_strs[etype] : "unknown"); 229 + } 230 + } 231 + 232 + static const char *cper_pcie_port_type_strs[] = { 233 + "PCIe end point", 234 + "legacy PCI end point", 235 + "unknown", 236 + "unknown", 237 + "root port", 238 + "upstream switch port", 239 + "downstream switch port", 240 + "PCIe to PCI/PCI-X bridge", 241 + "PCI/PCI-X to PCIe bridge", 242 + "root complex integrated endpoint device", 243 + "root complex event collector", 244 + }; 245 + 246 + static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie) 247 + { 248 + if (pcie->validation_bits & CPER_PCIE_VALID_PORT_TYPE) 249 + printk("%s""port_type: %d, %s\n", pfx, pcie->port_type, 250 + pcie->port_type < ARRAY_SIZE(cper_pcie_port_type_strs) ? 251 + cper_pcie_port_type_strs[pcie->port_type] : "unknown"); 252 + if (pcie->validation_bits & CPER_PCIE_VALID_VERSION) 253 + printk("%s""version: %d.%d\n", pfx, 254 + pcie->version.major, pcie->version.minor); 255 + if (pcie->validation_bits & CPER_PCIE_VALID_COMMAND_STATUS) 256 + printk("%s""command: 0x%04x, status: 0x%04x\n", pfx, 257 + pcie->command, pcie->status); 258 + if (pcie->validation_bits & CPER_PCIE_VALID_DEVICE_ID) { 259 + const __u8 *p; 260 + printk("%s""device_id: %04x:%02x:%02x.%x\n", pfx, 261 + pcie->device_id.segment, pcie->device_id.bus, 262 + pcie->device_id.device, pcie->device_id.function); 263 + printk("%s""slot: %d\n", pfx, 264 + pcie->device_id.slot >> CPER_PCIE_SLOT_SHIFT); 265 + printk("%s""secondary_bus: 0x%02x\n", pfx, 266 + pcie->device_id.secondary_bus); 267 + printk("%s""vendor_id: 0x%04x, device_id: 0x%04x\n", pfx, 268 + pcie->device_id.vendor_id, pcie->device_id.device_id); 269 + p = pcie->device_id.class_code; 270 + printk("%s""class_code: %02x%02x%02x\n", pfx, p[0], p[1], p[2]); 271 + } 272 + if (pcie->validation_bits & CPER_PCIE_VALID_SERIAL_NUMBER) 273 + printk("%s""serial number: 0x%04x, 0x%04x\n", pfx, 274 + pcie->serial_number.lower, pcie->serial_number.upper); 275 + if (pcie->validation_bits & CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS) 276 + printk( 277 + "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n", 278 + pfx, pcie->bridge.secondary_status, pcie->bridge.control); 279 + } 280 + 281 + static const char *apei_estatus_section_flag_strs[] = { 282 + "primary", 283 + "containment warning", 284 + "reset", 285 + "threshold exceeded", 286 + "resource not accessible", 287 + "latent error", 288 + }; 289 + 290 + static void apei_estatus_print_section( 291 + const char *pfx, const struct acpi_hest_generic_data *gdata, int sec_no) 292 + { 293 + uuid_le *sec_type = (uuid_le *)gdata->section_type; 294 + __u16 severity; 295 + 296 + severity = gdata->error_severity; 297 + printk("%s""section: %d, severity: %d, %s\n", pfx, sec_no, severity, 298 + cper_severity_str(severity)); 299 + printk("%s""flags: 0x%02x\n", pfx, gdata->flags); 300 + cper_print_bits(pfx, gdata->flags, apei_estatus_section_flag_strs, 301 + ARRAY_SIZE(apei_estatus_section_flag_strs)); 302 + if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) 303 + printk("%s""fru_id: %pUl\n", pfx, (uuid_le *)gdata->fru_id); 304 + if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) 305 + printk("%s""fru_text: %.20s\n", pfx, gdata->fru_text); 306 + 307 + if (!uuid_le_cmp(*sec_type, CPER_SEC_PROC_GENERIC)) { 308 + struct cper_sec_proc_generic *proc_err = (void *)(gdata + 1); 309 + printk("%s""section_type: generic processor error\n", pfx); 310 + if (gdata->error_data_length >= sizeof(*proc_err)) 311 +
cper_print_proc_generic(pfx, proc_err); 312 + else 313 + goto err_section_too_small; 314 + } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { 315 + struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); 316 + printk("%s""section_type: memory error\n", pfx); 317 + if (gdata->error_data_length >= sizeof(*mem_err)) 318 + cper_print_mem(pfx, mem_err); 319 + else 320 + goto err_section_too_small; 321 + } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { 322 + struct cper_sec_pcie *pcie = (void *)(gdata + 1); 323 + printk("%s""section_type: PCIe error\n", pfx); 324 + if (gdata->error_data_length >= sizeof(*pcie)) 325 + cper_print_pcie(pfx, pcie); 326 + else 327 + goto err_section_too_small; 328 + } else 329 + printk("%s""section_type: unknown, %pUl\n", pfx, sec_type); 330 + 331 + return; 332 + 333 + err_section_too_small: 334 + pr_err(FW_WARN "error section length is too small\n"); 335 + } 336 + 337 + void apei_estatus_print(const char *pfx, 338 + const struct acpi_hest_generic_status *estatus) 339 + { 340 + struct acpi_hest_generic_data *gdata; 341 + unsigned int data_len, gedata_len; 342 + int sec_no = 0; 343 + __u16 severity; 344 + 345 + printk("%s""APEI generic hardware error status\n", pfx); 346 + severity = estatus->error_severity; 347 + printk("%s""severity: %d, %s\n", pfx, severity, 348 + cper_severity_str(severity)); 349 + data_len = estatus->data_length; 350 + gdata = (struct acpi_hest_generic_data *)(estatus + 1); 351 + while (data_len > sizeof(*gdata)) { 352 + gedata_len = gdata->error_data_length; 353 + apei_estatus_print_section(pfx, gdata, sec_no); 354 + data_len -= gedata_len + sizeof(*gdata); 355 + gdata = (void *)(gdata + 1) + gedata_len; /* next section */ 356 + sec_no++; 357 + } 358 + } 359 + EXPORT_SYMBOL_GPL(apei_estatus_print); 360 + 49 361 int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus) 50 362 { 51 363 if (estatus->data_length &&
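
The wrap-at-80-columns logic in cper_print_bits() above is self-contained enough to demonstrate outside the kernel. Below is a minimal user-space sketch of the same loop, with printf standing in for printk and the flag names borrowed from cper_proc_flag_strs (everything else is hypothetical):

    #include <stdio.h>
    #include <string.h>

    static void print_bits(const char *pfx, unsigned int bits,
                           const char *strs[], unsigned int strs_size)
    {
            char buf[84];
            size_t len = 0;
            unsigned int i;

            for (i = 0; i < strs_size; i++) {
                    if (!(bits & (1U << i)))
                            continue;
                    /* flush the pending line if ", <str>" would pass 80 */
                    if (len && len + strlen(strs[i]) + 2 > 80) {
                            printf("%s\n", buf);
                            len = 0;
                    }
                    if (!len)
                            len = snprintf(buf, sizeof(buf), "%s%s",
                                           pfx, strs[i]);
                    else
                            len += snprintf(buf + len, sizeof(buf) - len,
                                            ", %s", strs[i]);
            }
            if (len)
                    printf("%s\n", buf);
    }

    int main(void)
    {
            const char *flags[] = {
                    "restartable", "precise IP", "overflow", "corrected",
            };

            /* bits 0 and 3 set: prints "<HW ERR> restartable, corrected" */
            print_bits("<HW ERR> ", 0x9, flags, 4);
            return 0;
    }

A mask with many bits set simply spills onto additional lines, each starting with the prefix; that is why the prefix must carry the log level in the kernel version.
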
+342 -87
drivers/acpi/apei/ghes.c
··· 12 12 * For more information about Generic Hardware Error Source, please 13 13 * refer to ACPI Specification version 4.0, section 17.3.2.6 14 14 * 15 - * Now, only SCI notification type and memory errors are 16 - * supported. More notification type and hardware error type will be 17 - * added later. 18 - * 19 15 * Copyright 2010 Intel Corp. 20 16 * Author: Huang Ying <ying.huang@intel.com> 21 17 * ··· 35 39 #include <linux/acpi.h> 36 40 #include <linux/io.h> 37 41 #include <linux/interrupt.h> 42 + #include <linux/timer.h> 38 43 #include <linux/cper.h> 39 44 #include <linux/kdebug.h> 40 45 #include <linux/platform_device.h> 41 46 #include <linux/mutex.h> 47 + #include <linux/ratelimit.h> 48 + #include <linux/vmalloc.h> 42 49 #include <acpi/apei.h> 43 50 #include <acpi/atomicio.h> 44 51 #include <acpi/hed.h> 45 52 #include <asm/mce.h> 53 + #include <asm/tlbflush.h> 46 54 47 55 #include "apei-internal.h" 48 56 ··· 55 55 #define GHES_ESTATUS_MAX_SIZE 65536 56 56 57 57 /* 58 - * One struct ghes is created for each generic hardware error 59 - * source. 60 - * 58 + * One struct ghes is created for each generic hardware error source. 61 59 * It provides the context for APEI hardware error timer/IRQ/SCI/NMI 62 - * handler. Handler for one generic hardware error source is only 63 - * triggered after the previous one is done. So handler can uses 64 - * struct ghes without locking. 60 + * handler. 65 61 * 66 62 * estatus: memory buffer for error status block, allocated during 67 63 * HEST parsing. 68 64 */ 69 65 #define GHES_TO_CLEAR 0x0001 66 + #define GHES_EXITING 0x0002 70 67 71 68 struct ghes { 72 69 struct acpi_hest_generic *generic; 73 70 struct acpi_hest_generic_status *estatus; 74 - struct list_head list; 75 71 u64 buffer_paddr; 76 72 unsigned long flags; 73 + union { 74 + struct list_head list; 75 + struct timer_list timer; 76 + unsigned int irq; 77 + }; 77 78 }; 78 79 80 + static int ghes_panic_timeout __read_mostly = 30; 81 + 79 82 /* 80 - * Error source lists, one list for each notification method. The 81 - * members in lists are struct ghes. 83 + * All error sources notified with SCI share one notifier function, 84 + * so they need to be linked and checked one by one. The same 85 + * applies to NMI. 82 86 * 83 - * The list members are only added in HEST parsing and deleted during 84 - * module_exit, that is, single-threaded. So no lock is needed for 85 - * that. 86 - * 87 - * But the mutual exclusion is needed between members adding/deleting 88 - * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is 89 - * used for that. 87 + * RCU is used for these lists, so ghes_list_mutex is only used for 88 + * list changing, not for traversing. 90 89 */ 91 90 static LIST_HEAD(ghes_sci); 91 + static LIST_HEAD(ghes_nmi); 92 92 static DEFINE_MUTEX(ghes_list_mutex); 93 + 94 + /* 95 + * NMI may be triggered on any CPU, so ghes_nmi_lock is used for 96 + * mutual exclusion. 97 + */ 98 + static DEFINE_RAW_SPINLOCK(ghes_nmi_lock); 99 + 100 + /* 101 + * The memory area used to transfer hardware error information from 102 + * BIOS to Linux can be determined only in the NMI, IRQ or timer 103 + * handler, but the ordinary ioremap cannot be used in atomic 104 + * context, so a special atomic version of ioremap is implemented.
105 + */ 106 + 107 + /* 108 + * Two virtual pages are used, one for NMI context, the other for 109 + * IRQ/PROCESS context 110 + */ 111 + #define GHES_IOREMAP_PAGES 2 112 + #define GHES_IOREMAP_NMI_PAGE(base) (base) 113 + #define GHES_IOREMAP_IRQ_PAGE(base) ((base) + PAGE_SIZE) 114 + 115 + /* virtual memory area for atomic ioremap */ 116 + static struct vm_struct *ghes_ioremap_area; 117 + /* 118 + * These two spinlocks are used to prevent the atomic ioremap virtual 119 + * memory areas from being mapped simultaneously. 120 + */ 121 + static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); 122 + static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); 123 + 124 + static int ghes_ioremap_init(void) 125 + { 126 + ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES, 127 + VM_IOREMAP, VMALLOC_START, VMALLOC_END); 128 + if (!ghes_ioremap_area) { 129 + pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n"); 130 + return -ENOMEM; 131 + } 132 + 133 + return 0; 134 + } 135 + 136 + static void ghes_ioremap_exit(void) 137 + { 138 + free_vm_area(ghes_ioremap_area); 139 + } 140 + 141 + static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) 142 + { 143 + unsigned long vaddr; 144 + 145 + vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr); 146 + ioremap_page_range(vaddr, vaddr + PAGE_SIZE, 147 + pfn << PAGE_SHIFT, PAGE_KERNEL); 148 + 149 + return (void __iomem *)vaddr; 150 + } 151 + 152 + static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) 153 + { 154 + unsigned long vaddr; 155 + 156 + vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr); 157 + ioremap_page_range(vaddr, vaddr + PAGE_SIZE, 158 + pfn << PAGE_SHIFT, PAGE_KERNEL); 159 + 160 + return (void __iomem *)vaddr; 161 + } 162 + 163 + static void ghes_iounmap_nmi(void __iomem *vaddr_ptr) 164 + { 165 + unsigned long vaddr = (unsigned long __force)vaddr_ptr; 166 + void *base = ghes_ioremap_area->addr; 167 + 168 + BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); 169 + unmap_kernel_range_noflush(vaddr, PAGE_SIZE); 170 + __flush_tlb_one(vaddr); 171 + } 172 + 173 + static void ghes_iounmap_irq(void __iomem *vaddr_ptr) 174 + { 175 + unsigned long vaddr = (unsigned long __force)vaddr_ptr; 176 + void *base = ghes_ioremap_area->addr; 177 + 178 + BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); 179 + unmap_kernel_range_noflush(vaddr, PAGE_SIZE); 180 + __flush_tlb_one(vaddr); 181 + } 93 182 94 183 static struct ghes *ghes_new(struct acpi_hest_generic *generic) 95 184 { ··· 190 101 if (!ghes) 191 102 return ERR_PTR(-ENOMEM); 192 103 ghes->generic = generic; 193 - INIT_LIST_HEAD(&ghes->list); 194 104 rc = acpi_pre_map_gar(&generic->error_status_address); 195 105 if (rc) 196 106 goto err_free; ··· 246 158 } 247 159 } 248 160 249 - /* SCI handler run in work queue, so ioremap can be used here */ 250 - static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, 251 - int from_phys) 161 + static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, 162 + int from_phys) 252 163 { 253 - void *vaddr; 164 + void __iomem *vaddr; 165 + unsigned long flags = 0; 166 + int in_nmi = in_nmi(); 167 + u64 offset; 168 + u32 trunk; 254 169 255 - vaddr = ioremap_cache(paddr, len); 256 - if (!vaddr) 257 - return -ENOMEM; 258 - if (from_phys) 259 - memcpy(buffer, vaddr, len); 260 - else 261 - memcpy(vaddr, buffer, len); 262 - iounmap(vaddr); 263 - 264 - return 0; 170 + while (len > 0) { 171 + offset = paddr - (paddr & PAGE_MASK); 172 + if (in_nmi) { 173 + raw_spin_lock(&ghes_ioremap_lock_nmi); 174 + vaddr =
ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT); 175 + } else { 176 + spin_lock_irqsave(&ghes_ioremap_lock_irq, flags); 177 + vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT); 178 + } 179 + trunk = PAGE_SIZE - offset; 180 + trunk = min(trunk, len); 181 + if (from_phys) 182 + memcpy_fromio(buffer, vaddr + offset, trunk); 183 + else 184 + memcpy_toio(vaddr + offset, buffer, trunk); 185 + len -= trunk; 186 + paddr += trunk; 187 + buffer += trunk; 188 + if (in_nmi) { 189 + ghes_iounmap_nmi(vaddr); 190 + raw_spin_unlock(&ghes_ioremap_lock_nmi); 191 + } else { 192 + ghes_iounmap_irq(vaddr); 193 + spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); 194 + } 195 + } 265 196 } 266 197 267 198 static int ghes_read_estatus(struct ghes *ghes, int silent) ··· 301 194 if (!buf_paddr) 302 195 return -ENOENT; 303 196 304 - rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, 305 - sizeof(*ghes->estatus), 1); 306 - if (rc) 307 - return rc; 197 + ghes_copy_tofrom_phys(ghes->estatus, buf_paddr, 198 + sizeof(*ghes->estatus), 1); 308 199 if (!ghes->estatus->block_status) 309 200 return -ENOENT; 310 201 ··· 317 212 goto err_read_block; 318 213 if (apei_estatus_check_header(ghes->estatus)) 319 214 goto err_read_block; 320 - rc = ghes_copy_tofrom_phys(ghes->estatus + 1, 321 - buf_paddr + sizeof(*ghes->estatus), 322 - len - sizeof(*ghes->estatus), 1); 323 - if (rc) 324 - return rc; 215 + ghes_copy_tofrom_phys(ghes->estatus + 1, 216 + buf_paddr + sizeof(*ghes->estatus), 217 + len - sizeof(*ghes->estatus), 1); 325 218 if (apei_estatus_check(ghes->estatus)) 326 219 goto err_read_block; 327 220 rc = 0; 328 221 329 222 err_read_block: 330 - if (rc && !silent) 223 + if (rc && !silent && printk_ratelimit()) 331 224 pr_warning(FW_WARN GHES_PFX 332 225 "Failed to read error status block!\n"); 333 226 return rc; ··· 358 255 } 359 256 #endif 360 257 } 258 + } 361 259 362 - if (!processed && printk_ratelimit()) 363 - pr_warning(GHES_PFX 364 - "Unknown error record from generic hardware error source: %d\n", 365 - ghes->generic->header.source_id); 260 + static void ghes_print_estatus(const char *pfx, struct ghes *ghes) 261 + { 262 + /* Not more than 2 messages every 5 seconds */ 263 + static DEFINE_RATELIMIT_STATE(ratelimit, 5*HZ, 2); 264 + 265 + if (pfx == NULL) { 266 + if (ghes_severity(ghes->estatus->error_severity) <= 267 + GHES_SEV_CORRECTED) 268 + pfx = KERN_WARNING HW_ERR; 269 + else 270 + pfx = KERN_ERR HW_ERR; 271 + } 272 + if (__ratelimit(&ratelimit)) { 273 + printk( 274 + "%s""Hardware error from APEI Generic Hardware Error Source: %d\n", 275 + pfx, ghes->generic->header.source_id); 276 + apei_estatus_print(pfx, ghes->estatus); 277 + } 366 278 } 367 279 368 280 static int ghes_proc(struct ghes *ghes) ··· 387 269 rc = ghes_read_estatus(ghes, 0); 388 270 if (rc) 389 271 goto out; 272 + ghes_print_estatus(NULL, ghes); 390 273 ghes_do_proc(ghes); 391 274 392 275 out: 393 276 ghes_clear_estatus(ghes); 394 277 return 0; 278 + } 279 + 280 + static void ghes_add_timer(struct ghes *ghes) 281 + { 282 + struct acpi_hest_generic *g = ghes->generic; 283 + unsigned long expire; 284 + 285 + if (!g->notify.poll_interval) { 286 + pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n", 287 + g->header.source_id); 288 + return; 289 + } 290 + expire = jiffies + msecs_to_jiffies(g->notify.poll_interval); 291 + ghes->timer.expires = round_jiffies_relative(expire); 292 + add_timer(&ghes->timer); 293 + } 294 + 295 + static void ghes_poll_func(unsigned long data) 296 + { 297 + struct ghes *ghes = 
(void *)data; 298 + 299 + ghes_proc(ghes); 300 + if (!(ghes->flags & GHES_EXITING)) 301 + ghes_add_timer(ghes); 302 + } 303 + 304 + static irqreturn_t ghes_irq_func(int irq, void *data) 305 + { 306 + struct ghes *ghes = data; 307 + int rc; 308 + 309 + rc = ghes_proc(ghes); 310 + if (rc) 311 + return IRQ_NONE; 312 + 313 + return IRQ_HANDLED; 395 314 } 396 315 397 316 static int ghes_notify_sci(struct notifier_block *this, ··· 447 292 return ret; 448 293 } 449 294 295 + static int ghes_notify_nmi(struct notifier_block *this, 296 + unsigned long cmd, void *data) 297 + { 298 + struct ghes *ghes, *ghes_global = NULL; 299 + int sev, sev_global = -1; 300 + int ret = NOTIFY_DONE; 301 + 302 + if (cmd != DIE_NMI) 303 + return ret; 304 + 305 + raw_spin_lock(&ghes_nmi_lock); 306 + list_for_each_entry_rcu(ghes, &ghes_nmi, list) { 307 + if (ghes_read_estatus(ghes, 1)) { 308 + ghes_clear_estatus(ghes); 309 + continue; 310 + } 311 + sev = ghes_severity(ghes->estatus->error_severity); 312 + if (sev > sev_global) { 313 + sev_global = sev; 314 + ghes_global = ghes; 315 + } 316 + ret = NOTIFY_STOP; 317 + } 318 + 319 + if (ret == NOTIFY_DONE) 320 + goto out; 321 + 322 + if (sev_global >= GHES_SEV_PANIC) { 323 + oops_begin(); 324 + ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global); 325 + /* reboot to log the error! */ 326 + if (panic_timeout == 0) 327 + panic_timeout = ghes_panic_timeout; 328 + panic("Fatal hardware error!"); 329 + } 330 + 331 + list_for_each_entry_rcu(ghes, &ghes_nmi, list) { 332 + if (!(ghes->flags & GHES_TO_CLEAR)) 333 + continue; 334 + /* Do not print estatus because printk is not NMI safe */ 335 + ghes_do_proc(ghes); 336 + ghes_clear_estatus(ghes); 337 + } 338 + 339 + out: 340 + raw_spin_unlock(&ghes_nmi_lock); 341 + return ret; 342 + } 343 + 450 344 static struct notifier_block ghes_notifier_sci = { 451 345 .notifier_call = ghes_notify_sci, 346 + }; 347 + 348 + static struct notifier_block ghes_notifier_nmi = { 349 + .notifier_call = ghes_notify_nmi, 452 350 }; 453 351 454 352 static int __devinit ghes_probe(struct platform_device *ghes_dev) ··· 514 306 if (!generic->enabled) 515 307 return -ENODEV; 516 308 517 - if (generic->error_block_length < 518 - sizeof(struct acpi_hest_generic_status)) { 519 - pr_warning(FW_BUG GHES_PFX 520 - "Invalid error block length: %u for generic hardware error source: %d\n", 521 - generic->error_block_length, 309 + switch (generic->notify.type) { 310 + case ACPI_HEST_NOTIFY_POLLED: 311 + case ACPI_HEST_NOTIFY_EXTERNAL: 312 + case ACPI_HEST_NOTIFY_SCI: 313 + case ACPI_HEST_NOTIFY_NMI: 314 + break; 315 + case ACPI_HEST_NOTIFY_LOCAL: 316 + pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", 522 317 generic->header.source_id); 523 318 goto err; 319 + default: 320 + pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n", 321 + generic->notify.type, generic->header.source_id); 322 + goto err; 524 323 } 525 - if (generic->records_to_preallocate == 0) { 526 - pr_warning(FW_BUG GHES_PFX 527 - "Invalid records to preallocate: %u for generic hardware error source: %d\n", 528 - generic->records_to_preallocate, 324 + 325 + rc = -EIO; 326 + if (generic->error_block_length < 327 + sizeof(struct acpi_hest_generic_status)) { 328 + pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n", 329 + generic->error_block_length, 529 330 generic->header.source_id); 530 331 goto err; 531 332 } ··· 544 327 ghes = NULL; 545 328 goto err; 546 329 
} 547 - if (generic->notify.type == ACPI_HEST_NOTIFY_SCI) { 330 + switch (generic->notify.type) { 331 + case ACPI_HEST_NOTIFY_POLLED: 332 + ghes->timer.function = ghes_poll_func; 333 + ghes->timer.data = (unsigned long)ghes; 334 + init_timer_deferrable(&ghes->timer); 335 + ghes_add_timer(ghes); 336 + break; 337 + case ACPI_HEST_NOTIFY_EXTERNAL: 338 + /* External interrupt vector is GSI */ 339 + if (acpi_gsi_to_irq(generic->notify.vector, &ghes->irq)) { 340 + pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n", 341 + generic->header.source_id); 342 + goto err; 343 + } 344 + if (request_irq(ghes->irq, ghes_irq_func, 345 + 0, "GHES IRQ", ghes)) { 346 + pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n", 347 + generic->header.source_id); 348 + goto err; 349 + } 350 + break; 351 + case ACPI_HEST_NOTIFY_SCI: 548 352 mutex_lock(&ghes_list_mutex); 549 353 if (list_empty(&ghes_sci)) 550 354 register_acpi_hed_notifier(&ghes_notifier_sci); 551 355 list_add_rcu(&ghes->list, &ghes_sci); 552 356 mutex_unlock(&ghes_list_mutex); 553 - } else { 554 - unsigned char *notify = NULL; 555 - 556 - switch (generic->notify.type) { 557 - case ACPI_HEST_NOTIFY_POLLED: 558 - notify = "POLL"; 559 - break; 560 - case ACPI_HEST_NOTIFY_EXTERNAL: 561 - case ACPI_HEST_NOTIFY_LOCAL: 562 - notify = "IRQ"; 563 - break; 564 - case ACPI_HEST_NOTIFY_NMI: 565 - notify = "NMI"; 566 - break; 567 - } 568 - if (notify) { 569 - pr_warning(GHES_PFX 570 - "Generic hardware error source: %d notified via %s is not supported!\n", 571 - generic->header.source_id, notify); 572 - } else { 573 - pr_warning(FW_WARN GHES_PFX 574 - "Unknown notification type: %u for generic hardware error source: %d\n", 575 - generic->notify.type, generic->header.source_id); 576 - } 577 - rc = -ENODEV; 578 - goto err; 357 + break; 358 + case ACPI_HEST_NOTIFY_NMI: 359 + mutex_lock(&ghes_list_mutex); 360 + if (list_empty(&ghes_nmi)) 361 + register_die_notifier(&ghes_notifier_nmi); 362 + list_add_rcu(&ghes->list, &ghes_nmi); 363 + mutex_unlock(&ghes_list_mutex); 364 + break; 365 + default: 366 + BUG(); 579 367 } 580 368 platform_set_drvdata(ghes_dev, ghes); 581 369 ··· 601 379 ghes = platform_get_drvdata(ghes_dev); 602 380 generic = ghes->generic; 603 381 382 + ghes->flags |= GHES_EXITING; 604 383 switch (generic->notify.type) { 384 + case ACPI_HEST_NOTIFY_POLLED: 385 + del_timer_sync(&ghes->timer); 386 + break; 387 + case ACPI_HEST_NOTIFY_EXTERNAL: 388 + free_irq(ghes->irq, ghes); 389 + break; 605 390 case ACPI_HEST_NOTIFY_SCI: 606 391 mutex_lock(&ghes_list_mutex); 607 392 list_del_rcu(&ghes->list); ··· 616 387 unregister_acpi_hed_notifier(&ghes_notifier_sci); 617 388 mutex_unlock(&ghes_list_mutex); 618 389 break; 390 + case ACPI_HEST_NOTIFY_NMI: 391 + mutex_lock(&ghes_list_mutex); 392 + list_del_rcu(&ghes->list); 393 + if (list_empty(&ghes_nmi)) 394 + unregister_die_notifier(&ghes_notifier_nmi); 395 + mutex_unlock(&ghes_list_mutex); 396 + /* 397 + * To synchronize with NMI handler, ghes can only be 398 + * freed after NMI handler finishes. 
399 + */ 400 + synchronize_rcu(); 401 + break; 619 402 default: 620 403 BUG(); 621 404 break; 622 405 } 623 406 624 - synchronize_rcu(); 625 407 ghes_fini(ghes); 626 408 kfree(ghes); 627 409 ··· 652 412 653 413 static int __init ghes_init(void) 654 414 { 415 + int rc; 416 + 655 417 if (acpi_disabled) 656 418 return -ENODEV; 657 419 ··· 662 420 return -EINVAL; 663 421 } 664 422 665 - return platform_driver_register(&ghes_platform_driver); 423 + rc = ghes_ioremap_init(); 424 + if (rc) 425 + goto err; 426 + 427 + rc = platform_driver_register(&ghes_platform_driver); 428 + if (rc) 429 + goto err_ioremap_exit; 430 + 431 + return 0; 432 + err_ioremap_exit: 433 + ghes_ioremap_exit(); 434 + err: 435 + return rc; 666 436 } 667 437 668 438 static void __exit ghes_exit(void) 669 439 { 670 440 platform_driver_unregister(&ghes_platform_driver); 441 + ghes_ioremap_exit(); 671 442 } 672 443 673 444 module_init(ghes_init);
+16
drivers/acpi/battery.c
··· 631 631 return result; 632 632 } 633 633 634 + static void acpi_battery_refresh(struct acpi_battery *battery) 635 + { 636 + if (!battery->bat.dev) 637 + return; 638 + 639 + acpi_battery_get_info(battery); 640 + /* The battery may have changed its reporting units. */ 641 + sysfs_remove_battery(battery); 642 + sysfs_add_battery(battery); 643 + } 644 + 634 645 /* -------------------------------------------------------------------------- 635 646 FS Interface (/proc) 636 647 -------------------------------------------------------------------------- */ ··· 879 868 struct proc_dir_entry *entry = NULL; 880 869 int i; 881 870 871 + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for battery is loaded," 872 + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); 882 873 if (!acpi_device_dir(device)) { 883 874 acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), 884 875 acpi_battery_dir); ··· 927 914 if (!battery) 928 915 return; 929 916 old = battery->bat.dev; 917 + if (event == ACPI_BATTERY_NOTIFY_INFO) 918 + acpi_battery_refresh(battery); 930 919 acpi_battery_update(battery); 931 920 acpi_bus_generate_proc_event(device, event, 932 921 acpi_battery_present(battery)); ··· 998 983 if (!device) 999 984 return -EINVAL; 1000 985 battery = acpi_driver_data(device); 986 + acpi_battery_refresh(battery); 1001 987 battery->update_time = 0; 1002 988 acpi_battery_update(battery); 1003 989 return 0;
+81 -72
drivers/acpi/bus.c
··· 52 52 53 53 #define STRUCT_TO_INT(s) (*((int*)&s)) 54 54 55 - static int set_power_nocheck(const struct dmi_system_id *id) 56 - { 57 - printk(KERN_NOTICE PREFIX "%s detected - " 58 - "disable power check in power transition\n", id->ident); 59 - acpi_power_nocheck = 1; 60 - return 0; 61 - } 62 - static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = { 63 - { 64 - set_power_nocheck, "HP Pavilion 05", { 65 - DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 66 - DMI_MATCH(DMI_SYS_VENDOR, "HP Pavilion 05"), 67 - DMI_MATCH(DMI_PRODUCT_VERSION, "2001211RE101GLEND") }, NULL}, 68 - {}, 69 - }; 70 - 71 55 72 56 #ifdef CONFIG_X86 73 57 static int set_copy_dsdt(const struct dmi_system_id *id) ··· 180 196 Power Management 181 197 -------------------------------------------------------------------------- */ 182 198 183 - int acpi_bus_get_power(acpi_handle handle, int *state) 199 + static int __acpi_bus_get_power(struct acpi_device *device, int *state) 184 200 { 185 201 int result = 0; 186 202 acpi_status status = 0; 187 - struct acpi_device *device = NULL; 188 203 unsigned long long psc = 0; 189 204 190 - 191 - result = acpi_bus_get_device(handle, &device); 192 - if (result) 193 - return result; 205 + if (!device || !state) 206 + return -EINVAL; 194 207 195 208 *state = ACPI_STATE_UNKNOWN; 196 209 197 - if (!device->flags.power_manageable) { 198 - /* TBD: Non-recursive algorithm for walking up hierarchy */ 199 - if (device->parent) 200 - *state = device->parent->power.state; 201 - else 202 - *state = ACPI_STATE_D0; 203 - } else { 210 + if (device->flags.power_manageable) { 204 211 /* 205 212 * Get the device's power state either directly (via _PSC) or 206 213 * indirectly (via power resources). 207 214 */ 208 215 if (device->power.flags.power_resources) { 209 - result = acpi_power_get_inferred_state(device); 216 + result = acpi_power_get_inferred_state(device, state); 210 217 if (result) 211 218 return result; 212 219 } else if (device->power.flags.explicit_get) { ··· 205 230 NULL, &psc); 206 231 if (ACPI_FAILURE(status)) 207 232 return -ENODEV; 208 - device->power.state = (int)psc; 233 + *state = (int)psc; 209 234 } 210 - 211 - *state = device->power.state; 235 + } else { 236 + /* TBD: Non-recursive algorithm for walking up hierarchy. */ 237 + *state = device->parent ? 
238 + device->parent->power.state : ACPI_STATE_D0; 212 239 } 213 240 214 241 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", 215 - device->pnp.bus_id, device->power.state)); 242 + device->pnp.bus_id, *state)); 216 243 217 244 return 0; 218 245 } 219 246 220 - EXPORT_SYMBOL(acpi_bus_get_power); 221 247 222 - int acpi_bus_set_power(acpi_handle handle, int state) 248 + static int __acpi_bus_set_power(struct acpi_device *device, int state) 223 249 { 224 250 int result = 0; 225 251 acpi_status status = AE_OK; 226 - struct acpi_device *device = NULL; 227 252 char object_name[5] = { '_', 'P', 'S', '0' + state, '\0' }; 228 253 229 - 230 - result = acpi_bus_get_device(handle, &device); 231 - if (result) 232 - return result; 233 - 234 - if ((state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) 254 + if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) 235 255 return -EINVAL; 236 256 237 257 /* Make sure this is a valid target state */ 238 258 239 - if (!device->flags.power_manageable) { 240 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device `[%s]' is not power manageable\n", 241 - kobject_name(&device->dev.kobj))); 242 - return -ENODEV; 243 - } 244 - /* 245 - * Get device's current power state 246 - */ 247 - if (!acpi_power_nocheck) { 248 - /* 249 - * Maybe the incorrect power state is returned on the bogus 250 - * bios, which is different with the real power state. 251 - * For example: the bios returns D0 state and the real power 252 - * state is D3. OS expects to set the device to D0 state. In 253 - * such case if OS uses the power state returned by the BIOS, 254 - * the device can't be transisted to the correct power state. 255 - * So if the acpi_power_nocheck is set, it is unnecessary to 256 - * get the power state by calling acpi_bus_get_power. 
257 - */ 258 - acpi_bus_get_power(device->handle, &device->power.state); 259 - } 260 - if ((state == device->power.state) && !device->flags.force_power_state) { 259 + if (state == device->power.state) { 261 260 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", 262 261 state)); 263 262 return 0; ··· 300 351 return result; 301 352 } 302 353 354 + 355 + int acpi_bus_set_power(acpi_handle handle, int state) 356 + { 357 + struct acpi_device *device; 358 + int result; 359 + 360 + result = acpi_bus_get_device(handle, &device); 361 + if (result) 362 + return result; 363 + 364 + if (!device->flags.power_manageable) { 365 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 366 + "Device [%s] is not power manageable\n", 367 + dev_name(&device->dev))); 368 + return -ENODEV; 369 + } 370 + 371 + return __acpi_bus_set_power(device, state); 372 + } 303 373 EXPORT_SYMBOL(acpi_bus_set_power); 374 + 375 + 376 + int acpi_bus_init_power(struct acpi_device *device) 377 + { 378 + int state; 379 + int result; 380 + 381 + if (!device) 382 + return -EINVAL; 383 + 384 + device->power.state = ACPI_STATE_UNKNOWN; 385 + 386 + result = __acpi_bus_get_power(device, &state); 387 + if (result) 388 + return result; 389 + 390 + if (device->power.flags.power_resources) 391 + result = acpi_power_on_resources(device, state); 392 + 393 + if (!result) 394 + device->power.state = state; 395 + 396 + return result; 397 + } 398 + 399 + 400 + int acpi_bus_update_power(acpi_handle handle, int *state_p) 401 + { 402 + struct acpi_device *device; 403 + int state; 404 + int result; 405 + 406 + result = acpi_bus_get_device(handle, &device); 407 + if (result) 408 + return result; 409 + 410 + result = __acpi_bus_get_power(device, &state); 411 + if (result) 412 + return result; 413 + 414 + result = __acpi_bus_set_power(device, state); 415 + if (!result && state_p) 416 + *state_p = state; 417 + 418 + return result; 419 + } 420 + EXPORT_SYMBOL_GPL(acpi_bus_update_power); 421 + 304 422 305 423 bool acpi_bus_power_manageable(acpi_handle handle) 306 424 { ··· 1039 1023 if (acpi_disabled) 1040 1024 return result; 1041 1025 1042 - /* 1043 - * If the laptop falls into the DMI check table, the power state check 1044 - * will be disabled in the course of device power transition. 1045 - */ 1046 - dmi_check_system(power_nocheck_dmi_table); 1047 - 1048 1026 acpi_scan_init(); 1049 1027 acpi_ec_init(); 1050 - acpi_power_init(); 1051 1028 acpi_debugfs_init(); 1052 1029 acpi_sleep_proc_init(); 1053 1030 acpi_wakeup_device_init();
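
For callers, the net effect of acpi_bus_update_power() is "read the device's real power state, then write it back", so that power resource reference counts and device->power.state end up consistent. A sketch of how a driver might use it on resume (kernel-style fragment, not buildable outside the kernel tree; example_dev_resume is hypothetical, and the fan.c change below does essentially this):

    #include <acpi/acpi_bus.h>

    static int example_dev_resume(struct acpi_device *device)
    {
            int state;
            int error;

            /* re-read the real state and apply it consistently */
            error = acpi_bus_update_power(device->handle, &state);
            if (error)
                    return error;

            dev_info(&device->dev, "power state is now D%d\n", state);
            return 0;
    }

Passing NULL as the second argument is also allowed when the caller does not care which state was found, as the acpi_bus_update_power() body above shows.
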
+7 -2
drivers/acpi/button.c
··· 279 279 input_report_switch(button->input, SW_LID, !state); 280 280 input_sync(button->input); 281 281 282 + if (state) 283 + pm_wakeup_event(&device->dev, 0); 284 + 282 285 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, device); 283 286 if (ret == NOTIFY_DONE) 284 287 ret = blocking_notifier_call_chain(&acpi_lid_notifier, state, ··· 317 314 input_sync(input); 318 315 input_report_key(input, keycode, 0); 319 316 input_sync(input); 317 + 318 + pm_wakeup_event(&device->dev, 0); 320 319 } 321 320 322 321 acpi_bus_generate_proc_event(device, event, ++button->pushed); ··· 431 426 acpi_enable_gpe(device->wakeup.gpe_device, 432 427 device->wakeup.gpe_number); 433 428 device->wakeup.run_wake_count++; 434 - device->wakeup.state.enabled = 1; 429 + device_set_wakeup_enable(&device->dev, true); 435 430 } 436 431 437 432 printk(KERN_INFO PREFIX "%s [%s]\n", name, acpi_device_bid(device)); ··· 454 449 acpi_disable_gpe(device->wakeup.gpe_device, 455 450 device->wakeup.gpe_number); 456 451 device->wakeup.run_wake_count--; 457 - device->wakeup.state.enabled = 0; 452 + device_set_wakeup_enable(&device->dev, false); 458 453 } 459 454 460 455 acpi_button_remove_fs(device);
+1 -1
drivers/acpi/dock.c
··· 725 725 complete_dock(ds); 726 726 dock_event(ds, event, DOCK_EVENT); 727 727 dock_lock(ds, 1); 728 - acpi_update_gpes(); 728 + acpi_update_all_gpes(); 729 729 break; 730 730 } 731 731 if (dock_present(ds) || dock_in_progress(ds))
+3 -2
drivers/acpi/ec.c
··· 606 606 return 0; 607 607 } 608 608 609 - static u32 acpi_ec_gpe_handler(void *data) 609 + static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, 610 + u32 gpe_number, void *data) 610 611 { 611 612 struct acpi_ec *ec = data; 612 613 ··· 619 618 wake_up(&ec->wait); 620 619 ec_check_sci(ec, acpi_ec_read_status(ec)); 621 620 } 622 - return ACPI_INTERRUPT_HANDLED; 621 + return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; 623 622 } 624 623 625 624 /* --------------------------------------------------------------------------
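
The two-line ec.c change above follows an ACPICA interface change: a GPE handler now receives the GPE device and number explicitly, and can OR ACPI_REENABLE_GPE into its return value to ask ACPICA to re-enable the GPE (disabled around handler execution) rather than re-enabling it by hand. The general shape under the new prototype (a sketch; example_gpe_handler and its body are hypothetical):

    #include <linux/acpi.h>

    static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
                                   void *context)
    {
            /* ... consume the event using the driver data in context ... */

            /* handled, and ask ACPICA to re-arm the GPE on return */
            return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
    }

Returning ACPI_INTERRUPT_HANDLED alone would leave the GPE disabled, which is what a handler wants when it intends to re-enable the GPE itself later.
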
+7 -20
drivers/acpi/fan.c
··· 86 86 if (!device) 87 87 return -EINVAL; 88 88 89 - result = acpi_bus_get_power(device->handle, &acpi_state); 89 + result = acpi_bus_update_power(device->handle, &acpi_state); 90 90 if (result) 91 91 return result; 92 92 ··· 123 123 static int acpi_fan_add(struct acpi_device *device) 124 124 { 125 125 int result = 0; 126 - int state = 0; 127 126 struct thermal_cooling_device *cdev; 128 127 129 128 if (!device) ··· 131 132 strcpy(acpi_device_name(device), "Fan"); 132 133 strcpy(acpi_device_class(device), ACPI_FAN_CLASS); 133 134 134 - result = acpi_bus_get_power(device->handle, &state); 135 + result = acpi_bus_update_power(device->handle, NULL); 135 136 if (result) { 136 - printk(KERN_ERR PREFIX "Reading power state\n"); 137 + printk(KERN_ERR PREFIX "Setting initial power state\n"); 137 138 goto end; 138 139 } 139 - 140 - device->flags.force_power_state = 1; 141 - acpi_bus_set_power(device->handle, state); 142 - device->flags.force_power_state = 0; 143 140 144 141 cdev = thermal_cooling_device_register("Fan", device, 145 142 &fan_cooling_ops); ··· 195 200 196 201 static int acpi_fan_resume(struct acpi_device *device) 197 202 { 198 - int result = 0; 199 - int power_state = 0; 203 + int result; 200 204 201 205 if (!device) 202 206 return -EINVAL; 203 207 204 - result = acpi_bus_get_power(device->handle, &power_state); 205 - if (result) { 206 - printk(KERN_ERR PREFIX 207 - "Error reading fan power state\n"); 208 - return result; 209 - } 210 - 211 - device->flags.force_power_state = 1; 212 - acpi_bus_set_power(device->handle, power_state); 213 - device->flags.force_power_state = 0; 208 + result = acpi_bus_update_power(device->handle, NULL); 209 + if (result) 210 + printk(KERN_ERR PREFIX "Error updating fan power state\n"); 214 211 215 212 return result; 216 213 }
+1 -4
drivers/acpi/glue.c
··· 167 167 "firmware_node"); 168 168 ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, 169 169 "physical_node"); 170 - if (acpi_dev->wakeup.flags.valid) { 170 + if (acpi_dev->wakeup.flags.valid) 171 171 device_set_wakeup_capable(dev, true); 172 - device_set_wakeup_enable(dev, 173 - acpi_dev->wakeup.state.enabled); 174 - } 175 172 } 176 173 177 174 return 0;
+11 -2
drivers/acpi/internal.h
··· 41 41 int acpi_power_init(void); 42 42 int acpi_device_sleep_wake(struct acpi_device *dev, 43 43 int enable, int sleep_state, int dev_state); 44 - int acpi_power_get_inferred_state(struct acpi_device *device); 44 + int acpi_power_get_inferred_state(struct acpi_device *device, int *state); 45 + int acpi_power_on_resources(struct acpi_device *device, int state); 45 46 int acpi_power_transition(struct acpi_device *device, int state); 46 - extern int acpi_power_nocheck; 47 + int acpi_bus_init_power(struct acpi_device *device); 47 48 48 49 int acpi_wakeup_device_init(void); 49 50 void acpi_early_processor_set_pdc(void); ··· 83 82 84 83 #ifdef CONFIG_ACPI_SLEEP 85 84 int acpi_sleep_proc_init(void); 85 + int suspend_nvs_alloc(void); 86 + void suspend_nvs_free(void); 87 + int suspend_nvs_save(void); 88 + void suspend_nvs_restore(void); 86 89 #else 87 90 static inline int acpi_sleep_proc_init(void) { return 0; } 91 + static inline int suspend_nvs_alloc(void) { return 0; } 92 + static inline void suspend_nvs_free(void) {} 93 + static inline int suspend_nvs_save(void) { return 0; } 94 + static inline void suspend_nvs_restore(void) {} 88 95 #endif 89 96 90 97 #endif /* _ACPI_INTERNAL_H_ */
+10 -7
drivers/acpi/osl.c
··· 320 320 321 321 pg_off = round_down(phys, PAGE_SIZE); 322 322 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; 323 - virt = ioremap(pg_off, pg_sz); 323 + virt = ioremap_cache(pg_off, pg_sz); 324 324 if (!virt) { 325 325 kfree(map); 326 326 return NULL; ··· 642 642 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 643 643 rcu_read_unlock(); 644 644 if (!virt_addr) { 645 - virt_addr = ioremap(phys_addr, size); 645 + virt_addr = ioremap_cache(phys_addr, size); 646 646 unmap = 1; 647 647 } 648 648 if (!value) ··· 678 678 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 679 679 rcu_read_unlock(); 680 680 if (!virt_addr) { 681 - virt_addr = ioremap(phys_addr, size); 681 + virt_addr = ioremap_cache(phys_addr, size); 682 682 unmap = 1; 683 683 } 684 684 ··· 1233 1233 int acpi_check_resource_conflict(const struct resource *res) 1234 1234 { 1235 1235 struct acpi_res_list *res_list_elem; 1236 - int ioport; 1237 - int clash = 0; 1236 + int ioport = 0, clash = 0; 1238 1237 1239 1238 if (acpi_enforce_resources == ENFORCE_RESOURCES_NO) 1240 1239 return 0; ··· 1263 1264 if (clash) { 1264 1265 if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) { 1265 1266 printk(KERN_WARNING "ACPI: resource %s %pR" 1266 - " conflicts with ACPI region %s %pR\n", 1267 + " conflicts with ACPI region %s " 1268 + "[%s 0x%zx-0x%zx]\n", 1267 1269 res->name, res, res_list_elem->name, 1268 - res_list_elem); 1270 + (res_list_elem->resource_type == 1271 + ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem", 1272 + (size_t) res_list_elem->start, 1273 + (size_t) res_list_elem->end); 1269 1274 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX) 1270 1275 printk(KERN_NOTICE "ACPI: This conflict may" 1271 1276 " cause random problems and system"
+72 -56
drivers/acpi/power.c
··· 56 56 #define ACPI_POWER_RESOURCE_STATE_ON 0x01 57 57 #define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF 58 58 59 - int acpi_power_nocheck; 60 - module_param_named(power_nocheck, acpi_power_nocheck, bool, 000); 61 - 62 59 static int acpi_power_add(struct acpi_device *device); 63 60 static int acpi_power_remove(struct acpi_device *device, int type); 64 61 static int acpi_power_resume(struct acpi_device *device); ··· 145 148 146 149 static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state) 147 150 { 148 - int result = 0, state1; 149 - u32 i = 0; 150 - 151 + int cur_state; 152 + int i = 0; 151 153 152 154 if (!list || !state) 153 155 return -EINVAL; ··· 154 158 /* The state of the list is 'on' IFF all resources are 'on'. */ 155 159 156 160 for (i = 0; i < list->count; i++) { 157 - /* 158 - * The state of the power resource can be obtained by 159 - * using the ACPI handle. In such case it is unnecessary to 160 - * get the Power resource first and then get its state again. 161 - */ 162 - result = acpi_power_get_state(list->handles[i], &state1); 161 + struct acpi_power_resource *resource; 162 + acpi_handle handle = list->handles[i]; 163 + int result; 164 + 165 + result = acpi_power_get_context(handle, &resource); 163 166 if (result) 164 167 return result; 165 168 166 - *state = state1; 169 + mutex_lock(&resource->resource_lock); 167 170 168 - if (*state != ACPI_POWER_RESOURCE_STATE_ON) 171 + result = acpi_power_get_state(handle, &cur_state); 172 + 173 + mutex_unlock(&resource->resource_lock); 174 + 175 + if (result) 176 + return result; 177 + 178 + if (cur_state != ACPI_POWER_RESOURCE_STATE_ON) 169 179 break; 170 180 } 171 181 172 182 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Resource list is %s\n", 173 - *state ? "on" : "off")); 183 + cur_state ? 
"on" : "off")); 174 184 175 - return result; 185 + *state = cur_state; 186 + 187 + return 0; 176 188 } 177 189 178 190 static int __acpi_power_on(struct acpi_power_resource *resource) ··· 226 222 return result; 227 223 } 228 224 229 - static int acpi_power_off_device(acpi_handle handle) 225 + static int acpi_power_off(acpi_handle handle) 230 226 { 231 227 int result = 0; 232 228 acpi_status status = AE_OK; ··· 266 262 267 263 unlock: 268 264 mutex_unlock(&resource->resource_lock); 265 + 266 + return result; 267 + } 268 + 269 + static void __acpi_power_off_list(struct acpi_handle_list *list, int num_res) 270 + { 271 + int i; 272 + 273 + for (i = num_res - 1; i >= 0 ; i--) 274 + acpi_power_off(list->handles[i]); 275 + } 276 + 277 + static void acpi_power_off_list(struct acpi_handle_list *list) 278 + { 279 + __acpi_power_off_list(list, list->count); 280 + } 281 + 282 + static int acpi_power_on_list(struct acpi_handle_list *list) 283 + { 284 + int result = 0; 285 + int i; 286 + 287 + for (i = 0; i < list->count; i++) { 288 + result = acpi_power_on(list->handles[i]); 289 + if (result) { 290 + __acpi_power_off_list(list, i); 291 + break; 292 + } 293 + } 269 294 270 295 return result; 271 296 } ··· 437 404 438 405 /* Close power resource */ 439 406 for (i = 0; i < dev->wakeup.resources.count; i++) { 440 - int ret = acpi_power_off_device( 441 - dev->wakeup.resources.handles[i]); 407 + int ret = acpi_power_off(dev->wakeup.resources.handles[i]); 442 408 if (ret) { 443 409 printk(KERN_ERR PREFIX "Transition power state\n"); 444 410 dev->wakeup.flags.valid = 0; ··· 455 423 Device Power Management 456 424 -------------------------------------------------------------------------- */ 457 425 458 - int acpi_power_get_inferred_state(struct acpi_device *device) 426 + int acpi_power_get_inferred_state(struct acpi_device *device, int *state) 459 427 { 460 428 int result = 0; 461 429 struct acpi_handle_list *list = NULL; 462 430 int list_state = 0; 463 431 int i = 0; 464 432 465 - 466 - if (!device) 433 + if (!device || !state) 467 434 return -EINVAL; 468 - 469 - device->power.state = ACPI_STATE_UNKNOWN; 470 435 471 436 /* 472 437 * We know a device's inferred power state when all the resources ··· 479 450 return result; 480 451 481 452 if (list_state == ACPI_POWER_RESOURCE_STATE_ON) { 482 - device->power.state = i; 453 + *state = i; 483 454 return 0; 484 455 } 485 456 } 486 457 487 - device->power.state = ACPI_STATE_D3; 488 - 458 + *state = ACPI_STATE_D3; 489 459 return 0; 460 + } 461 + 462 + int acpi_power_on_resources(struct acpi_device *device, int state) 463 + { 464 + if (!device || state < ACPI_STATE_D0 || state > ACPI_STATE_D3) 465 + return -EINVAL; 466 + 467 + return acpi_power_on_list(&device->power.states[state].resources); 490 468 } 491 469 492 470 int acpi_power_transition(struct acpi_device *device, int state) 493 471 { 494 - int result = 0; 495 - struct acpi_handle_list *cl = NULL; /* Current Resources */ 496 - struct acpi_handle_list *tl = NULL; /* Target Resources */ 497 - int i = 0; 472 + int result; 498 473 499 474 if (!device || (state < ACPI_STATE_D0) || (state > ACPI_STATE_D3)) 500 475 return -EINVAL; ··· 510 477 || (device->power.state > ACPI_STATE_D3)) 511 478 return -ENODEV; 512 479 513 - cl = &device->power.states[device->power.state].resources; 514 - tl = &device->power.states[state].resources; 515 - 516 480 /* TBD: Resources must be ordered. */ 517 481 518 482 /* 519 483 * First we reference all power resources required in the target list 520 - * (e.g. 
so the device doesn't lose power while transitioning). 484 + * (e.g. so the device doesn't lose power while transitioning). Then, 485 + * we dereference all power resources used in the current list. 521 486 */ 522 - for (i = 0; i < tl->count; i++) { 523 - result = acpi_power_on(tl->handles[i]); 524 - if (result) 525 - goto end; 526 - } 487 + result = acpi_power_on_list(&device->power.states[state].resources); 488 + if (!result) 489 + acpi_power_off_list( 490 + &device->power.states[device->power.state].resources); 527 491 528 - /* 529 - * Then we dereference all power resources used in the current list. 530 - */ 531 - for (i = 0; i < cl->count; i++) { 532 - result = acpi_power_off_device(cl->handles[i]); 533 - if (result) 534 - goto end; 535 - } 536 - 537 - end: 538 - if (result) 539 - device->power.state = ACPI_STATE_UNKNOWN; 540 - else { 541 - /* We shouldn't change the state till all above operations succeed */ 542 - device->power.state = state; 543 - } 492 + /* We shouldn't change the state unless the above operations succeed. */ 493 + device->power.state = result ? ACPI_STATE_UNKNOWN : state; 544 494 545 495 return result; 546 496 }
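
acpi_power_on_list() and __acpi_power_off_list() above encode the classic all-or-nothing acquire pattern: switch resources on in list order and, on the first failure, switch the already-acquired ones back off in reverse order. A self-contained sketch of the idiom (user-space; power_on/power_off are stand-ins for the ACPI calls, and resource 3 is made to fail):

    #include <stdio.h>

    static int power_on(int res)
    {
            printf("on  %d\n", res);
            return res == 3 ? -1 : 0;
    }

    static void power_off(int res)
    {
            printf("off %d\n", res);
    }

    static int power_on_list(const int *res, int count)
    {
            int i, err = 0;

            for (i = 0; i < count; i++) {
                    err = power_on(res[i]);
                    if (err) {
                            /* roll back in reverse order */
                            while (--i >= 0)
                                    power_off(res[i]);
                            break;
                    }
            }
            return err;
    }

    int main(void)
    {
            int list[] = { 1, 2, 3, 4 };

            return power_on_list(list, 4) ? 1 : 0;
    }

This ordering is what lets the reworked acpi_power_transition() reference every resource of the target state before dropping those of the current state, so the device never loses all of its power resources mid-transition.
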
+12 -29
drivers/acpi/proc.c
··· 311 311 dev->pnp.bus_id, 312 312 (u32) dev->wakeup.sleep_state, 313 313 dev->wakeup.flags.run_wake ? '*' : ' ', 314 - dev->wakeup.state.enabled ? "enabled" : "disabled"); 314 + (device_may_wakeup(&dev->dev) 315 + || (ldev && device_may_wakeup(ldev))) ? 316 + "enabled" : "disabled"); 315 317 if (ldev) 316 318 seq_printf(seq, "%s:%s", 317 319 ldev->bus ? ldev->bus->name : "no-bus", ··· 330 328 { 331 329 struct device *dev = acpi_get_physical_device(adev->handle); 332 330 333 - if (dev && device_can_wakeup(dev)) 334 - device_set_wakeup_enable(dev, adev->wakeup.state.enabled); 331 + if (dev && device_can_wakeup(dev)) { 332 + bool enable = !device_may_wakeup(dev); 333 + device_set_wakeup_enable(dev, enable); 334 + } 335 335 } 336 336 337 337 static ssize_t ··· 345 341 char strbuf[5]; 346 342 char str[5] = ""; 347 343 unsigned int len = count; 348 - struct acpi_device *found_dev = NULL; 349 344 350 345 if (len > 4) 351 346 len = 4; ··· 364 361 continue; 365 362 366 363 if (!strncmp(dev->pnp.bus_id, str, 4)) { 367 - dev->wakeup.state.enabled = 368 - dev->wakeup.state.enabled ? 0 : 1; 369 - found_dev = dev; 370 - break; 371 - } 372 - } 373 - if (found_dev) { 374 - physical_device_enable_wakeup(found_dev); 375 - list_for_each_safe(node, next, &acpi_wakeup_device_list) { 376 - struct acpi_device *dev = container_of(node, 377 - struct 378 - acpi_device, 379 - wakeup_list); 380 - 381 - if ((dev != found_dev) && 382 - (dev->wakeup.gpe_number == 383 - found_dev->wakeup.gpe_number) 384 - && (dev->wakeup.gpe_device == 385 - found_dev->wakeup.gpe_device)) { 386 - printk(KERN_WARNING 387 - "ACPI: '%s' and '%s' have the same GPE, " 388 - "can't disable/enable one separately\n", 389 - dev->pnp.bus_id, found_dev->pnp.bus_id); 390 - dev->wakeup.state.enabled = 391 - found_dev->wakeup.state.enabled; 364 + if (device_can_wakeup(&dev->dev)) { 365 + bool enable = !device_may_wakeup(&dev->dev); 366 + device_set_wakeup_enable(&dev->dev, enable); 367 + } else { 392 368 physical_device_enable_wakeup(dev); 393 369 } 370 + break; 394 371 } 395 372 } 396 373 mutex_unlock(&acpi_device_lock);
+6 -74
drivers/acpi/processor_driver.c
··· 40 40 #include <linux/pm.h> 41 41 #include <linux/cpufreq.h> 42 42 #include <linux/cpu.h> 43 - #ifdef CONFIG_ACPI_PROCFS 44 - #include <linux/proc_fs.h> 45 - #include <linux/seq_file.h> 46 - #endif 47 43 #include <linux/dmi.h> 48 44 #include <linux/moduleparam.h> 49 45 #include <linux/cpuidle.h> ··· 242 246 return result; 243 247 } 244 248 245 - #ifdef CONFIG_ACPI_PROCFS 246 - static struct proc_dir_entry *acpi_processor_dir = NULL; 247 - 248 - static int __cpuinit acpi_processor_add_fs(struct acpi_device *device) 249 - { 250 - struct proc_dir_entry *entry = NULL; 251 - 252 - 253 - if (!acpi_device_dir(device)) { 254 - acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), 255 - acpi_processor_dir); 256 - if (!acpi_device_dir(device)) 257 - return -ENODEV; 258 - } 259 - 260 - /* 'throttling' [R/W] */ 261 - entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING, 262 - S_IFREG | S_IRUGO | S_IWUSR, 263 - acpi_device_dir(device), 264 - &acpi_processor_throttling_fops, 265 - acpi_driver_data(device)); 266 - if (!entry) 267 - return -EIO; 268 - return 0; 269 - } 270 - static int acpi_processor_remove_fs(struct acpi_device *device) 271 - { 272 - 273 - if (acpi_device_dir(device)) { 274 - remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, 275 - acpi_device_dir(device)); 276 - remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); 277 - acpi_device_dir(device) = NULL; 278 - } 279 - 280 - return 0; 281 - } 282 - #else 283 - static inline int acpi_processor_add_fs(struct acpi_device *device) 284 - { 285 - return 0; 286 - } 287 - static inline int acpi_processor_remove_fs(struct acpi_device *device) 288 - { 289 - return 0; 290 - } 291 - #endif 292 249 /* -------------------------------------------------------------------------- 293 250 Driver Interface 294 251 -------------------------------------------------------------------------- */ ··· 427 478 if (action == CPU_ONLINE && pr) { 428 479 acpi_processor_ppc_has_changed(pr, 0); 429 480 acpi_processor_cst_has_changed(pr); 481 + acpi_processor_reevaluate_tstate(pr, action); 430 482 acpi_processor_tstate_has_changed(pr); 483 + } 484 + if (action == CPU_DEAD && pr) { 485 + /* invalidate flags.throttling after one CPU goes offline */ 486 + acpi_processor_reevaluate_tstate(pr, action); 431 487 } 432 488 return NOTIFY_OK; 433 489 } ··· 491 537 492 538 per_cpu(processors, pr->id) = pr; 493 539 494 - result = acpi_processor_add_fs(device); 495 - if (result) 496 - goto err_free_cpumask; 497 - 498 540 sysdev = get_cpu_sysdev(pr->id); 499 541 if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { 500 542 result = -EFAULT; 501 - goto err_remove_fs; 543 + goto err_free_cpumask; 502 544 } 503 545 504 546 #ifdef CONFIG_CPU_FREQ ··· 540 590 thermal_cooling_device_unregister(pr->cdev); 541 591 err_power_exit: 542 592 acpi_processor_power_exit(pr, device); 543 - err_remove_fs: 544 - acpi_processor_remove_fs(device); 545 593 err_free_cpumask: 546 594 free_cpumask_var(pr->throttling.shared_cpu_map); 547 595 ··· 567 619 acpi_processor_power_exit(pr, device); 568 620 569 621 sysfs_remove_link(&device->dev.kobj, "sysdev"); 570 - 571 - acpi_processor_remove_fs(device); 572 622 573 623 if (pr->cdev) { 574 624 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); ··· 800 854 801 855 memset(&errata, 0, sizeof(errata)); 802 856 803 - #ifdef CONFIG_ACPI_PROCFS 804 - acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); 805 - if (!acpi_processor_dir) 806 - return -ENOMEM; 807 - #endif 808 - 809 857 if
(!cpuidle_register_driver(&acpi_idle_driver)) { 810 858 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n", 811 859 acpi_idle_driver.name); ··· 825 885 out_cpuidle: 826 886 cpuidle_unregister_driver(&acpi_idle_driver); 827 887 828 - #ifdef CONFIG_ACPI_PROCFS 829 - remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); 830 - #endif 831 - 832 888 return result; 833 889 } 834 890 ··· 842 906 acpi_bus_unregister_driver(&acpi_processor_driver); 843 907 844 908 cpuidle_unregister_driver(&acpi_idle_driver); 845 - 846 - #ifdef CONFIG_ACPI_PROCFS 847 - remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); 848 - #endif 849 909 850 910 return; 851 911 }
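
The hotplug changes above route both CPU_ONLINE and CPU_DEAD through acpi_processor_reevaluate_tstate(). For orientation, a minimal CPU notifier of the same shape under the hotplug API of this era (a sketch; the callback body is hypothetical):

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static int example_cpu_callback(struct notifier_block *nb,
                                    unsigned long action, void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            switch (action) {
            case CPU_ONLINE:
                    /* revalidate state cached for this CPU */
                    pr_debug("cpu %u online\n", cpu);
                    break;
            case CPU_DEAD:
                    /* drop state cached for the departed CPU */
                    pr_debug("cpu %u dead\n", cpu);
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block example_cpu_notifier = {
            .notifier_call = example_cpu_callback,
    };

Such a block would be registered with register_hotcpu_notifier(); the processor driver already has a notifier of this form wired up, which is why the T-state hooks could be added without any new registration code.
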
+73 -117
drivers/acpi/processor_throttling.c
··· 32 32 #include <linux/init.h> 33 33 #include <linux/sched.h> 34 34 #include <linux/cpufreq.h> 35 - #ifdef CONFIG_ACPI_PROCFS 36 - #include <linux/proc_fs.h> 37 - #include <linux/seq_file.h> 38 - #endif 39 35 40 36 #include <asm/io.h> 41 37 #include <asm/uaccess.h> ··· 365 369 return acpi_processor_set_throttling(pr, target_state, false); 366 370 } 367 371 372 + /* 373 + * This function is used to reevaluate whether the T-state is valid 374 + * after one CPU is onlined/offlined. 375 + * Note that it won't reevaluate the following properties of the 376 + * T-state: 377 + * 1. Control method. 378 + * 2. The number of supported T-states. 379 + * 3. TSD domain. 380 + */ 381 + void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, 382 + unsigned long action) 383 + { 384 + int result = 0; 385 + 386 + if (action == CPU_DEAD) { 387 + /* When one CPU goes offline, the T-state throttling 388 + * is invalidated. 389 + */ 390 + pr->flags.throttling = 0; 391 + return; 392 + } 393 + /* The following rechecks whether the T-state is valid for 394 + * the onlined CPU. 395 + */ 396 + if (!pr->throttling.state_count) { 397 + /* If the number of T-states is invalid, throttling 398 + * is invalidated. 399 + */ 400 + pr->flags.throttling = 0; 401 + return; 402 + } 403 + pr->flags.throttling = 1; 404 + 405 + /* Disable throttling (if enabled). We'll let subsequent 406 + * policy (e.g. thermal) decide to lower performance if it 407 + * so chooses, but for now we'll crank up the speed. 408 + */ 409 + 410 + result = acpi_processor_get_throttling(pr); 411 + if (result) 412 + goto end; 413 + 414 + if (pr->throttling.state) { 415 + result = acpi_processor_set_throttling(pr, 0, false); 416 + if (result) 417 + goto end; 418 + } 419 + 420 + end: 421 + if (result) 422 + pr->flags.throttling = 0; 423 + } 368 424 /* 369 425 * _PTC - Processor Throttling Control (and status) register location 370 426 */ ··· 924 876 */ 925 877 cpumask_copy(saved_mask, &current->cpus_allowed); 926 878 /* FIXME: use work_on_cpu() */ 927 - set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 879 + if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { 880 + /* Can't migrate to the target pr->id CPU. Exit */ 881 + free_cpumask_var(saved_mask); 882 + return -ENODEV; 883 + } 928 884 ret = pr->throttling.acpi_processor_get_throttling(pr); 929 885 /* restore the previous state */ 930 886 set_cpus_allowed_ptr(current, saved_mask); ··· 1103 1051 return -ENOMEM; 1104 1052 } 1105 1053 1054 + if (cpu_is_offline(pr->id)) { 1055 + /* 1056 + * The CPU pointed to by pr->id is offline; there is no 1057 + * need to change the throttling state any more. 1058 + */ 1059 + return -ENODEV; 1060 + } 1061 + 1106 1062 cpumask_copy(saved_mask, &current->cpus_allowed); 1107 1063 t_state.target_state = state; 1108 1064 p_throttling = &(pr->throttling); ··· 1134 1074 */ 1135 1075 if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) { 1136 1076 /* FIXME: use work_on_cpu() */ 1137 - set_cpus_allowed_ptr(current, cpumask_of(pr->id)); 1077 + if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) { 1078 + /* Can't migrate to the pr->id CPU. Exit */ 1079 + ret = -ENODEV; 1080 + goto exit; 1081 + } 1138 1082 ret = p_throttling->acpi_processor_set_throttling(pr, 1139 1083 t_state.target_state, force); 1140 1084 } else { ··· 1170 1106 } 1171 1107 t_state.cpu = i; 1172 1108 /* FIXME: use work_on_cpu() */ 1173 - set_cpus_allowed_ptr(current, cpumask_of(i)); 1109 + if (set_cpus_allowed_ptr(current, cpumask_of(i))) 1110 + continue; 1174 1111 ret = match_pr->throttling.
1175 1112 acpi_processor_set_throttling( 1176 1113 match_pr, t_state.target_state, force); ··· 1191 1126 /* restore the previous state */ 1192 1127 /* FIXME: use work_on_cpu() */ 1193 1128 set_cpus_allowed_ptr(current, saved_mask); 1129 + exit: 1194 1130 free_cpumask_var(online_throttling_cpus); 1195 1131 free_cpumask_var(saved_mask); 1196 1132 return ret; ··· 1282 1216 return result; 1283 1217 } 1284 1218 1285 - #ifdef CONFIG_ACPI_PROCFS 1286 - /* proc interface */ 1287 - static int acpi_processor_throttling_seq_show(struct seq_file *seq, 1288 - void *offset) 1289 - { 1290 - struct acpi_processor *pr = seq->private; 1291 - int i = 0; 1292 - int result = 0; 1293 - 1294 - if (!pr) 1295 - goto end; 1296 - 1297 - if (!(pr->throttling.state_count > 0)) { 1298 - seq_puts(seq, "<not supported>\n"); 1299 - goto end; 1300 - } 1301 - 1302 - result = acpi_processor_get_throttling(pr); 1303 - 1304 - if (result) { 1305 - seq_puts(seq, 1306 - "Could not determine current throttling state.\n"); 1307 - goto end; 1308 - } 1309 - 1310 - seq_printf(seq, "state count: %d\n" 1311 - "active state: T%d\n" 1312 - "state available: T%d to T%d\n", 1313 - pr->throttling.state_count, pr->throttling.state, 1314 - pr->throttling_platform_limit, 1315 - pr->throttling.state_count - 1); 1316 - 1317 - seq_puts(seq, "states:\n"); 1318 - if (pr->throttling.acpi_processor_get_throttling == 1319 - acpi_processor_get_throttling_fadt) { 1320 - for (i = 0; i < pr->throttling.state_count; i++) 1321 - seq_printf(seq, " %cT%d: %02d%%\n", 1322 - (i == pr->throttling.state ? '*' : ' '), i, 1323 - (pr->throttling.states[i].performance ? pr-> 1324 - throttling.states[i].performance / 10 : 0)); 1325 - } else { 1326 - for (i = 0; i < pr->throttling.state_count; i++) 1327 - seq_printf(seq, " %cT%d: %02d%%\n", 1328 - (i == pr->throttling.state ? '*' : ' '), i, 1329 - (int)pr->throttling.states_tss[i]. 
1330 - freqpercentage); 1331 - } 1332 - 1333 - end: 1334 - return 0; 1335 - } 1336 - 1337 - static int acpi_processor_throttling_open_fs(struct inode *inode, 1338 - struct file *file) 1339 - { 1340 - return single_open(file, acpi_processor_throttling_seq_show, 1341 - PDE(inode)->data); 1342 - } 1343 - 1344 - static ssize_t acpi_processor_write_throttling(struct file *file, 1345 - const char __user * buffer, 1346 - size_t count, loff_t * data) 1347 - { 1348 - int result = 0; 1349 - struct seq_file *m = file->private_data; 1350 - struct acpi_processor *pr = m->private; 1351 - char state_string[5] = ""; 1352 - char *charp = NULL; 1353 - size_t state_val = 0; 1354 - char tmpbuf[5] = ""; 1355 - 1356 - if (!pr || (count > sizeof(state_string) - 1)) 1357 - return -EINVAL; 1358 - 1359 - if (copy_from_user(state_string, buffer, count)) 1360 - return -EFAULT; 1361 - 1362 - state_string[count] = '\0'; 1363 - if ((count > 0) && (state_string[count-1] == '\n')) 1364 - state_string[count-1] = '\0'; 1365 - 1366 - charp = state_string; 1367 - if ((state_string[0] == 't') || (state_string[0] == 'T')) 1368 - charp++; 1369 - 1370 - state_val = simple_strtoul(charp, NULL, 0); 1371 - if (state_val >= pr->throttling.state_count) 1372 - return -EINVAL; 1373 - 1374 - snprintf(tmpbuf, 5, "%zu", state_val); 1375 - 1376 - if (strcmp(tmpbuf, charp) != 0) 1377 - return -EINVAL; 1378 - 1379 - result = acpi_processor_set_throttling(pr, state_val, false); 1380 - if (result) 1381 - return result; 1382 - 1383 - return count; 1384 - } 1385 - 1386 - const struct file_operations acpi_processor_throttling_fops = { 1387 - .owner = THIS_MODULE, 1388 - .open = acpi_processor_throttling_open_fs, 1389 - .read = seq_read, 1390 - .write = acpi_processor_write_throttling, 1391 - .llseek = seq_lseek, 1392 - .release = single_release, 1393 - }; 1394 - #endif
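
The reworked throttling code above is meant to be driven from CPU hotplug, so that acpi_processor_reevaluate_tstate() revalidates (or invalidates) flags.throttling as CPUs come and go. A minimal sketch of such a caller follows; the notifier wiring, the example_cpu_notify name, and the per_cpu(processors, ...) lookup are illustrative assumptions, not part of this diff:

	#include <linux/cpu.h>
	#include <linux/notifier.h>
	#include <acpi/processor.h>

	static int example_cpu_notify(struct notifier_block *nb,
				      unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;
		/* Assumed per-CPU lookup of the ACPI processor object. */
		struct acpi_processor *pr = per_cpu(processors, cpu);

		if (!pr)
			return NOTIFY_OK;

		/* CPU_ONLINE revalidates the T-state; CPU_DEAD invalidates it. */
		if (action == CPU_ONLINE || action == CPU_DEAD)
			acpi_processor_reevaluate_tstate(pr, action);

		return NOTIFY_OK;
	}
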
+2
drivers/acpi/sbs.c
··· 484 484 const struct file_operations *state_fops, 485 485 const struct file_operations *alarm_fops, void *data) 486 486 { 487 + printk(KERN_WARNING PREFIX "Deprecated procfs I/F for SBS is loaded," 488 + " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n"); 487 489 if (!*dir) { 488 490 *dir = proc_mkdir(dir_name, parent_dir); 489 491 if (!*dir) {
+43 -27
drivers/acpi/scan.c
··· 778 778 wakeup->resources.handles[i] = element->reference.handle; 779 779 } 780 780 781 - acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number); 781 + acpi_setup_gpe_for_wake(handle, wakeup->gpe_device, wakeup->gpe_number); 782 782 783 783 out: 784 784 kfree(buffer.pointer); ··· 803 803 /* Power button, Lid switch always enable wakeup */ 804 804 if (!acpi_match_device_ids(device, button_device_ids)) { 805 805 device->wakeup.flags.run_wake = 1; 806 - device->wakeup.flags.always_enabled = 1; 806 + device_set_wakeup_capable(&device->dev, true); 807 807 return; 808 808 } 809 809 ··· 815 815 !!(event_status & ACPI_EVENT_FLAG_HANDLE); 816 816 } 817 817 818 - static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 818 + static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 819 819 { 820 + acpi_handle temp; 820 821 acpi_status status = 0; 821 822 int psw_error; 823 + 824 + /* Presence of _PRW indicates wake capable */ 825 + status = acpi_get_handle(device->handle, "_PRW", &temp); 826 + if (ACPI_FAILURE(status)) 827 + return; 822 828 823 829 status = acpi_bus_extract_wakeup_device_power_package(device->handle, 824 830 &device->wakeup); 825 831 if (ACPI_FAILURE(status)) { 826 832 ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package")); 827 - goto end; 833 + return; 828 834 } 829 835 830 836 device->wakeup.flags.valid = 1; ··· 846 840 if (psw_error) 847 841 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 848 842 "error in _DSW or _PSW evaluation\n")); 849 - 850 - end: 851 - if (ACPI_FAILURE(status)) 852 - device->flags.wake_capable = 0; 853 - return 0; 854 843 } 844 + 845 + static void acpi_bus_add_power_resource(acpi_handle handle); 855 846 856 847 static int acpi_bus_get_power_flags(struct acpi_device *device) 857 848 { ··· 878 875 acpi_evaluate_reference(device->handle, object_name, NULL, 879 876 &ps->resources); 880 877 if (ps->resources.count) { 878 + int j; 879 + 881 880 device->power.flags.power_resources = 1; 882 881 ps->flags.valid = 1; 882 + for (j = 0; j < ps->resources.count; j++) 883 + acpi_bus_add_power_resource(ps->resources.handles[j]); 883 884 } 884 885 885 886 /* Evaluate "_PSx" to see if we can do explicit sets */ ··· 908 901 device->power.states[ACPI_STATE_D3].flags.valid = 1; 909 902 device->power.states[ACPI_STATE_D3].power = 0; 910 903 911 - /* TBD: System wake support and resource requirements. 
*/ 912 - 913 - device->power.state = ACPI_STATE_UNKNOWN; 914 - acpi_bus_get_power(device->handle, &(device->power.state)); 904 + acpi_bus_init_power(device); 915 905 916 906 return 0; 917 907 } ··· 950 946 status = acpi_get_handle(device->handle, "_PR0", &temp); 951 947 if (ACPI_SUCCESS(status)) 952 948 device->flags.power_manageable = 1; 953 - 954 - /* Presence of _PRW indicates wake capable */ 955 - status = acpi_get_handle(device->handle, "_PRW", &temp); 956 - if (ACPI_SUCCESS(status)) 957 - device->flags.wake_capable = 1; 958 949 959 950 /* TBD: Performance management */ 960 951 ··· 1277 1278 * Wakeup device management 1278 1279 *----------------------- 1279 1280 */ 1280 - if (device->flags.wake_capable) { 1281 - result = acpi_bus_get_wakeup_device_flags(device); 1282 - if (result) 1283 - goto end; 1284 - } 1281 + acpi_bus_get_wakeup_device_flags(device); 1285 1282 1286 1283 /* 1287 1284 * Performance Management ··· 1320 1325 1321 1326 #define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \ 1322 1327 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING) 1328 + 1329 + static void acpi_bus_add_power_resource(acpi_handle handle) 1330 + { 1331 + struct acpi_bus_ops ops = { 1332 + .acpi_op_add = 1, 1333 + .acpi_op_start = 1, 1334 + }; 1335 + struct acpi_device *device = NULL; 1336 + 1337 + acpi_bus_get_device(handle, &device); 1338 + if (!device) 1339 + acpi_add_single_object(&device, handle, ACPI_BUS_TYPE_POWER, 1340 + ACPI_STA_DEFAULT, &ops); 1341 + } 1323 1342 1324 1343 static int acpi_bus_type_and_status(acpi_handle handle, int *type, 1325 1344 unsigned long long *sta) ··· 1380 1371 struct acpi_bus_ops *ops = context; 1381 1372 int type; 1382 1373 unsigned long long sta; 1383 - struct acpi_device_wakeup wakeup; 1384 1374 struct acpi_device *device; 1385 1375 acpi_status status; 1386 1376 int result; ··· 1390 1382 1391 1383 if (!(sta & ACPI_STA_DEVICE_PRESENT) && 1392 1384 !(sta & ACPI_STA_DEVICE_FUNCTIONING)) { 1393 - acpi_bus_extract_wakeup_device_power_package(handle, &wakeup); 1385 + struct acpi_device_wakeup wakeup; 1386 + acpi_handle temp; 1387 + 1388 + status = acpi_get_handle(handle, "_PRW", &temp); 1389 + if (ACPI_SUCCESS(status)) 1390 + acpi_bus_extract_wakeup_device_power_package(handle, 1391 + &wakeup); 1394 1392 return AE_CTRL_DEPTH; 1395 1393 } 1396 1394 ··· 1481 1467 1482 1468 result = acpi_bus_scan(device->handle, &ops, NULL); 1483 1469 1484 - acpi_update_gpes(); 1470 + acpi_update_all_gpes(); 1485 1471 1486 1472 return result; 1487 1473 } ··· 1587 1573 printk(KERN_ERR PREFIX "Could not register bus type\n"); 1588 1574 } 1589 1575 1576 + acpi_power_init(); 1577 + 1590 1578 /* 1591 1579 * Enumerate devices in the ACPI namespace. 1592 1580 */ ··· 1600 1584 if (result) 1601 1585 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); 1602 1586 else 1603 - acpi_update_gpes(); 1587 + acpi_update_all_gpes(); 1604 1588 1605 1589 return result; 1606 1590 }
+10 -3
drivers/acpi/sleep.c
··· 124 124 static int acpi_pm_pre_suspend(void) 125 125 { 126 126 acpi_pm_freeze(); 127 - suspend_nvs_save(); 128 - return 0; 127 + return suspend_nvs_save(); 129 128 } 130 129 131 130 /** ··· 150 151 { 151 152 int error = __acpi_pm_prepare(); 152 153 if (!error) 153 - acpi_pm_pre_suspend(); 154 + error = acpi_pm_pre_suspend(); 154 155 155 156 return error; 156 157 } ··· 432 433 .matches = { 433 434 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 434 435 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-NW130D"), 436 + }, 437 + }, 438 + { 439 + .callback = init_nvs_nosave, 440 + .ident = "Averatec AV1020-ED2", 441 + .matches = { 442 + DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), 443 + DMI_MATCH(DMI_PRODUCT_NAME, "1000 Series"), 435 444 }, 436 445 }, 437 446 {},
+17 -2
drivers/acpi/sysfs.c
··· 438 438 return; 439 439 } 440 440 441 - void acpi_os_gpe_count(u32 gpe_number) 441 + static void gpe_count(u32 gpe_number) 442 442 { 443 443 acpi_gpe_count++; 444 444 ··· 454 454 return; 455 455 } 456 456 457 - void acpi_os_fixed_event_count(u32 event_number) 457 + static void fixed_event_count(u32 event_number) 458 458 { 459 459 if (!all_counters) 460 460 return; ··· 466 466 COUNT_ERROR].count++; 467 467 468 468 return; 469 + } 470 + 471 + static void acpi_gbl_event_handler(u32 event_type, acpi_handle device, 472 + u32 event_number, void *context) 473 + { 474 + if (event_type == ACPI_EVENT_TYPE_GPE) 475 + gpe_count(event_number); 476 + 477 + if (event_type == ACPI_EVENT_TYPE_FIXED) 478 + fixed_event_count(event_number); 469 479 } 470 480 471 481 static int get_status(u32 index, acpi_event_status *status, ··· 611 601 612 602 void acpi_irq_stats_init(void) 613 603 { 604 + acpi_status status; 614 605 int i; 615 606 616 607 if (all_counters) ··· 628 617 all_counters = kzalloc(sizeof(struct event_counter) * (num_counters), 629 618 GFP_KERNEL); 630 619 if (all_counters == NULL) 620 + goto fail; 621 + 622 + status = acpi_install_global_event_handler(acpi_gbl_event_handler, NULL); 623 + if (ACPI_FAILURE(status)) 631 624 goto fail; 632 625 633 626 counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
+3 -2
drivers/acpi/thermal.c
··· 1059 1059 break; 1060 1060 tz->trips.active[i].flags.enabled = 1; 1061 1061 for (j = 0; j < tz->trips.active[i].devices.count; j++) { 1062 - result = acpi_bus_get_power(tz->trips.active[i].devices. 1063 - handles[j], &power_state); 1062 + result = acpi_bus_update_power( 1063 + tz->trips.active[i].devices.handles[j], 1064 + &power_state); 1064 1065 if (result || (power_state != ACPI_STATE_D0)) { 1065 1066 tz->trips.active[i].flags.enabled = 0; 1066 1067 break;
+15 -89
drivers/acpi/video.c
··· 33 33 #include <linux/input.h> 34 34 #include <linux/backlight.h> 35 35 #include <linux/thermal.h> 36 - #include <linux/video_output.h> 37 36 #include <linux/sort.h> 38 37 #include <linux/pci.h> 39 38 #include <linux/pci_ids.h> ··· 79 80 */ 80 81 static int allow_duplicates; 81 82 module_param(allow_duplicates, bool, 0644); 83 + 84 + /* 85 + * Some BIOSes claim they use minimum backlight at boot, 86 + * and this may bring dimming screen after boot 87 + */ 88 + static int use_bios_initial_backlight = 1; 89 + module_param(use_bios_initial_backlight, bool, 0644); 82 90 83 91 static int register_count = 0; 84 92 static int acpi_video_bus_add(struct acpi_device *device); ··· 178 172 u8 _BQC:1; /* Get current brightness level */ 179 173 u8 _BCQ:1; /* Some buggy BIOS uses _BCQ instead of _BQC */ 180 174 u8 _DDC:1; /*Return the EDID for this device */ 181 - u8 _DCS:1; /*Return status of output device */ 182 - u8 _DGS:1; /*Query graphics state */ 183 - u8 _DSS:1; /*Device state set */ 184 175 }; 185 176 186 177 struct acpi_video_brightness_flags { ··· 205 202 struct acpi_video_device_brightness *brightness; 206 203 struct backlight_device *backlight; 207 204 struct thermal_cooling_device *cooling_dev; 208 - struct output_device *output_dev; 209 205 }; 210 206 211 207 static const char device_decode[][30] = { ··· 228 226 u32 level_current, u32 event); 229 227 static int acpi_video_switch_brightness(struct acpi_video_device *device, 230 228 int event); 231 - static int acpi_video_device_get_state(struct acpi_video_device *device, 232 - unsigned long long *state); 233 - static int acpi_video_output_get(struct output_device *od); 234 - static int acpi_video_device_set_state(struct acpi_video_device *device, int state); 235 229 236 230 /*backlight device sysfs support*/ 237 231 static int acpi_video_get_brightness(struct backlight_device *bd) ··· 262 264 .get_brightness = acpi_video_get_brightness, 263 265 .update_status = acpi_video_set_brightness, 264 266 }; 265 - 266 - /*video output device sysfs support*/ 267 - static int acpi_video_output_get(struct output_device *od) 268 - { 269 - unsigned long long state; 270 - struct acpi_video_device *vd = 271 - (struct acpi_video_device *)dev_get_drvdata(&od->dev); 272 - acpi_video_device_get_state(vd, &state); 273 - return (int)state; 274 - } 275 - 276 - static int acpi_video_output_set(struct output_device *od) 277 - { 278 - unsigned long state = od->request_state; 279 - struct acpi_video_device *vd= 280 - (struct acpi_video_device *)dev_get_drvdata(&od->dev); 281 - return acpi_video_device_set_state(vd, state); 282 - } 283 - 284 - static struct output_properties acpi_output_properties = { 285 - .set_state = acpi_video_output_set, 286 - .get_status = acpi_video_output_get, 287 - }; 288 - 289 267 290 268 /* thermal cooling device callbacks */ 291 269 static int video_get_max_state(struct thermal_cooling_device *cooling_dev, unsigned ··· 317 343 /* -------------------------------------------------------------------------- 318 344 Video Management 319 345 -------------------------------------------------------------------------- */ 320 - 321 - /* device */ 322 - 323 - static int 324 - acpi_video_device_get_state(struct acpi_video_device *device, 325 - unsigned long long *state) 326 - { 327 - int status; 328 - 329 - status = acpi_evaluate_integer(device->dev->handle, "_DCS", NULL, state); 330 - 331 - return status; 332 - } 333 - 334 - static int 335 - acpi_video_device_set_state(struct acpi_video_device *device, int state) 336 - { 337 - int status; 338 - 
union acpi_object arg0 = { ACPI_TYPE_INTEGER }; 339 - struct acpi_object_list args = { 1, &arg0 }; 340 - unsigned long long ret; 341 - 342 - 343 - arg0.integer.value = state; 344 - status = acpi_evaluate_integer(device->dev->handle, "_DSS", &args, &ret); 345 - 346 - return status; 347 - } 348 346 349 347 static int 350 348 acpi_video_device_lcd_query_levels(struct acpi_video_device *device, ··· 712 766 * when invoked for the first time, i.e. level_old is invalid. 713 767 * set the backlight to max_level in this case 714 768 */ 715 - for (i = 2; i < br->count; i++) 716 - if (level_old == br->levels[i]) 717 - level = level_old; 769 + if (use_bios_initial_backlight) { 770 + for (i = 2; i < br->count; i++) 771 + if (level_old == br->levels[i]) 772 + level = level_old; 773 + } 718 774 goto set_level; 719 775 } 720 776 ··· 778 830 779 831 if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DDC", &h_dummy1))) { 780 832 device->cap._DDC = 1; 781 - } 782 - if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DCS", &h_dummy1))) { 783 - device->cap._DCS = 1; 784 - } 785 - if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DGS", &h_dummy1))) { 786 - device->cap._DGS = 1; 787 - } 788 - if (ACPI_SUCCESS(acpi_get_handle(device->dev->handle, "_DSS", &h_dummy1))) { 789 - device->cap._DSS = 1; 790 833 } 791 834 792 835 if (acpi_video_backlight_support()) { ··· 842 903 if (result) 843 904 printk(KERN_ERR PREFIX "Create sysfs link\n"); 844 905 845 - } 846 - 847 - if (acpi_video_display_switch_support()) { 848 - 849 - if (device->cap._DCS && device->cap._DSS) { 850 - static int count; 851 - char *name; 852 - name = kasprintf(GFP_KERNEL, "acpi_video%d", count); 853 - if (!name) 854 - return; 855 - count++; 856 - device->output_dev = video_output_register(name, 857 - NULL, device, &acpi_output_properties); 858 - kfree(name); 859 - } 860 906 } 861 907 } 862 908 ··· 1284 1360 if (!video_device) 1285 1361 continue; 1286 1362 1363 + if (!video_device->cap._DDC) 1364 + continue; 1365 + 1287 1366 if (type) { 1288 1367 switch (type) { 1289 1368 case ACPI_VIDEO_DISPLAY_CRT: ··· 1379 1452 thermal_cooling_device_unregister(device->cooling_dev); 1380 1453 device->cooling_dev = NULL; 1381 1454 } 1382 - video_output_unregister(device->output_dev); 1383 1455 1384 1456 return 0; 1385 1457 }
+4 -53
drivers/acpi/video_detect.c
··· 17 17 * capabilities the graphics cards plugged in support. The check for general 18 18 * video capabilities will be triggered by the first caller of 19 19 * acpi_video_get_capabilities(NULL); which will happen when the first 20 - * backlight (or display output) switching supporting driver calls: 20 + * backlight switching supporting driver calls: 21 21 * acpi_video_backlight_support(); 22 22 * 23 23 * Depending on whether ACPI graphics extensions (cmp. ACPI spec Appendix B) 24 24 * are available, video.ko should be used to handle the device. 25 25 * 26 26 * Otherwise vendor specific drivers like thinkpad_acpi, asus_acpi, 27 - * sony_acpi,... can take care about backlight brightness and display output 28 - * switching. 27 + * sony_acpi,... can take care about backlight brightness. 29 28 * 30 29 * If CONFIG_ACPI_VIDEO is neither set as "compiled in" (y) nor as a module (m) 31 30 * this file will not be compiled, acpi_video_get_capabilities() and ··· 81 82 82 83 if (!device) 83 84 return 0; 84 - 85 - /* Is this device able to support video switching ? */ 86 - if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) || 87 - ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy))) 88 - video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING; 89 85 90 86 /* Is this device able to retrieve a video ROM ? */ 91 87 if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy))) ··· 155 161 * 156 162 * if (dmi_name_in_vendors("XY")) { 157 163 * acpi_video_support |= 158 - * ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR; 159 - * acpi_video_support |= 160 164 * ACPI_VIDEO_BACKLIGHT_DMI_VENDOR; 161 165 *} 162 166 */ ··· 204 212 EXPORT_SYMBOL(acpi_video_backlight_support); 205 213 206 214 /* 207 - * Returns true if video.ko can do display output switching. 208 - * This does not work well/at all with binary graphics drivers 209 - * which disable system io ranges and do it on their own. 210 - */ 211 - int acpi_video_display_switch_support(void) 212 - { 213 - if (!acpi_video_caps_checked) 214 - acpi_video_get_capabilities(NULL); 215 - 216 - if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR) 217 - return 0; 218 - else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO) 219 - return 1; 220 - 221 - if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VENDOR) 222 - return 0; 223 - else if (acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING_DMI_VIDEO) 224 - return 1; 225 - 226 - return acpi_video_support & ACPI_VIDEO_OUTPUT_SWITCHING; 227 - } 228 - EXPORT_SYMBOL(acpi_video_display_switch_support); 229 - 230 - /* 231 - * Use acpi_display_output=vendor/video or acpi_backlight=vendor/video 232 - * To force that backlight or display output switching is processed by vendor 233 - * specific acpi drivers or video.ko driver. 215 + * Use acpi_backlight=vendor/video to force that backlight switching 216 + * is processed by vendor specific acpi drivers or video.ko driver. 234 217 */ 235 218 static int __init acpi_backlight(char *str) 236 219 { ··· 222 255 return 1; 223 256 } 224 257 __setup("acpi_backlight=", acpi_backlight); 225 - 226 - static int __init acpi_display_output(char *str) 227 - { 228 - if (str == NULL || *str == '\0') 229 - return 1; 230 - else { 231 - if (!strcmp("vendor", str)) 232 - acpi_video_support |= 233 - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VENDOR; 234 - if (!strcmp("video", str)) 235 - acpi_video_support |= 236 - ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; 237 - } 238 - return 1; 239 - } 240 - __setup("acpi_display_output=", acpi_display_output);
+12 -10
drivers/acpi/wakeup.c
··· 37 37 container_of(node, struct acpi_device, wakeup_list); 38 38 39 39 if (!dev->wakeup.flags.valid 40 - || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) 41 - || sleep_state > (u32) dev->wakeup.sleep_state) 40 + || sleep_state > (u32) dev->wakeup.sleep_state 41 + || !(device_may_wakeup(&dev->dev) 42 + || dev->wakeup.prepare_count)) 42 43 continue; 43 44 44 - if (dev->wakeup.state.enabled) 45 + if (device_may_wakeup(&dev->dev)) 45 46 acpi_enable_wakeup_device_power(dev, sleep_state); 46 47 47 48 /* The wake-up power should have been enabled already. */ 48 - acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, 49 + acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number, 49 50 ACPI_GPE_ENABLE); 50 51 } 51 52 } ··· 64 63 container_of(node, struct acpi_device, wakeup_list); 65 64 66 65 if (!dev->wakeup.flags.valid 67 - || !(dev->wakeup.state.enabled || dev->wakeup.prepare_count) 68 - || (sleep_state > (u32) dev->wakeup.sleep_state)) 66 + || sleep_state > (u32) dev->wakeup.sleep_state 67 + || !(device_may_wakeup(&dev->dev) 68 + || dev->wakeup.prepare_count)) 69 69 continue; 70 70 71 - acpi_gpe_wakeup(dev->wakeup.gpe_device, dev->wakeup.gpe_number, 71 + acpi_set_gpe_wake_mask(dev->wakeup.gpe_device, dev->wakeup.gpe_number, 72 72 ACPI_GPE_DISABLE); 73 73 74 - if (dev->wakeup.state.enabled) 74 + if (device_may_wakeup(&dev->dev)) 75 75 acpi_disable_wakeup_device_power(dev); 76 76 } 77 77 } ··· 86 84 struct acpi_device *dev = container_of(node, 87 85 struct acpi_device, 88 86 wakeup_list); 89 - if (dev->wakeup.flags.always_enabled) 90 - dev->wakeup.state.enabled = 1; 87 + if (device_can_wakeup(&dev->dev)) 88 + device_set_wakeup_enable(&dev->dev, true); 91 89 } 92 90 mutex_unlock(&acpi_device_lock); 93 91 return 0;
+27
drivers/char/ipmi/ipmi_msghandler.c
··· 970 970 } 971 971 EXPORT_SYMBOL(ipmi_create_user); 972 972 973 + int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data) 974 + { 975 + int rv = 0; 976 + ipmi_smi_t intf; 977 + struct ipmi_smi_handlers *handlers; 978 + 979 + mutex_lock(&ipmi_interfaces_mutex); 980 + list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { 981 + if (intf->intf_num == if_num) 982 + goto found; 983 + } 984 + /* Not found, return an error */ 985 + rv = -EINVAL; 986 + mutex_unlock(&ipmi_interfaces_mutex); 987 + return rv; 988 + 989 + found: 990 + handlers = intf->handlers; 991 + rv = -ENOSYS; 992 + if (handlers->get_smi_info) 993 + rv = handlers->get_smi_info(intf->send_info, data); 994 + mutex_unlock(&ipmi_interfaces_mutex); 995 + 996 + return rv; 997 + } 998 + EXPORT_SYMBOL(ipmi_get_smi_info); 999 + 973 1000 static void free_user(struct kref *ref) 974 1001 { 975 1002 ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
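
A hypothetical consumer of the new ipmi_get_smi_info() might look like the sketch below; the interface number 0 and the log message are illustrative only. Note the put_device() on success, matching the reference taken by the interface's get_smi_info() handler:

	#include <linux/ipmi.h>
	#include <linux/device.h>

	static int example_query_smi(void)
	{
		struct ipmi_smi_info info;
		int rv;

		rv = ipmi_get_smi_info(0, &info);
		if (rv)
			return rv; /* -EINVAL: no such interface, -ENOSYS: no info */

		if (info.addr_src == SI_ACPI)
			pr_info("IPMI interface 0 came from ACPI, handle %p\n",
				info.addr_info.acpi_info.acpi_handle);

		/* Drop the device reference taken on our behalf. */
		put_device(info.dev);
		return 0;
	}
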
+18 -5
drivers/char/ipmi/ipmi_si_intf.c
··· 57 57 #include <asm/irq.h> 58 58 #include <linux/interrupt.h> 59 59 #include <linux/rcupdate.h> 60 + #include <linux/ipmi.h> 60 61 #include <linux/ipmi_smi.h> 61 62 #include <asm/io.h> 62 63 #include "ipmi_si_sm.h" ··· 110 109 }; 111 110 static char *si_to_str[] = { "kcs", "smic", "bt" }; 112 111 113 - enum ipmi_addr_src { 114 - SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, 115 - SI_PCI, SI_DEVICETREE, SI_DEFAULT 116 - }; 117 112 static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI", 118 113 "ACPI", "SMBIOS", "PCI", 119 114 "device-tree", "default" }; ··· 290 293 struct task_struct *thread; 291 294 292 295 struct list_head link; 296 + union ipmi_smi_info_union addr_info; 293 297 }; 294 298 295 299 #define smi_inc_stat(smi, stat) \ ··· 1186 1188 return 0; 1187 1189 } 1188 1190 1191 + static int get_smi_info(void *send_info, struct ipmi_smi_info *data) 1192 + { 1193 + struct smi_info *smi = send_info; 1194 + 1195 + data->addr_src = smi->addr_source; 1196 + data->dev = smi->dev; 1197 + data->addr_info = smi->addr_info; 1198 + get_device(smi->dev); 1199 + 1200 + return 0; 1201 + } 1202 + 1189 1203 static void set_maintenance_mode(void *send_info, int enable) 1190 1204 { 1191 1205 struct smi_info *smi_info = send_info; ··· 1209 1199 static struct ipmi_smi_handlers handlers = { 1210 1200 .owner = THIS_MODULE, 1211 1201 .start_processing = smi_start_processing, 1202 + .get_smi_info = get_smi_info, 1212 1203 .sender = sender, 1213 1204 .request_events = request_events, 1214 1205 .set_maintenance_mode = set_maintenance_mode, ··· 1941 1930 static int acpi_failure; 1942 1931 1943 1932 /* For GPE-type interrupts. */ 1944 - static u32 ipmi_acpi_gpe(void *context) 1933 + static u32 ipmi_acpi_gpe(acpi_handle gpe_device, 1934 + u32 gpe_number, void *context) 1945 1935 { 1946 1936 struct smi_info *smi_info = context; 1947 1937 unsigned long flags; ··· 2170 2158 printk(KERN_INFO PFX "probing via ACPI\n"); 2171 2159 2172 2160 handle = acpi_dev->handle; 2161 + info->addr_info.acpi_info.acpi_handle = handle; 2173 2162 2174 2163 /* _IFT tells us the interface type: KCS, BT, etc */ 2175 2164 status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
-1
drivers/gpu/drm/Kconfig
··· 107 107 select FB_CFB_IMAGEBLIT 108 108 # i915 depends on ACPI_VIDEO when ACPI is enabled 109 109 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 110 - select VIDEO_OUTPUT_CONTROL if ACPI 111 110 select BACKLIGHT_CLASS_DEVICE if ACPI 112 111 select INPUT if ACPI 113 112 select ACPI_VIDEO if ACPI
-1
drivers/gpu/stub/Kconfig
··· 3 3 depends on PCI 4 4 # Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled 5 5 # but for select to work, need to select ACPI_VIDEO's dependencies, ick 6 - select VIDEO_OUTPUT_CONTROL if ACPI 7 6 select BACKLIGHT_CLASS_DEVICE if ACPI 8 7 select INPUT if ACPI 9 8 select ACPI_VIDEO if ACPI
+2 -2
drivers/platform/x86/fujitsu-laptop.c
··· 689 689 if (error) 690 690 goto err_free_input_dev; 691 691 692 - result = acpi_bus_get_power(fujitsu->acpi_handle, &state); 692 + result = acpi_bus_update_power(fujitsu->acpi_handle, &state); 693 693 if (result) { 694 694 printk(KERN_ERR "Error reading power state\n"); 695 695 goto err_unregister_input_dev; ··· 857 857 if (error) 858 858 goto err_free_input_dev; 859 859 860 - result = acpi_bus_get_power(fujitsu_hotkey->acpi_handle, &state); 860 + result = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state); 861 861 if (result) { 862 862 printk(KERN_ERR "Error reading power state\n"); 863 863 goto err_unregister_input_dev;
+4 -2
drivers/pnp/Makefile
··· 2 2 # Makefile for the Linux Plug-and-Play Support. 3 3 # 4 4 5 - obj-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o 5 + obj-y := pnp.o 6 + 7 + pnp-y := core.o card.o driver.o resource.o manager.o support.o interface.o quirks.o 6 8 7 9 obj-$(CONFIG_PNPACPI) += pnpacpi/ 8 10 obj-$(CONFIG_PNPBIOS) += pnpbios/ 9 11 obj-$(CONFIG_ISAPNP) += isapnp/ 10 12 11 13 # pnp_system_init goes after pnpacpi/pnpbios init 12 - obj-y += system.o 14 + pnp-y += system.o
+1 -6
drivers/pnp/core.c
··· 220 220 int pnp_debug; 221 221 222 222 #if defined(CONFIG_PNP_DEBUG_MESSAGES) 223 - static int __init pnp_debug_setup(char *__unused) 224 - { 225 - pnp_debug = 1; 226 - return 1; 227 - } 228 - __setup("pnp.debug", pnp_debug_setup); 223 + module_param_named(debug, pnp_debug, int, 0644); 229 224 #endif
+5 -2
drivers/pnp/driver.c
··· 189 189 if (!pnp_drv) 190 190 return 0; 191 191 192 - if (pnp_dev->protocol->resume) 193 - pnp_dev->protocol->resume(pnp_dev); 192 + if (pnp_dev->protocol->resume) { 193 + error = pnp_dev->protocol->resume(pnp_dev); 194 + if (error) 195 + return error; 196 + } 194 197 195 198 if (pnp_can_write(pnp_dev)) { 196 199 error = pnp_start_dev(pnp_dev);
+3 -3
drivers/pnp/isapnp/Makefile
··· 1 1 # 2 2 # Makefile for the kernel ISAPNP driver. 3 3 # 4 + obj-y += pnp.o 5 + pnp-y := core.o compat.o 4 6 5 - isapnp-proc-$(CONFIG_PROC_FS) = proc.o 6 - 7 - obj-y := core.o compat.o $(isapnp-proc-y) 7 + pnp-$(CONFIG_PROC_FS) += proc.o
+2 -1
drivers/pnp/pnpacpi/Makefile
··· 1 1 # 2 2 # Makefile for the kernel PNPACPI driver. 3 3 # 4 + obj-y += pnp.o 4 5 5 - obj-y := core.o rsparser.o 6 + pnp-y := core.o rsparser.o
+67 -26
drivers/pnp/pnpacpi/core.c
··· 81 81 82 82 static int pnpacpi_set_resources(struct pnp_dev *dev) 83 83 { 84 - struct acpi_device *acpi_dev = dev->data; 85 - acpi_handle handle = acpi_dev->handle; 84 + struct acpi_device *acpi_dev; 85 + acpi_handle handle; 86 86 struct acpi_buffer buffer; 87 87 int ret; 88 88 89 89 pnp_dbg(&dev->dev, "set resources\n"); 90 + 91 + handle = DEVICE_ACPI_HANDLE(&dev->dev); 92 + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 93 + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 94 + return -ENODEV; 95 + } 96 + 90 97 ret = pnpacpi_build_resource_template(dev, &buffer); 91 98 if (ret) 92 99 return ret; ··· 112 105 113 106 static int pnpacpi_disable_resources(struct pnp_dev *dev) 114 107 { 115 - struct acpi_device *acpi_dev = dev->data; 116 - acpi_handle handle = acpi_dev->handle; 108 + struct acpi_device *acpi_dev; 109 + acpi_handle handle; 117 110 int ret; 118 111 119 112 dev_dbg(&dev->dev, "disable resources\n"); 113 + 114 + handle = DEVICE_ACPI_HANDLE(&dev->dev); 115 + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 116 + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 117 + return 0; 118 + } 120 119 121 120 /* acpi_unregister_gsi(pnp_irq(dev, 0)); */ 122 121 ret = 0; ··· 137 124 #ifdef CONFIG_ACPI_SLEEP 138 125 static bool pnpacpi_can_wakeup(struct pnp_dev *dev) 139 126 { 140 - struct acpi_device *acpi_dev = dev->data; 141 - acpi_handle handle = acpi_dev->handle; 127 + struct acpi_device *acpi_dev; 128 + acpi_handle handle; 129 + 130 + handle = DEVICE_ACPI_HANDLE(&dev->dev); 131 + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 132 + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 133 + return false; 134 + } 142 135 143 136 return acpi_bus_can_wakeup(handle); 144 137 } 145 138 146 139 static int pnpacpi_suspend(struct pnp_dev *dev, pm_message_t state) 147 140 { 148 - struct acpi_device *acpi_dev = dev->data; 149 - acpi_handle handle = acpi_dev->handle; 150 - int power_state; 141 + struct acpi_device *acpi_dev; 142 + acpi_handle handle; 143 + int error = 0; 144 + 145 + handle = DEVICE_ACPI_HANDLE(&dev->dev); 146 + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 147 + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 148 + return 0; 149 + } 151 150 152 151 if (device_can_wakeup(&dev->dev)) { 153 - int rc = acpi_pm_device_sleep_wake(&dev->dev, 152 + error = acpi_pm_device_sleep_wake(&dev->dev, 154 153 device_may_wakeup(&dev->dev)); 155 - 156 - if (rc) 157 - return rc; 154 + if (error) 155 + return error; 158 156 } 159 - power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); 160 - if (power_state < 0) 161 - power_state = (state.event == PM_EVENT_ON) ? 162 - ACPI_STATE_D0 : ACPI_STATE_D3; 163 157 164 - /* acpi_bus_set_power() often fails (keyboard port can't be 165 - * powered-down?), and in any case, our return value is ignored 166 - * by pnp_bus_suspend(). Hence we don't revert the wakeup 167 - * setting if the set_power fails. 168 - */ 169 - return acpi_bus_set_power(handle, power_state); 158 + if (acpi_bus_power_manageable(handle)) { 159 + int power_state = acpi_pm_device_sleep_state(&dev->dev, NULL); 160 + 161 + if (power_state < 0) 162 + power_state = (state.event == PM_EVENT_ON) ? 163 + ACPI_STATE_D0 : ACPI_STATE_D3; 164 + 165 + /* 166 + * acpi_bus_set_power() often fails (keyboard port can't be 167 + * powered-down?), and in any case, our return value is ignored 168 + * by pnp_bus_suspend(). 
Hence we don't revert the wakeup 169 + * setting if the set_power fails. 170 + */ 171 + error = acpi_bus_set_power(handle, power_state); 172 + } 173 + 174 + return error; 170 175 } 171 176 172 177 static int pnpacpi_resume(struct pnp_dev *dev) 173 178 { 174 - struct acpi_device *acpi_dev = dev->data; 175 - acpi_handle handle = acpi_dev->handle; 179 + struct acpi_device *acpi_dev; 180 + acpi_handle handle = DEVICE_ACPI_HANDLE(&dev->dev); 181 + int error = 0; 182 + 183 + if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev))) { 184 + dev_dbg(&dev->dev, "ACPI device not found in %s!\n", __func__); 185 + return -ENODEV; 186 + } 176 187 177 188 if (device_may_wakeup(&dev->dev)) 178 189 acpi_pm_device_sleep_wake(&dev->dev, false); 179 - return acpi_bus_set_power(handle, ACPI_STATE_D0); 190 + 191 + if (acpi_bus_power_manageable(handle)) 192 + error = acpi_bus_set_power(handle, ACPI_STATE_D0); 193 + 194 + return error; 180 195 } 181 196 #endif 182 197
+3 -2
drivers/pnp/pnpbios/Makefile
··· 1 1 # 2 2 # Makefile for the kernel PNPBIOS driver. 3 3 # 4 + obj-y := pnp.o 4 5 5 - pnpbios-proc-$(CONFIG_PNPBIOS_PROC_FS) = proc.o 6 + pnp-y := core.o bioscalls.o rsparser.o 6 7 7 - obj-y := core.o bioscalls.o rsparser.o $(pnpbios-proc-y) 8 + pnp-$(CONFIG_PNPBIOS_PROC_FS) += proc.o
+1
drivers/thermal/Kconfig
··· 4 4 5 5 menuconfig THERMAL 6 6 tristate "Generic Thermal sysfs driver" 7 + depends on NET 7 8 help 8 9 Generic Thermal Sysfs driver offers a generic mechanism for 9 10 thermal management. Usually it's made up of one or more thermal
+107 -13
drivers/thermal/thermal_sys.c
··· 32 32 #include <linux/thermal.h> 33 33 #include <linux/spinlock.h> 34 34 #include <linux/reboot.h> 35 + #include <net/netlink.h> 36 + #include <net/genetlink.h> 35 37 36 38 MODULE_AUTHOR("Zhang Rui"); 37 39 MODULE_DESCRIPTION("Generic thermal management sysfs support"); ··· 59 57 static LIST_HEAD(thermal_tz_list); 60 58 static LIST_HEAD(thermal_cdev_list); 61 59 static DEFINE_MUTEX(thermal_list_lock); 60 + 61 + static unsigned int thermal_event_seqnum; 62 + 63 + static struct genl_family thermal_event_genl_family = { 64 + .id = GENL_ID_GENERATE, 65 + .name = THERMAL_GENL_FAMILY_NAME, 66 + .version = THERMAL_GENL_VERSION, 67 + .maxattr = THERMAL_GENL_ATTR_MAX, 68 + }; 69 + 70 + static struct genl_multicast_group thermal_event_mcgrp = { 71 + .name = THERMAL_GENL_MCAST_GROUP_NAME, 72 + }; 73 + 74 + static int genetlink_init(void); 75 + static void genetlink_exit(void); 62 76 63 77 static int get_idr(struct idr *idr, struct mutex *lock, int *id) 64 78 { ··· 841 823 * @devdata: device private data. 842 824 * @ops: standard thermal cooling devices callbacks. 843 825 */ 844 - struct thermal_cooling_device *thermal_cooling_device_register(char *type, 845 - void *devdata, 846 - struct 847 - thermal_cooling_device_ops 848 - *ops) 826 + struct thermal_cooling_device *thermal_cooling_device_register( 827 + char *type, void *devdata, const struct thermal_cooling_device_ops *ops) 849 828 { 850 829 struct thermal_cooling_device *cdev; 851 830 struct thermal_zone_device *pos; ··· 1063 1048 * section 11.1.5.1 of the ACPI specification 3.0. 1064 1049 */ 1065 1050 struct thermal_zone_device *thermal_zone_device_register(char *type, 1066 - int trips, 1067 - void *devdata, struct 1068 - thermal_zone_device_ops 1069 - *ops, int tc1, int 1070 - tc2, 1071 - int passive_delay, 1072 - int polling_delay) 1051 + int trips, void *devdata, 1052 + const struct thermal_zone_device_ops *ops, 1053 + int tc1, int tc2, int passive_delay, int polling_delay) 1073 1054 { 1074 1055 struct thermal_zone_device *tz; 1075 1056 struct thermal_cooling_device *pos; ··· 1225 1214 1226 1215 EXPORT_SYMBOL(thermal_zone_device_unregister); 1227 1216 1217 + int generate_netlink_event(u32 orig, enum events event) 1218 + { 1219 + struct sk_buff *skb; 1220 + struct nlattr *attr; 1221 + struct thermal_genl_event *thermal_event; 1222 + void *msg_header; 1223 + int size; 1224 + int result; 1225 + 1226 + /* allocate memory */ 1227 + size = nla_total_size(sizeof(struct thermal_genl_event)) + \ 1228 + nla_total_size(0); 1229 + 1230 + skb = genlmsg_new(size, GFP_ATOMIC); 1231 + if (!skb) 1232 + return -ENOMEM; 1233 + 1234 + /* add the genetlink message header */ 1235 + msg_header = genlmsg_put(skb, 0, thermal_event_seqnum++, 1236 + &thermal_event_genl_family, 0, 1237 + THERMAL_GENL_CMD_EVENT); 1238 + if (!msg_header) { 1239 + nlmsg_free(skb); 1240 + return -ENOMEM; 1241 + } 1242 + 1243 + /* fill the data */ 1244 + attr = nla_reserve(skb, THERMAL_GENL_ATTR_EVENT, \ 1245 + sizeof(struct thermal_genl_event)); 1246 + 1247 + if (!attr) { 1248 + nlmsg_free(skb); 1249 + return -EINVAL; 1250 + } 1251 + 1252 + thermal_event = nla_data(attr); 1253 + if (!thermal_event) { 1254 + nlmsg_free(skb); 1255 + return -EINVAL; 1256 + } 1257 + 1258 + memset(thermal_event, 0, sizeof(struct thermal_genl_event)); 1259 + 1260 + thermal_event->orig = orig; 1261 + thermal_event->event = event; 1262 + 1263 + /* send multicast genetlink message */ 1264 + result = genlmsg_end(skb, msg_header); 1265 + if (result < 0) { 1266 + nlmsg_free(skb); 1267 + return result; 1268 + } 1269 
+ 1270 + result = genlmsg_multicast(skb, 0, thermal_event_mcgrp.id, GFP_ATOMIC); 1271 + if (result) 1272 + printk(KERN_INFO "failed to send netlink event:%d", result); 1273 + 1274 + return result; 1275 + } 1276 + EXPORT_SYMBOL(generate_netlink_event); 1277 + 1278 + static int genetlink_init(void) 1279 + { 1280 + int result; 1281 + 1282 + result = genl_register_family(&thermal_event_genl_family); 1283 + if (result) 1284 + return result; 1285 + 1286 + result = genl_register_mc_group(&thermal_event_genl_family, 1287 + &thermal_event_mcgrp); 1288 + if (result) 1289 + genl_unregister_family(&thermal_event_genl_family); 1290 + return result; 1291 + } 1292 + 1228 1293 static int __init thermal_init(void) 1229 1294 { 1230 1295 int result = 0; ··· 1312 1225 mutex_destroy(&thermal_idr_lock); 1313 1226 mutex_destroy(&thermal_list_lock); 1314 1227 } 1228 + result = genetlink_init(); 1315 1229 return result; 1230 + } 1231 + 1232 + static void genetlink_exit(void) 1233 + { 1234 + genl_unregister_family(&thermal_event_genl_family); 1316 1235 } 1317 1236 1318 1237 static void __exit thermal_exit(void) ··· 1328 1235 idr_destroy(&thermal_cdev_idr); 1329 1236 mutex_destroy(&thermal_idr_lock); 1330 1237 mutex_destroy(&thermal_list_lock); 1238 + genetlink_exit(); 1331 1239 } 1332 1240 1333 - subsys_initcall(thermal_init); 1241 + fs_initcall(thermal_init); 1334 1242 module_exit(thermal_exit);
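
A driver that wants to raise one of the new thermal events simply calls generate_netlink_event() with its origin identifier. The helper below is a sketch; the helper name, the zone argument, and the use of the zone's struct device for logging are assumptions:

	#include <linux/thermal.h>

	static void example_notify_critical(struct thermal_zone_device *tz,
					    u32 orig)
	{
		/* Multicasts a THERMAL_GENL_CMD_EVENT message to user space. */
		if (generate_netlink_event(orig, THERMAL_CRITICAL))
			dev_warn(&tz->device,
				 "thermal netlink notification failed\n");
	}
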
+2 -10
include/acpi/acpi_bus.h
··· 148 148 u32 suprise_removal_ok:1; 149 149 u32 power_manageable:1; 150 150 u32 performance_manageable:1; 151 - u32 wake_capable:1; /* Wakeup(_PRW) supported? */ 152 - u32 force_power_state:1; 153 - u32 reserved:22; 151 + u32 reserved:24; 154 152 }; 155 153 156 154 /* File System */ ··· 240 242 struct acpi_device_wakeup_flags { 241 243 u8 valid:1; /* Can successfully enable wakeup? */ 242 244 u8 run_wake:1; /* Run-Wake GPE devices */ 243 - u8 always_enabled:1; /* Run-wake devices that are always enabled */ 244 245 u8 notifier_present:1; /* Wake-up notify handler has been installed */ 245 - }; 246 - 247 - struct acpi_device_wakeup_state { 248 - u8 enabled:1; 249 246 }; 250 247 251 248 struct acpi_device_wakeup { ··· 248 255 u64 gpe_number; 249 256 u64 sleep_state; 250 257 struct acpi_handle_list resources; 251 - struct acpi_device_wakeup_state state; 252 258 struct acpi_device_wakeup_flags flags; 253 259 int prepare_count; 254 260 int run_wake_count; ··· 320 328 acpi_status acpi_bus_get_status_handle(acpi_handle handle, 321 329 unsigned long long *sta); 322 330 int acpi_bus_get_status(struct acpi_device *device); 323 - int acpi_bus_get_power(acpi_handle handle, int *state); 324 331 int acpi_bus_set_power(acpi_handle handle, int state); 332 + int acpi_bus_update_power(acpi_handle handle, int *state_p); 325 333 bool acpi_bus_power_manageable(acpi_handle handle); 326 334 bool acpi_bus_can_wakeup(acpi_handle handle); 327 335 #ifdef CONFIG_ACPI_PROC_EVENT
+13 -7
include/acpi/acpixf.h
··· 47 47 48 48 /* Current ACPICA subsystem version in YYYYMMDD format */ 49 49 50 - #define ACPI_CA_VERSION 0x20101013 50 + #define ACPI_CA_VERSION 0x20101209 51 51 52 52 #include "actypes.h" 53 53 #include "actbl.h" ··· 229 229 acpi_install_initialization_handler(acpi_init_handler handler, u32 function); 230 230 231 231 acpi_status 232 + acpi_install_global_event_handler(ACPI_GBL_EVENT_HANDLER handler, 233 + void *context); 234 + 235 + acpi_status 232 236 acpi_install_fixed_event_handler(u32 acpi_event, 233 237 acpi_event_handler handler, void *context); 234 238 ··· 262 258 acpi_status 263 259 acpi_install_gpe_handler(acpi_handle gpe_device, 264 260 u32 gpe_number, 265 - u32 type, acpi_event_handler address, void *context); 261 + u32 type, acpi_gpe_handler address, void *context); 266 262 267 263 acpi_status 268 264 acpi_remove_gpe_handler(acpi_handle gpe_device, 269 - u32 gpe_number, acpi_event_handler address); 265 + u32 gpe_number, acpi_gpe_handler address); 270 266 271 267 #ifdef ACPI_FUTURE_USAGE 272 268 acpi_status acpi_install_exception_handler(acpi_exception_handler handler); ··· 296 292 297 293 acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number); 298 294 299 - acpi_status acpi_gpe_can_wake(acpi_handle gpe_device, u32 gpe_number); 300 - 301 295 acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number); 302 296 303 - acpi_status acpi_gpe_wakeup(acpi_handle gpe_device, u32 gpe_number, u8 action); 297 + acpi_status 298 + acpi_setup_gpe_for_wake(acpi_handle parent_device, 299 + acpi_handle gpe_device, u32 gpe_number); 300 + 301 + acpi_status acpi_set_gpe_wake_mask(acpi_handle gpe_device, u32 gpe_number, u8 action); 304 302 305 303 acpi_status 306 304 acpi_get_gpe_status(acpi_handle gpe_device, ··· 321 315 322 316 acpi_status acpi_remove_gpe_block(acpi_handle gpe_device); 323 317 324 - acpi_status acpi_update_gpes(void); 318 + acpi_status acpi_update_all_gpes(void); 325 319 326 320 /* 327 321 * Resource interfaces
+34 -18
include/acpi/actypes.h
··· 656 656 #define ACPI_GPE_MAX 0xFF 657 657 #define ACPI_NUM_GPE 256 658 658 659 - /* Actions for acpi_gpe_wakeup, acpi_hw_low_set_gpe */ 659 + /* Actions for acpi_set_gpe_wake_mask, acpi_hw_low_set_gpe */ 660 660 661 661 #define ACPI_GPE_ENABLE 0 662 662 #define ACPI_GPE_DISABLE 1 663 - #define ACPI_GPE_COND_ENABLE 2 663 + #define ACPI_GPE_CONDITIONAL_ENABLE 2 664 664 665 665 /* 666 666 * GPE info flags - Per GPE 667 - * +-------+---+-+-+ 668 - * | 7:4 |3:2|1|0| 669 - * +-------+---+-+-+ 670 - * | | | | 671 - * | | | +--- Interrupt type: edge or level triggered 672 - * | | +----- GPE can wake the system 673 - * | +-------- Type of dispatch:to method, handler, or none 674 - * +-------------- <Reserved> 667 + * +-------+-+-+---+ 668 + * | 7:4 |3|2|1:0| 669 + * +-------+-+-+---+ 670 + * | | | | 671 + * | | | +-- Type of dispatch:to method, handler, notify, or none 672 + * | | +----- Interrupt type: edge or level triggered 673 + * | +------- Is a Wake GPE 674 + * +------------ <Reserved> 675 675 */ 676 - #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x01 677 - #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x01 676 + #define ACPI_GPE_DISPATCH_NONE (u8) 0x00 677 + #define ACPI_GPE_DISPATCH_METHOD (u8) 0x01 678 + #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x02 679 + #define ACPI_GPE_DISPATCH_NOTIFY (u8) 0x03 680 + #define ACPI_GPE_DISPATCH_MASK (u8) 0x03 681 + 682 + #define ACPI_GPE_LEVEL_TRIGGERED (u8) 0x04 678 683 #define ACPI_GPE_EDGE_TRIGGERED (u8) 0x00 684 + #define ACPI_GPE_XRUPT_TYPE_MASK (u8) 0x04 679 685 680 - #define ACPI_GPE_CAN_WAKE (u8) 0x02 681 - 682 - #define ACPI_GPE_DISPATCH_MASK (u8) 0x0C 683 - #define ACPI_GPE_DISPATCH_HANDLER (u8) 0x04 684 - #define ACPI_GPE_DISPATCH_METHOD (u8) 0x08 685 - #define ACPI_GPE_DISPATCH_NOT_USED (u8) 0x00 686 + #define ACPI_GPE_CAN_WAKE (u8) 0x08 686 687 687 688 /* 688 689 * Flags for GPE and Lock interfaces ··· 895 894 /* 896 895 * Various handlers and callback procedures 897 896 */ 897 + typedef 898 + void (*ACPI_GBL_EVENT_HANDLER) (u32 event_type, 899 + acpi_handle device, 900 + u32 event_number, void *context); 901 + 902 + #define ACPI_EVENT_TYPE_GPE 0 903 + #define ACPI_EVENT_TYPE_FIXED 1 904 + 898 905 typedef u32(*acpi_event_handler) (void *context); 906 + 907 + typedef 908 + u32 (*acpi_gpe_handler) (acpi_handle gpe_device, u32 gpe_number, void *context); 899 909 900 910 typedef 901 911 void (*acpi_notify_handler) (acpi_handle device, u32 value, void *context); ··· 962 950 963 951 #define ACPI_INTERRUPT_NOT_HANDLED 0x00 964 952 #define ACPI_INTERRUPT_HANDLED 0x01 953 + 954 + /* GPE handler return values */ 955 + 956 + #define ACPI_REENABLE_GPE 0x80 965 957 966 958 /* Length of 32-bit EISAID values when converted back to a string */ 967 959
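
Under the reworked interface, a GPE handler receives the GPE device and number directly, and may OR ACPI_REENABLE_GPE into its return value to have ACPICA reenable the GPE on exit. A sketch, with the driver context and deferred work item as assumptions:

	#include <linux/acpi.h>
	#include <linux/workqueue.h>

	struct example_ctx {			/* hypothetical driver context */
		struct work_struct work;
	};

	static u32 example_gpe_handler(acpi_handle gpe_device, u32 gpe_number,
				       void *context)
	{
		struct example_ctx *ctx = context;

		/* Defer the real work, then let ACPICA reenable the GPE. */
		schedule_work(&ctx->work);
		return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
	}

Such a handler would be installed with acpi_install_gpe_handler(gpe_device, gpe_number, ACPI_GPE_LEVEL_TRIGGERED, example_gpe_handler, ctx).
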
+6
include/acpi/processor.h
··· 324 324 int acpi_processor_get_throttling_info(struct acpi_processor *pr); 325 325 extern int acpi_processor_set_throttling(struct acpi_processor *pr, 326 326 int state, bool force); 327 + /* 328 + * Reevaluate whether the T-state is still valid after one CPU is 329 + * onlined/offlined. In such a case flags.throttling will be updated. 330 + */ 331 + extern void acpi_processor_reevaluate_tstate(struct acpi_processor *pr, 332 + unsigned long action); 327 333 extern const struct file_operations acpi_processor_throttling_fops; 328 334 extern void acpi_processor_throttling_init(void); 329 335 /* in processor_idle.c */
+10
include/linux/acpi.h
··· 352 352 return -1; 353 353 } 354 354 #endif /* !CONFIG_ACPI */ 355 + 356 + #ifdef CONFIG_ACPI_SLEEP 357 + int suspend_nvs_register(unsigned long start, unsigned long size); 358 + #else 359 + static inline int suspend_nvs_register(unsigned long a, unsigned long b) 360 + { 361 + return 0; 362 + } 363 + #endif 364 + 355 365 #endif /*_LINUX_ACPI_H*/
+82 -4
include/linux/cper.h
··· 39 39 * Severity definition for error_severity in struct cper_record_header 40 40 * and section_severity in struct cper_section_descriptor 41 41 */ 42 - #define CPER_SEV_RECOVERABLE 0x0 43 - #define CPER_SEV_FATAL 0x1 44 - #define CPER_SEV_CORRECTED 0x2 45 - #define CPER_SEV_INFORMATIONAL 0x3 42 + enum { 43 + CPER_SEV_RECOVERABLE, 44 + CPER_SEV_FATAL, 45 + CPER_SEV_CORRECTED, 46 + CPER_SEV_INFORMATIONAL, 47 + }; 46 48 47 49 /* 48 50 * Validation bits definition for validation_bits in struct ··· 203 201 UUID_LE(0x036F84E1, 0x7F37, 0x428c, 0xA7, 0x9E, 0x57, 0x5F, \ 204 202 0xDF, 0xAA, 0x84, 0xEC) 205 203 204 + #define CPER_PROC_VALID_TYPE 0x0001 205 + #define CPER_PROC_VALID_ISA 0x0002 206 + #define CPER_PROC_VALID_ERROR_TYPE 0x0004 207 + #define CPER_PROC_VALID_OPERATION 0x0008 208 + #define CPER_PROC_VALID_FLAGS 0x0010 209 + #define CPER_PROC_VALID_LEVEL 0x0020 210 + #define CPER_PROC_VALID_VERSION 0x0040 211 + #define CPER_PROC_VALID_BRAND_INFO 0x0080 212 + #define CPER_PROC_VALID_ID 0x0100 213 + #define CPER_PROC_VALID_TARGET_ADDRESS 0x0200 214 + #define CPER_PROC_VALID_REQUESTOR_ID 0x0400 215 + #define CPER_PROC_VALID_RESPONDER_ID 0x0800 216 + #define CPER_PROC_VALID_IP 0x1000 217 + 218 + #define CPER_MEM_VALID_ERROR_STATUS 0x0001 219 + #define CPER_MEM_VALID_PHYSICAL_ADDRESS 0x0002 220 + #define CPER_MEM_VALID_PHYSICAL_ADDRESS_MASK 0x0004 221 + #define CPER_MEM_VALID_NODE 0x0008 222 + #define CPER_MEM_VALID_CARD 0x0010 223 + #define CPER_MEM_VALID_MODULE 0x0020 224 + #define CPER_MEM_VALID_BANK 0x0040 225 + #define CPER_MEM_VALID_DEVICE 0x0080 226 + #define CPER_MEM_VALID_ROW 0x0100 227 + #define CPER_MEM_VALID_COLUMN 0x0200 228 + #define CPER_MEM_VALID_BIT_POSITION 0x0400 229 + #define CPER_MEM_VALID_REQUESTOR_ID 0x0800 230 + #define CPER_MEM_VALID_RESPONDER_ID 0x1000 231 + #define CPER_MEM_VALID_TARGET_ID 0x2000 232 + #define CPER_MEM_VALID_ERROR_TYPE 0x4000 233 + 234 + #define CPER_PCIE_VALID_PORT_TYPE 0x0001 235 + #define CPER_PCIE_VALID_VERSION 0x0002 236 + #define CPER_PCIE_VALID_COMMAND_STATUS 0x0004 237 + #define CPER_PCIE_VALID_DEVICE_ID 0x0008 238 + #define CPER_PCIE_VALID_SERIAL_NUMBER 0x0010 239 + #define CPER_PCIE_VALID_BRIDGE_CONTROL_STATUS 0x0020 240 + #define CPER_PCIE_VALID_CAPABILITY 0x0040 241 + #define CPER_PCIE_VALID_AER_INFO 0x0080 242 + 243 + #define CPER_PCIE_SLOT_SHIFT 3 244 + 206 245 /* 207 246 * All tables and structs must be byte-packed to match CPER 208 247 * specification, since the tables are provided by the system BIOS ··· 347 304 __u64 responder_id; 348 305 __u64 target_id; 349 306 __u8 error_type; 307 + }; 308 + 309 + struct cper_sec_pcie { 310 + __u64 validation_bits; 311 + __u32 port_type; 312 + struct { 313 + __u8 minor; 314 + __u8 major; 315 + __u8 reserved[2]; 316 + } version; 317 + __u16 command; 318 + __u16 status; 319 + __u32 reserved; 320 + struct { 321 + __u16 vendor_id; 322 + __u16 device_id; 323 + __u8 class_code[3]; 324 + __u8 function; 325 + __u8 device; 326 + __u16 segment; 327 + __u8 bus; 328 + __u8 secondary_bus; 329 + __u16 slot; 330 + __u8 reserved; 331 + } device_id; 332 + struct { 333 + __u32 lower; 334 + __u32 upper; 335 + } serial_number; 336 + struct { 337 + __u16 secondary_status; 338 + __u16 control; 339 + } bridge; 340 + __u8 capability[60]; 341 + __u8 aer_info[96]; 350 342 }; 351 343 352 344 /* Reset to default packing */
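
These validation bits tell a consumer which fields of an error section actually carry data, which is why most fields in the APEI output are optional. A sketch of a consumer-side check; the helper is hypothetical, and it assumes the memory error section struct in this header is named cper_sec_mem_err:

	#include <linux/kernel.h>
	#include <linux/cper.h>

	static void example_print_mem_node(const struct cper_sec_mem_err *mem)
	{
		/* Only print the node if the firmware marked it valid. */
		if (mem->validation_bits & CPER_MEM_VALID_NODE)
			pr_info("node: %u\n", mem->node);
	}
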
+38
include/linux/ipmi.h
··· 454 454 /* Validate that the given IPMI address is valid. */ 455 455 int ipmi_validate_addr(struct ipmi_addr *addr, int len); 456 456 457 + /* 458 + * How did the IPMI driver find out about the device? 459 + */ 460 + enum ipmi_addr_src { 461 + SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, 462 + SI_PCI, SI_DEVICETREE, SI_DEFAULT 463 + }; 464 + 465 + union ipmi_smi_info_union { 466 + /* 467 + * the acpi_info element is defined for the SI_ACPI 468 + * address type 469 + */ 470 + struct { 471 + void *acpi_handle; 472 + } acpi_info; 473 + }; 474 + 475 + struct ipmi_smi_info { 476 + enum ipmi_addr_src addr_src; 477 + 478 + /* 479 + * Base device for the interface. Don't forget to put this when 480 + * you are done. 481 + */ 482 + struct device *dev; 483 + 484 + /* 485 + * The addr_info provides more detailed info for some IPMI 486 + * devices, depending on the addr_src. Currently only SI_ACPI 487 + * info is provided. 488 + */ 489 + union ipmi_smi_info_union addr_info; 490 + }; 491 + 492 + /* This is to get the private info of ipmi_smi_t */ 493 + extern int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data); 494 + 457 495 #endif /* __KERNEL__ */ 458 496 459 497
+8
include/linux/ipmi_smi.h
··· 39 39 #include <linux/module.h> 40 40 #include <linux/device.h> 41 41 #include <linux/platform_device.h> 42 + #include <linux/ipmi.h> 42 43 43 44 /* This file describes the interface for IPMI system management interface 44 45 drivers to bind into the IPMI message handler. */ ··· 86 85 this call. */ 87 86 int (*start_processing)(void *send_info, 88 87 ipmi_smi_t new_intf); 88 + 89 + /* 90 + * Get the detailed private info of the low level interface and store 91 + * it into struct ipmi_smi_info. For example: the 92 + * ACPI device handle will be returned for the pnp_acpi IPMI device. 93 + */ 94 + int (*get_smi_info)(void *send_info, struct ipmi_smi_info *data); 89 95 90 96 /* Called to enqueue an SMI message to be sent. This 91 97 operation is not allowed to fail. If an error occurs, it
-17
include/linux/suspend.h
··· 258 258 static inline bool system_entering_hibernation(void) { return false; } 259 259 #endif /* CONFIG_HIBERNATION */ 260 260 261 - #ifdef CONFIG_SUSPEND_NVS 262 - extern int suspend_nvs_register(unsigned long start, unsigned long size); 263 - extern int suspend_nvs_alloc(void); 264 - extern void suspend_nvs_free(void); 265 - extern void suspend_nvs_save(void); 266 - extern void suspend_nvs_restore(void); 267 - #else /* CONFIG_SUSPEND_NVS */ 268 - static inline int suspend_nvs_register(unsigned long a, unsigned long b) 269 - { 270 - return 0; 271 - } 272 - static inline int suspend_nvs_alloc(void) { return 0; } 273 - static inline void suspend_nvs_free(void) {} 274 - static inline void suspend_nvs_save(void) {} 275 - static inline void suspend_nvs_restore(void) {} 276 - #endif /* CONFIG_SUSPEND_NVS */ 277 - 278 261 #ifdef CONFIG_PM_SLEEP 279 262 void save_processor_state(void); 280 263 void restore_processor_state(void);
+37 -10
include/linux/thermal.h
··· 77 77 char type[THERMAL_NAME_LENGTH]; 78 78 struct device device; 79 79 void *devdata; 80 - struct thermal_cooling_device_ops *ops; 80 + const struct thermal_cooling_device_ops *ops; 81 81 struct list_head node; 82 82 }; 83 83 ··· 114 114 int last_temperature; 115 115 bool passive; 116 116 unsigned int forced_passive; 117 - struct thermal_zone_device_ops *ops; 117 + const struct thermal_zone_device_ops *ops; 118 118 struct list_head cooling_devices; 119 119 struct idr idr; 120 120 struct mutex lock; /* protect cooling devices list */ ··· 127 127 struct thermal_hwmon_attr temp_crit; /* hwmon sys attr */ 128 128 #endif 129 129 }; 130 + /* Adding event notification support elements */ 131 + #define THERMAL_GENL_FAMILY_NAME "thermal_event" 132 + #define THERMAL_GENL_VERSION 0x01 133 + #define THERMAL_GENL_MCAST_GROUP_NAME "thermal_mc_group" 134 + 135 + enum events { 136 + THERMAL_AUX0, 137 + THERMAL_AUX1, 138 + THERMAL_CRITICAL, 139 + THERMAL_DEV_FAULT, 140 + }; 141 + 142 + struct thermal_genl_event { 143 + u32 orig; 144 + enum events event; 145 + }; 146 + /* attributes of thermal_genl_family */ 147 + enum { 148 + THERMAL_GENL_ATTR_UNSPEC, 149 + THERMAL_GENL_ATTR_EVENT, 150 + __THERMAL_GENL_ATTR_MAX, 151 + }; 152 + #define THERMAL_GENL_ATTR_MAX (__THERMAL_GENL_ATTR_MAX - 1) 153 + 154 + /* commands supported by the thermal_genl_family */ 155 + enum { 156 + THERMAL_GENL_CMD_UNSPEC, 157 + THERMAL_GENL_CMD_EVENT, 158 + __THERMAL_GENL_CMD_MAX, 159 + }; 160 + #define THERMAL_GENL_CMD_MAX (__THERMAL_GENL_CMD_MAX - 1) 130 161 131 162 struct thermal_zone_device *thermal_zone_device_register(char *, int, void *, 132 - struct 133 - thermal_zone_device_ops 134 - *, int tc1, int tc2, 135 - int passive_freq, 136 - int polling_freq); 163 + const struct thermal_zone_device_ops *, int tc1, int tc2, 164 + int passive_freq, int polling_freq); 137 165 void thermal_zone_device_unregister(struct thermal_zone_device *); 138 166 139 167 int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, ··· 170 142 struct thermal_cooling_device *); 171 143 void thermal_zone_device_update(struct thermal_zone_device *); 172 144 struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, 173 - struct 174 - thermal_cooling_device_ops 175 - *); 145 + const struct thermal_cooling_device_ops *); 176 146 void thermal_cooling_device_unregister(struct thermal_cooling_device *); 147 + extern int generate_netlink_event(u32 orig, enum events event); 177 148 178 149 #endif /* __THERMAL_H__ */
+1
kernel/panic.c
··· 34 34 static DEFINE_SPINLOCK(pause_on_oops_lock); 35 35 36 36 int panic_timeout; 37 + EXPORT_SYMBOL_GPL(panic_timeout); 37 38 38 39 ATOMIC_NOTIFIER_HEAD(panic_notifier_list); 39 40
-5
kernel/power/Kconfig
··· 100 100 depends on PM_ADVANCED_DEBUG 101 101 default n 102 102 103 - config SUSPEND_NVS 104 - bool 105 - 106 103 config SUSPEND 107 104 bool "Suspend to RAM and standby" 108 105 depends on PM && ARCH_SUSPEND_POSSIBLE 109 - select SUSPEND_NVS if HAS_IOMEM 110 106 default y 111 107 ---help--- 112 108 Allow the system to enter sleep states in which main memory is ··· 136 140 depends on PM && SWAP && ARCH_HIBERNATION_POSSIBLE 137 141 select LZO_COMPRESS 138 142 select LZO_DECOMPRESS 139 - select SUSPEND_NVS if HAS_IOMEM 140 143 ---help--- 141 144 Enable the suspend to disk (STD) functionality, which is usually 142 145 called "hibernation" in user interfaces. STD checkpoints the
-1
kernel/power/Makefile
··· 7 7 obj-$(CONFIG_PM_TEST_SUSPEND) += suspend_test.o 8 8 obj-$(CONFIG_HIBERNATION) += hibernate.o snapshot.o swap.o user.o \ 9 9 block_io.o 10 - obj-$(CONFIG_SUSPEND_NVS) += nvs.o 11 10 12 11 obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
+14 -6
kernel/power/nvs.c drivers/acpi/nvs.c
··· 1 1 /* 2 - * linux/kernel/power/hibernate_nvs.c - Routines for handling NVS memory 2 + * nvs.c - Routines for saving and restoring ACPI NVS memory region 3 3 * 4 - * Copyright (C) 2008,2009 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 4 + * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. 5 5 * 6 6 * This file is released under the GPLv2. 7 7 */ ··· 11 11 #include <linux/list.h> 12 12 #include <linux/mm.h> 13 13 #include <linux/slab.h> 14 - #include <linux/suspend.h> 14 + #include <linux/acpi.h> 15 + #include <acpi/acpiosxf.h> 15 16 16 17 /* 17 18 * Platforms, like ACPI, may want us to save some memory used by them during ··· 80 79 free_page((unsigned long)entry->data); 81 80 entry->data = NULL; 82 81 if (entry->kaddr) { 83 - iounmap(entry->kaddr); 82 + acpi_os_unmap_memory(entry->kaddr, entry->size); 84 83 entry->kaddr = NULL; 85 84 } 86 85 } ··· 106 105 /** 107 106 * suspend_nvs_save - save NVS memory regions 108 107 */ 109 - void suspend_nvs_save(void) 108 + int suspend_nvs_save(void) 110 109 { 111 110 struct nvs_page *entry; 112 111 ··· 114 113 115 114 list_for_each_entry(entry, &nvs_list, node) 116 115 if (entry->data) { 117 - entry->kaddr = ioremap(entry->phys_start, entry->size); 116 + entry->kaddr = acpi_os_map_memory(entry->phys_start, 117 + entry->size); 118 + if (!entry->kaddr) { 119 + suspend_nvs_free(); 120 + return -ENOMEM; 121 + } 118 122 memcpy(entry->data, entry->kaddr, entry->size); 119 123 } 124 + 125 + return 0; 120 126 } 121 127 122 128 /**
+2
lib/ioremap.c
··· 9 9 #include <linux/mm.h> 10 10 #include <linux/sched.h> 11 11 #include <linux/io.h> 12 + #include <linux/module.h> 12 13 #include <asm/cacheflush.h> 13 14 #include <asm/pgtable.h> 14 15 ··· 91 90 92 91 return err; 93 92 } 93 + EXPORT_SYMBOL_GPL(ioremap_page_range);
+1
mm/vmalloc.c
··· 1175 1175 { 1176 1176 vunmap_page_range(addr, addr + size); 1177 1177 } 1178 + EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush); 1178 1179 1179 1180 /** 1180 1181 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB