Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux

Quoth Len:
"This fixes a merge-window regression due to a conflict
between error injection and preparation to remove atomicio.c.
Here we fix that regression and complete the removal
of atomicio.c.

This also re-orders some idle initialization code to
complete the merge window series that allows cpuidle
to cope with bringing processors on-line after boot."

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux:
Use acpi_os_map_memory() instead of ioremap() in einj driver
ACPI, APEI, EINJ, cleanup 0 vs NULL confusion
ACPI, APEI, EINJ Allow empty Trigger Error Action Table
thermal: Rename generate_netlink_event
ACPI / PM: Add Sony Vaio VPCCW29FX to nonvs blacklist.
ACPI: Remove ./drivers/acpi/atomicio.[ch]
ACPI, APEI: Add RAM mapping support to ACPI
ACPI, APEI: Add 64-bit read/write support for APEI on i386
ACPI processor hotplug: Delay acpi_processor_start() call for hotplugged cores
ACPI processor hotplug: Split up acpi_processor_add

+334 -560
+1 -1
Documentation/thermal/sysfs-api.txt
··· 284 284 The framework includes a simple notification mechanism, in the form of a 285 285 netlink event. Netlink socket initialization is done during the _init_ 286 286 of the framework. Drivers which intend to use the notification mechanism 287 - just need to call generate_netlink_event() with two arguments viz 287 + just need to call thermal_generate_netlink_event() with two arguments viz 288 288 (originator, event). Typically the originator will be an integer assigned 289 289 to a thermal_zone_device when it registers itself with the framework. The 290 290 event will be one of:{THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
-1
drivers/acpi/Makefile
··· 19 19 20 20 # All the builtin files are in the "acpi." module_param namespace. 21 21 acpi-y += osl.o utils.o reboot.o 22 - acpi-y += atomicio.o 23 22 acpi-y += nvs.o 24 23 25 24 # sleep related files
+4 -31
drivers/acpi/apei/apei-base.c
··· 596 596 { 597 597 int rc; 598 598 u64 address; 599 - u32 tmp, width = reg->bit_width; 600 599 acpi_status status; 601 600 602 601 rc = apei_check_gar(reg, &address); 603 602 if (rc) 604 603 return rc; 605 604 606 - if (width == 64) 607 - width = 32; /* Break into two 32-bit transfers */ 608 - 609 605 *val = 0; 610 606 switch(reg->space_id) { 611 607 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 612 - status = acpi_os_read_memory((acpi_physical_address) 613 - address, &tmp, width); 608 + status = acpi_os_read_memory64((acpi_physical_address) 609 + address, val, reg->bit_width); 614 610 if (ACPI_FAILURE(status)) 615 611 return -EIO; 616 - *val = tmp; 617 - 618 - if (reg->bit_width == 64) { 619 - /* Read the top 32 bits */ 620 - status = acpi_os_read_memory((acpi_physical_address) 621 - (address + 4), &tmp, 32); 622 - if (ACPI_FAILURE(status)) 623 - return -EIO; 624 - *val |= ((u64)tmp << 32); 625 - } 626 612 break; 627 613 case ACPI_ADR_SPACE_SYSTEM_IO: 628 614 status = acpi_os_read_port(address, (u32 *)val, reg->bit_width); ··· 628 642 { 629 643 int rc; 630 644 u64 address; 631 - u32 width = reg->bit_width; 632 645 acpi_status status; 633 646 634 647 rc = apei_check_gar(reg, &address); 635 648 if (rc) 636 649 return rc; 637 650 638 - if (width == 64) 639 - width = 32; /* Break into two 32-bit transfers */ 640 - 641 651 switch (reg->space_id) { 642 652 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 643 - status = acpi_os_write_memory((acpi_physical_address) 644 - address, ACPI_LODWORD(val), 645 - width); 653 + status = acpi_os_write_memory64((acpi_physical_address) 654 + address, val, reg->bit_width); 646 655 if (ACPI_FAILURE(status)) 647 656 return -EIO; 648 - 649 - if (reg->bit_width == 64) { 650 - status = acpi_os_write_memory((acpi_physical_address) 651 - (address + 4), 652 - ACPI_HIDWORD(val), 32); 653 - if (ACPI_FAILURE(status)) 654 - return -EIO; 655 - } 656 657 break; 657 658 case ACPI_ADR_SPACE_SYSTEM_IO: 658 659 status = acpi_os_write_port(address, val, reg->bit_width);
+47 -48
drivers/acpi/apei/einj.c
··· 141 141 142 142 static void *einj_param; 143 143 144 - #ifndef readq 145 - static inline __u64 readq(volatile void __iomem *addr) 146 - { 147 - return ((__u64)readl(addr+4) << 32) + readl(addr); 148 - } 149 - #endif 150 - 151 - #ifndef writeq 152 - static inline void writeq(__u64 val, volatile void __iomem *addr) 153 - { 154 - writel(val, addr); 155 - writel(val >> 32, addr+4); 156 - } 157 - #endif 158 - 159 144 static void einj_exec_ctx_init(struct apei_exec_context *ctx) 160 145 { 161 146 apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), ··· 189 204 static void check_vendor_extension(u64 paddr, 190 205 struct set_error_type_with_address *v5param) 191 206 { 192 - int offset = readl(&v5param->vendor_extension); 207 + int offset = v5param->vendor_extension; 193 208 struct vendor_error_type_extension *v; 194 209 u32 sbdf; 195 210 196 211 if (!offset) 197 212 return; 198 - v = ioremap(paddr + offset, sizeof(*v)); 213 + v = acpi_os_map_memory(paddr + offset, sizeof(*v)); 199 214 if (!v) 200 215 return; 201 - sbdf = readl(&v->pcie_sbdf); 216 + sbdf = v->pcie_sbdf; 202 217 sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n", 203 218 sbdf >> 24, (sbdf >> 16) & 0xff, 204 219 (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7, 205 - readw(&v->vendor_id), readw(&v->device_id), 206 - readb(&v->rev_id)); 207 - iounmap(v); 220 + v->vendor_id, v->device_id, v->rev_id); 221 + acpi_os_unmap_memory(v, sizeof(*v)); 208 222 } 209 223 210 224 static void *einj_get_parameter_address(void) ··· 231 247 if (paddrv5) { 232 248 struct set_error_type_with_address *v5param; 233 249 234 - v5param = ioremap(paddrv5, sizeof(*v5param)); 250 + v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param)); 235 251 if (v5param) { 236 252 acpi5 = 1; 237 253 check_vendor_extension(paddrv5, v5param); ··· 241 257 if (paddrv4) { 242 258 struct einj_parameter *v4param; 243 259 244 - v4param = ioremap(paddrv4, sizeof(*v4param)); 260 + v4param = acpi_os_map_memory(paddrv4, 
sizeof(*v4param)); 245 261 if (!v4param) 246 - return 0; 247 - if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) { 248 - iounmap(v4param); 249 - return 0; 262 + return NULL; 263 + if (v4param->reserved1 || v4param->reserved2) { 264 + acpi_os_unmap_memory(v4param, sizeof(*v4param)); 265 + return NULL; 250 266 } 251 267 return v4param; 252 268 } 253 269 254 - return 0; 270 + return NULL; 255 271 } 256 272 257 273 /* do sanity check to trigger table */ ··· 260 276 if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) 261 277 return -EINVAL; 262 278 if (trigger_tab->table_size > PAGE_SIZE || 263 - trigger_tab->table_size <= trigger_tab->header_size) 279 + trigger_tab->table_size < trigger_tab->header_size) 264 280 return -EINVAL; 265 281 if (trigger_tab->entry_count != 266 282 (trigger_tab->table_size - trigger_tab->header_size) / ··· 324 340 "The trigger error action table is invalid\n"); 325 341 goto out_rel_header; 326 342 } 343 + 344 + /* No action structures in the TRIGGER_ERROR table, nothing to do */ 345 + if (!trigger_tab->entry_count) 346 + goto out_rel_header; 347 + 327 348 rc = -EIO; 328 349 table_size = trigger_tab->table_size; 329 350 r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), ··· 424 435 if (acpi5) { 425 436 struct set_error_type_with_address *v5param = einj_param; 426 437 427 - writel(type, &v5param->type); 438 + v5param->type = type; 428 439 if (type & 0x80000000) { 429 440 switch (vendor_flags) { 430 441 case SETWA_FLAGS_APICID: 431 - writel(param1, &v5param->apicid); 442 + v5param->apicid = param1; 432 443 break; 433 444 case SETWA_FLAGS_MEM: 434 - writeq(param1, &v5param->memory_address); 435 - writeq(param2, &v5param->memory_address_range); 445 + v5param->memory_address = param1; 446 + v5param->memory_address_range = param2; 436 447 break; 437 448 case SETWA_FLAGS_PCIE_SBDF: 438 - writel(param1, &v5param->pcie_sbdf); 449 + v5param->pcie_sbdf = param1; 439 450 break; 440 451 } 441 - writel(vendor_flags, 
&v5param->flags); 452 + v5param->flags = vendor_flags; 442 453 } else { 443 454 switch (type) { 444 455 case ACPI_EINJ_PROCESSOR_CORRECTABLE: 445 456 case ACPI_EINJ_PROCESSOR_UNCORRECTABLE: 446 457 case ACPI_EINJ_PROCESSOR_FATAL: 447 - writel(param1, &v5param->apicid); 448 - writel(SETWA_FLAGS_APICID, &v5param->flags); 458 + v5param->apicid = param1; 459 + v5param->flags = SETWA_FLAGS_APICID; 449 460 break; 450 461 case ACPI_EINJ_MEMORY_CORRECTABLE: 451 462 case ACPI_EINJ_MEMORY_UNCORRECTABLE: 452 463 case ACPI_EINJ_MEMORY_FATAL: 453 - writeq(param1, &v5param->memory_address); 454 - writeq(param2, &v5param->memory_address_range); 455 - writel(SETWA_FLAGS_MEM, &v5param->flags); 464 + v5param->memory_address = param1; 465 + v5param->memory_address_range = param2; 466 + v5param->flags = SETWA_FLAGS_MEM; 456 467 break; 457 468 case ACPI_EINJ_PCIX_CORRECTABLE: 458 469 case ACPI_EINJ_PCIX_UNCORRECTABLE: 459 470 case ACPI_EINJ_PCIX_FATAL: 460 - writel(param1, &v5param->pcie_sbdf); 461 - writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags); 471 + v5param->pcie_sbdf = param1; 472 + v5param->flags = SETWA_FLAGS_PCIE_SBDF; 462 473 break; 463 474 } 464 475 } ··· 468 479 return rc; 469 480 if (einj_param) { 470 481 struct einj_parameter *v4param = einj_param; 471 - writeq(param1, &v4param->param1); 472 - writeq(param2, &v4param->param2); 482 + v4param->param1 = param1; 483 + v4param->param2 = param2; 473 484 } 474 485 } 475 486 rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); ··· 720 731 return 0; 721 732 722 733 err_unmap: 723 - if (einj_param) 724 - iounmap(einj_param); 734 + if (einj_param) { 735 + acpi_size size = (acpi5) ? 
736 + sizeof(struct set_error_type_with_address) : 737 + sizeof(struct einj_parameter); 738 + 739 + acpi_os_unmap_memory(einj_param, size); 740 + } 725 741 apei_exec_post_unmap_gars(&ctx); 726 742 err_release: 727 743 apei_resources_release(&einj_resources); ··· 742 748 { 743 749 struct apei_exec_context ctx; 744 750 745 - if (einj_param) 746 - iounmap(einj_param); 751 + if (einj_param) { 752 + acpi_size size = (acpi5) ? 753 + sizeof(struct set_error_type_with_address) : 754 + sizeof(struct einj_parameter); 755 + 756 + acpi_os_unmap_memory(einj_param, size); 757 + } 747 758 einj_exec_ctx_init(&ctx); 748 759 apei_exec_post_unmap_gars(&ctx); 749 760 apei_resources_release(&einj_resources);
-422
drivers/acpi/atomicio.c
··· 1 - /* 2 - * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then 3 - * accessing in atomic context. 4 - * 5 - * This is used for NMI handler to access IO memory area, because 6 - * ioremap/iounmap can not be used in NMI handler. The IO memory area 7 - * is pre-mapped in process context and accessed in NMI handler. 8 - * 9 - * Copyright (C) 2009-2010, Intel Corp. 10 - * Author: Huang Ying <ying.huang@intel.com> 11 - * 12 - * This program is free software; you can redistribute it and/or 13 - * modify it under the terms of the GNU General Public License version 14 - * 2 as published by the Free Software Foundation. 15 - * 16 - * This program is distributed in the hope that it will be useful, 17 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 - * GNU General Public License for more details. 20 - * 21 - * You should have received a copy of the GNU General Public License 22 - * along with this program; if not, write to the Free Software 23 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 24 - */ 25 - 26 - #include <linux/kernel.h> 27 - #include <linux/export.h> 28 - #include <linux/init.h> 29 - #include <linux/acpi.h> 30 - #include <linux/io.h> 31 - #include <linux/kref.h> 32 - #include <linux/rculist.h> 33 - #include <linux/interrupt.h> 34 - #include <linux/slab.h> 35 - #include <linux/mm.h> 36 - #include <linux/highmem.h> 37 - #include <acpi/atomicio.h> 38 - 39 - #define ACPI_PFX "ACPI: " 40 - 41 - static LIST_HEAD(acpi_iomaps); 42 - /* 43 - * Used for mutual exclusion between writers of acpi_iomaps list, for 44 - * synchronization between readers and writer, RCU is used. 
45 - */ 46 - static DEFINE_SPINLOCK(acpi_iomaps_lock); 47 - 48 - struct acpi_iomap { 49 - struct list_head list; 50 - void __iomem *vaddr; 51 - unsigned long size; 52 - phys_addr_t paddr; 53 - struct kref ref; 54 - }; 55 - 56 - /* acpi_iomaps_lock or RCU read lock must be held before calling */ 57 - static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr, 58 - unsigned long size) 59 - { 60 - struct acpi_iomap *map; 61 - 62 - list_for_each_entry_rcu(map, &acpi_iomaps, list) { 63 - if (map->paddr + map->size >= paddr + size && 64 - map->paddr <= paddr) 65 - return map; 66 - } 67 - return NULL; 68 - } 69 - 70 - /* 71 - * Atomic "ioremap" used by NMI handler, if the specified IO memory 72 - * area is not pre-mapped, NULL will be returned. 73 - * 74 - * acpi_iomaps_lock or RCU read lock must be held before calling 75 - */ 76 - static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, 77 - unsigned long size) 78 - { 79 - struct acpi_iomap *map; 80 - 81 - map = __acpi_find_iomap(paddr, size/8); 82 - if (map) 83 - return map->vaddr + (paddr - map->paddr); 84 - else 85 - return NULL; 86 - } 87 - 88 - /* acpi_iomaps_lock must be held before calling */ 89 - static void __iomem *__acpi_try_ioremap(phys_addr_t paddr, 90 - unsigned long size) 91 - { 92 - struct acpi_iomap *map; 93 - 94 - map = __acpi_find_iomap(paddr, size); 95 - if (map) { 96 - kref_get(&map->ref); 97 - return map->vaddr + (paddr - map->paddr); 98 - } else 99 - return NULL; 100 - } 101 - 102 - #ifndef CONFIG_IA64 103 - #define should_use_kmap(pfn) page_is_ram(pfn) 104 - #else 105 - /* ioremap will take care of cache attributes */ 106 - #define should_use_kmap(pfn) 0 107 - #endif 108 - 109 - static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz) 110 - { 111 - unsigned long pfn; 112 - 113 - pfn = pg_off >> PAGE_SHIFT; 114 - if (should_use_kmap(pfn)) { 115 - if (pg_sz > PAGE_SIZE) 116 - return NULL; 117 - return (void __iomem __force *)kmap(pfn_to_page(pfn)); 118 - } else 119 - return 
ioremap(pg_off, pg_sz); 120 - } 121 - 122 - static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr) 123 - { 124 - unsigned long pfn; 125 - 126 - pfn = pg_off >> PAGE_SHIFT; 127 - if (page_is_ram(pfn)) 128 - kunmap(pfn_to_page(pfn)); 129 - else 130 - iounmap(vaddr); 131 - } 132 - 133 - /* 134 - * Used to pre-map the specified IO memory area. First try to find 135 - * whether the area is already pre-mapped, if it is, increase the 136 - * reference count (in __acpi_try_ioremap) and return; otherwise, do 137 - * the real ioremap, and add the mapping into acpi_iomaps list. 138 - */ 139 - static void __iomem *acpi_pre_map(phys_addr_t paddr, 140 - unsigned long size) 141 - { 142 - void __iomem *vaddr; 143 - struct acpi_iomap *map; 144 - unsigned long pg_sz, flags; 145 - phys_addr_t pg_off; 146 - 147 - spin_lock_irqsave(&acpi_iomaps_lock, flags); 148 - vaddr = __acpi_try_ioremap(paddr, size); 149 - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 150 - if (vaddr) 151 - return vaddr; 152 - 153 - pg_off = paddr & PAGE_MASK; 154 - pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off; 155 - vaddr = acpi_map(pg_off, pg_sz); 156 - if (!vaddr) 157 - return NULL; 158 - map = kmalloc(sizeof(*map), GFP_KERNEL); 159 - if (!map) 160 - goto err_unmap; 161 - INIT_LIST_HEAD(&map->list); 162 - map->paddr = pg_off; 163 - map->size = pg_sz; 164 - map->vaddr = vaddr; 165 - kref_init(&map->ref); 166 - 167 - spin_lock_irqsave(&acpi_iomaps_lock, flags); 168 - vaddr = __acpi_try_ioremap(paddr, size); 169 - if (vaddr) { 170 - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 171 - acpi_unmap(pg_off, map->vaddr); 172 - kfree(map); 173 - return vaddr; 174 - } 175 - list_add_tail_rcu(&map->list, &acpi_iomaps); 176 - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 177 - 178 - return map->vaddr + (paddr - map->paddr); 179 - err_unmap: 180 - acpi_unmap(pg_off, vaddr); 181 - return NULL; 182 - } 183 - 184 - /* acpi_iomaps_lock must be held before calling */ 185 - static void 
__acpi_kref_del_iomap(struct kref *ref) 186 - { 187 - struct acpi_iomap *map; 188 - 189 - map = container_of(ref, struct acpi_iomap, ref); 190 - list_del_rcu(&map->list); 191 - } 192 - 193 - /* 194 - * Used to post-unmap the specified IO memory area. The iounmap is 195 - * done only if the reference count goes zero. 196 - */ 197 - static void acpi_post_unmap(phys_addr_t paddr, unsigned long size) 198 - { 199 - struct acpi_iomap *map; 200 - unsigned long flags; 201 - int del; 202 - 203 - spin_lock_irqsave(&acpi_iomaps_lock, flags); 204 - map = __acpi_find_iomap(paddr, size); 205 - BUG_ON(!map); 206 - del = kref_put(&map->ref, __acpi_kref_del_iomap); 207 - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); 208 - 209 - if (!del) 210 - return; 211 - 212 - synchronize_rcu(); 213 - acpi_unmap(map->paddr, map->vaddr); 214 - kfree(map); 215 - } 216 - 217 - /* In NMI handler, should set silent = 1 */ 218 - static int acpi_check_gar(struct acpi_generic_address *reg, 219 - u64 *paddr, int silent) 220 - { 221 - u32 width, space_id; 222 - 223 - width = reg->bit_width; 224 - space_id = reg->space_id; 225 - /* Handle possible alignment issues */ 226 - memcpy(paddr, &reg->address, sizeof(*paddr)); 227 - if (!*paddr) { 228 - if (!silent) 229 - pr_warning(FW_BUG ACPI_PFX 230 - "Invalid physical address in GAR [0x%llx/%u/%u]\n", 231 - *paddr, width, space_id); 232 - return -EINVAL; 233 - } 234 - 235 - if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { 236 - if (!silent) 237 - pr_warning(FW_BUG ACPI_PFX 238 - "Invalid bit width in GAR [0x%llx/%u/%u]\n", 239 - *paddr, width, space_id); 240 - return -EINVAL; 241 - } 242 - 243 - if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && 244 - space_id != ACPI_ADR_SPACE_SYSTEM_IO) { 245 - if (!silent) 246 - pr_warning(FW_BUG ACPI_PFX 247 - "Invalid address space type in GAR [0x%llx/%u/%u]\n", 248 - *paddr, width, space_id); 249 - return -EINVAL; 250 - } 251 - 252 - return 0; 253 - } 254 - 255 - /* Pre-map, working on GAR */ 256 - 
int acpi_pre_map_gar(struct acpi_generic_address *reg) 257 - { 258 - u64 paddr; 259 - void __iomem *vaddr; 260 - int rc; 261 - 262 - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 263 - return 0; 264 - 265 - rc = acpi_check_gar(reg, &paddr, 0); 266 - if (rc) 267 - return rc; 268 - 269 - vaddr = acpi_pre_map(paddr, reg->bit_width / 8); 270 - if (!vaddr) 271 - return -EIO; 272 - 273 - return 0; 274 - } 275 - EXPORT_SYMBOL_GPL(acpi_pre_map_gar); 276 - 277 - /* Post-unmap, working on GAR */ 278 - int acpi_post_unmap_gar(struct acpi_generic_address *reg) 279 - { 280 - u64 paddr; 281 - int rc; 282 - 283 - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) 284 - return 0; 285 - 286 - rc = acpi_check_gar(reg, &paddr, 0); 287 - if (rc) 288 - return rc; 289 - 290 - acpi_post_unmap(paddr, reg->bit_width / 8); 291 - 292 - return 0; 293 - } 294 - EXPORT_SYMBOL_GPL(acpi_post_unmap_gar); 295 - 296 - #ifdef readq 297 - static inline u64 read64(const volatile void __iomem *addr) 298 - { 299 - return readq(addr); 300 - } 301 - #else 302 - static inline u64 read64(const volatile void __iomem *addr) 303 - { 304 - u64 l, h; 305 - l = readl(addr); 306 - h = readl(addr+4); 307 - return l | (h << 32); 308 - } 309 - #endif 310 - 311 - /* 312 - * Can be used in atomic (including NMI) or process context. RCU read 313 - * lock can only be released after the IO memory area accessing. 
314 - */ 315 - static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width) 316 - { 317 - void __iomem *addr; 318 - 319 - rcu_read_lock(); 320 - addr = __acpi_ioremap_fast(paddr, width); 321 - switch (width) { 322 - case 8: 323 - *val = readb(addr); 324 - break; 325 - case 16: 326 - *val = readw(addr); 327 - break; 328 - case 32: 329 - *val = readl(addr); 330 - break; 331 - case 64: 332 - *val = read64(addr); 333 - break; 334 - default: 335 - return -EINVAL; 336 - } 337 - rcu_read_unlock(); 338 - 339 - return 0; 340 - } 341 - 342 - #ifdef writeq 343 - static inline void write64(u64 val, volatile void __iomem *addr) 344 - { 345 - writeq(val, addr); 346 - } 347 - #else 348 - static inline void write64(u64 val, volatile void __iomem *addr) 349 - { 350 - writel(val, addr); 351 - writel(val>>32, addr+4); 352 - } 353 - #endif 354 - 355 - static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width) 356 - { 357 - void __iomem *addr; 358 - 359 - rcu_read_lock(); 360 - addr = __acpi_ioremap_fast(paddr, width); 361 - switch (width) { 362 - case 8: 363 - writeb(val, addr); 364 - break; 365 - case 16: 366 - writew(val, addr); 367 - break; 368 - case 32: 369 - writel(val, addr); 370 - break; 371 - case 64: 372 - write64(val, addr); 373 - break; 374 - default: 375 - return -EINVAL; 376 - } 377 - rcu_read_unlock(); 378 - 379 - return 0; 380 - } 381 - 382 - /* GAR accessing in atomic (including NMI) or process context */ 383 - int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg) 384 - { 385 - u64 paddr; 386 - int rc; 387 - 388 - rc = acpi_check_gar(reg, &paddr, 1); 389 - if (rc) 390 - return rc; 391 - 392 - *val = 0; 393 - switch (reg->space_id) { 394 - case ACPI_ADR_SPACE_SYSTEM_MEMORY: 395 - return acpi_atomic_read_mem(paddr, val, reg->bit_width); 396 - case ACPI_ADR_SPACE_SYSTEM_IO: 397 - return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width); 398 - default: 399 - return -EINVAL; 400 - } 401 - } 402 - EXPORT_SYMBOL_GPL(acpi_atomic_read); 403 - 404 - int 
acpi_atomic_write(u64 val, struct acpi_generic_address *reg) 405 - { 406 - u64 paddr; 407 - int rc; 408 - 409 - rc = acpi_check_gar(reg, &paddr, 1); 410 - if (rc) 411 - return rc; 412 - 413 - switch (reg->space_id) { 414 - case ACPI_ADR_SPACE_SYSTEM_MEMORY: 415 - return acpi_atomic_write_mem(paddr, val, reg->bit_width); 416 - case ACPI_ADR_SPACE_SYSTEM_IO: 417 - return acpi_os_write_port(paddr, val, reg->bit_width); 418 - default: 419 - return -EINVAL; 420 - } 421 - } 422 - EXPORT_SYMBOL_GPL(acpi_atomic_write);
+150 -2
drivers/acpi/osl.c
··· 31 31 #include <linux/kernel.h> 32 32 #include <linux/slab.h> 33 33 #include <linux/mm.h> 34 + #include <linux/highmem.h> 34 35 #include <linux/pci.h> 35 36 #include <linux/interrupt.h> 36 37 #include <linux/kmod.h> ··· 322 321 return NULL; 323 322 } 324 323 324 + #ifndef CONFIG_IA64 325 + #define should_use_kmap(pfn) page_is_ram(pfn) 326 + #else 327 + /* ioremap will take care of cache attributes */ 328 + #define should_use_kmap(pfn) 0 329 + #endif 330 + 331 + static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) 332 + { 333 + unsigned long pfn; 334 + 335 + pfn = pg_off >> PAGE_SHIFT; 336 + if (should_use_kmap(pfn)) { 337 + if (pg_sz > PAGE_SIZE) 338 + return NULL; 339 + return (void __iomem __force *)kmap(pfn_to_page(pfn)); 340 + } else 341 + return acpi_os_ioremap(pg_off, pg_sz); 342 + } 343 + 344 + static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) 345 + { 346 + unsigned long pfn; 347 + 348 + pfn = pg_off >> PAGE_SHIFT; 349 + if (page_is_ram(pfn)) 350 + kunmap(pfn_to_page(pfn)); 351 + else 352 + iounmap(vaddr); 353 + } 354 + 325 355 void __iomem *__init_refok 326 356 acpi_os_map_memory(acpi_physical_address phys, acpi_size size) 327 357 { ··· 385 353 386 354 pg_off = round_down(phys, PAGE_SIZE); 387 355 pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; 388 - virt = acpi_os_ioremap(pg_off, pg_sz); 356 + virt = acpi_map(pg_off, pg_sz); 389 357 if (!virt) { 390 358 mutex_unlock(&acpi_ioremap_lock); 391 359 kfree(map); ··· 416 384 { 417 385 if (!map->refcount) { 418 386 synchronize_rcu(); 419 - iounmap(map->virt); 387 + acpi_unmap(map->phys, map->virt); 420 388 kfree(map); 421 389 } 422 390 } ··· 742 710 return AE_OK; 743 711 } 744 712 713 + #ifdef readq 714 + static inline u64 read64(const volatile void __iomem *addr) 715 + { 716 + return readq(addr); 717 + } 718 + #else 719 + static inline u64 read64(const volatile void __iomem *addr) 720 + { 721 + u64 l, h; 722 + l = readl(addr); 723 + h = readl(addr+4); 724 
+ return l | (h << 32); 725 + } 726 + #endif 727 + 728 + acpi_status 729 + acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width) 730 + { 731 + void __iomem *virt_addr; 732 + unsigned int size = width / 8; 733 + bool unmap = false; 734 + u64 dummy; 735 + 736 + rcu_read_lock(); 737 + virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 738 + if (!virt_addr) { 739 + rcu_read_unlock(); 740 + virt_addr = acpi_os_ioremap(phys_addr, size); 741 + if (!virt_addr) 742 + return AE_BAD_ADDRESS; 743 + unmap = true; 744 + } 745 + 746 + if (!value) 747 + value = &dummy; 748 + 749 + switch (width) { 750 + case 8: 751 + *(u8 *) value = readb(virt_addr); 752 + break; 753 + case 16: 754 + *(u16 *) value = readw(virt_addr); 755 + break; 756 + case 32: 757 + *(u32 *) value = readl(virt_addr); 758 + break; 759 + case 64: 760 + *(u64 *) value = read64(virt_addr); 761 + break; 762 + default: 763 + BUG(); 764 + } 765 + 766 + if (unmap) 767 + iounmap(virt_addr); 768 + else 769 + rcu_read_unlock(); 770 + 771 + return AE_OK; 772 + } 773 + 745 774 acpi_status 746 775 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) 747 776 { ··· 829 736 break; 830 737 case 32: 831 738 writel(value, virt_addr); 739 + break; 740 + default: 741 + BUG(); 742 + } 743 + 744 + if (unmap) 745 + iounmap(virt_addr); 746 + else 747 + rcu_read_unlock(); 748 + 749 + return AE_OK; 750 + } 751 + 752 + #ifdef writeq 753 + static inline void write64(u64 val, volatile void __iomem *addr) 754 + { 755 + writeq(val, addr); 756 + } 757 + #else 758 + static inline void write64(u64 val, volatile void __iomem *addr) 759 + { 760 + writel(val, addr); 761 + writel(val>>32, addr+4); 762 + } 763 + #endif 764 + 765 + acpi_status 766 + acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width) 767 + { 768 + void __iomem *virt_addr; 769 + unsigned int size = width / 8; 770 + bool unmap = false; 771 + 772 + rcu_read_lock(); 773 + virt_addr = acpi_map_vaddr_lookup(phys_addr, 
size); 774 + if (!virt_addr) { 775 + rcu_read_unlock(); 776 + virt_addr = acpi_os_ioremap(phys_addr, size); 777 + if (!virt_addr) 778 + return AE_BAD_ADDRESS; 779 + unmap = true; 780 + } 781 + 782 + switch (width) { 783 + case 8: 784 + writeb(value, virt_addr); 785 + break; 786 + case 16: 787 + writew(value, virt_addr); 788 + break; 789 + case 32: 790 + writel(value, virt_addr); 791 + break; 792 + case 64: 793 + write64(value, virt_addr); 832 794 break; 833 795 default: 834 796 BUG();
+114 -40
drivers/acpi/processor_driver.c
··· 84 84 static void acpi_processor_notify(struct acpi_device *device, u32 event); 85 85 static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr); 86 86 static int acpi_processor_handle_eject(struct acpi_processor *pr); 87 - 87 + static int acpi_processor_start(struct acpi_processor *pr); 88 88 89 89 static const struct acpi_device_id processor_device_ids[] = { 90 90 {ACPI_PROCESSOR_OBJECT_HID, 0}, ··· 423 423 struct acpi_processor *pr = per_cpu(processors, cpu); 424 424 425 425 if (action == CPU_ONLINE && pr) { 426 - acpi_processor_ppc_has_changed(pr, 0); 427 - acpi_processor_hotplug(pr); 428 - acpi_processor_reevaluate_tstate(pr, action); 429 - acpi_processor_tstate_has_changed(pr); 426 + /* CPU got physically hotplugged and onlined the first time: 427 + * Initialize missing things 428 + */ 429 + if (pr->flags.need_hotplug_init) { 430 + struct cpuidle_driver *idle_driver = 431 + cpuidle_get_driver(); 432 + 433 + printk(KERN_INFO "Will online and init hotplugged " 434 + "CPU: %d\n", pr->id); 435 + WARN(acpi_processor_start(pr), "Failed to start CPU:" 436 + " %d\n", pr->id); 437 + pr->flags.need_hotplug_init = 0; 438 + if (idle_driver && !strcmp(idle_driver->name, 439 + "intel_idle")) { 440 + intel_idle_cpu_init(pr->id); 441 + } 442 + /* Normal CPU soft online event */ 443 + } else { 444 + acpi_processor_ppc_has_changed(pr, 0); 445 + acpi_processor_cst_has_changed(pr); 446 + acpi_processor_reevaluate_tstate(pr, action); 447 + acpi_processor_tstate_has_changed(pr); 448 + } 430 449 } 431 450 if (action == CPU_DEAD && pr) { 432 451 /* invalidate the flag.throttling after one CPU is offline */ ··· 459 440 .notifier_call = acpi_cpu_soft_notify, 460 441 }; 461 442 443 + /* 444 + * acpi_processor_start() is called by the cpu_hotplug_notifier func: 445 + * acpi_cpu_soft_notify(). Getting it __cpuinit{data} is difficult, the 446 + * root cause seem to be that acpi_processor_uninstall_hotplug_notify() 447 + * is in the module_exit (__exit) func. 
Allowing acpi_processor_start() 448 + * to not be in __cpuinit section, but being called from __cpuinit funcs 449 + * via __ref looks like the right thing to do here. 450 + */ 451 + static __ref int acpi_processor_start(struct acpi_processor *pr) 452 + { 453 + struct acpi_device *device = per_cpu(processor_device_array, pr->id); 454 + int result = 0; 455 + 456 + #ifdef CONFIG_CPU_FREQ 457 + acpi_processor_ppc_has_changed(pr, 0); 458 + #endif 459 + acpi_processor_get_throttling_info(pr); 460 + acpi_processor_get_limit_info(pr); 461 + 462 + if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) 463 + acpi_processor_power_init(pr, device); 464 + 465 + pr->cdev = thermal_cooling_device_register("Processor", device, 466 + &processor_cooling_ops); 467 + if (IS_ERR(pr->cdev)) { 468 + result = PTR_ERR(pr->cdev); 469 + goto err_power_exit; 470 + } 471 + 472 + dev_dbg(&device->dev, "registered as cooling_device%d\n", 473 + pr->cdev->id); 474 + 475 + result = sysfs_create_link(&device->dev.kobj, 476 + &pr->cdev->device.kobj, 477 + "thermal_cooling"); 478 + if (result) { 479 + printk(KERN_ERR PREFIX "Create sysfs link\n"); 480 + goto err_thermal_unregister; 481 + } 482 + result = sysfs_create_link(&pr->cdev->device.kobj, 483 + &device->dev.kobj, 484 + "device"); 485 + if (result) { 486 + printk(KERN_ERR PREFIX "Create sysfs link\n"); 487 + goto err_remove_sysfs_thermal; 488 + } 489 + 490 + return 0; 491 + 492 + err_remove_sysfs_thermal: 493 + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 494 + err_thermal_unregister: 495 + thermal_cooling_device_unregister(pr->cdev); 496 + err_power_exit: 497 + acpi_processor_power_exit(pr, device); 498 + 499 + return result; 500 + } 501 + 502 + /* 503 + * Do not put anything in here which needs the core to be online. 504 + * For example MSR access or setting up things which check for cpuinfo_x86 505 + * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc. 
506 + * Such things have to be put in and set up above in acpi_processor_start() 507 + */ 462 508 static int __cpuinit acpi_processor_add(struct acpi_device *device) 463 509 { 464 510 struct acpi_processor *pr = NULL; ··· 579 495 goto err_free_cpumask; 580 496 } 581 497 582 - #ifdef CONFIG_CPU_FREQ 583 - acpi_processor_ppc_has_changed(pr, 0); 584 - #endif 585 - acpi_processor_get_throttling_info(pr); 586 - acpi_processor_get_limit_info(pr); 498 + /* 499 + * Do not start hotplugged CPUs now, but when they 500 + * are onlined the first time 501 + */ 502 + if (pr->flags.need_hotplug_init) 503 + return 0; 587 504 588 - if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) 589 - acpi_processor_power_init(pr, device); 505 + /* 506 + * Do not start hotplugged CPUs now, but when they 507 + * are onlined the first time 508 + */ 509 + if (pr->flags.need_hotplug_init) 510 + return 0; 590 511 591 - pr->cdev = thermal_cooling_device_register("Processor", device, 592 - &processor_cooling_ops); 593 - if (IS_ERR(pr->cdev)) { 594 - result = PTR_ERR(pr->cdev); 595 - goto err_power_exit; 596 - } 597 - 598 - dev_dbg(&device->dev, "registered as cooling_device%d\n", 599 - pr->cdev->id); 600 - 601 - result = sysfs_create_link(&device->dev.kobj, 602 - &pr->cdev->device.kobj, 603 - "thermal_cooling"); 604 - if (result) { 605 - printk(KERN_ERR PREFIX "Create sysfs link\n"); 606 - goto err_thermal_unregister; 607 - } 608 - result = sysfs_create_link(&pr->cdev->device.kobj, 609 - &device->dev.kobj, 610 - "device"); 611 - if (result) { 612 - printk(KERN_ERR PREFIX "Create sysfs link\n"); 512 + result = acpi_processor_start(pr); 513 + if (result) 613 514 goto err_remove_sysfs; 614 - } 615 515 616 516 return 0; 617 517 618 518 err_remove_sysfs: 619 - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 620 - err_thermal_unregister: 621 - thermal_cooling_device_unregister(pr->cdev); 622 - err_power_exit: 623 - acpi_processor_power_exit(pr, device); 624 519 
sysfs_remove_link(&device->dev.kobj, "sysdev"); 625 520 err_free_cpumask: 626 521 free_cpumask_var(pr->throttling.shared_cpu_map); ··· 797 734 acpi_unmap_lsapic(pr->id); 798 735 return AE_ERROR; 799 736 } 737 + 738 + /* CPU got hot-plugged, but cpu_data is not initialized yet 739 + * Set flag to delay cpu_idle/throttling initialization 740 + * in: 741 + * acpi_processor_add() 742 + * acpi_processor_get_info() 743 + * and do it when the CPU gets online the first time 744 + * TBD: Cleanup above functions and try to do this more elegant. 745 + */ 746 + printk(KERN_INFO "CPU %d got hotplugged\n", pr->id); 747 + pr->flags.need_hotplug_init = 1; 800 748 801 749 return AE_OK; 802 750 }
+8
drivers/acpi/sleep.c
··· 438 438 }, 439 439 { 440 440 .callback = init_nvs_nosave, 441 + .ident = "Sony Vaio VPCCW29FX", 442 + .matches = { 443 + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), 444 + DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), 445 + }, 446 + }, 447 + { 448 + .callback = init_nvs_nosave, 441 449 .ident = "Averatec AV1020-ED2", 442 450 .matches = { 443 451 DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
+1 -1
drivers/idle/intel_idle.c
··· 527 527 528 528 return 0; 529 529 } 530 - 530 + EXPORT_SYMBOL_GPL(intel_idle_cpu_init); 531 531 532 532 static int __init intel_idle_init(void) 533 533 {
+2 -2
drivers/thermal/thermal_sys.c
··· 1304 1304 .name = THERMAL_GENL_MCAST_GROUP_NAME, 1305 1305 }; 1306 1306 1307 - int generate_netlink_event(u32 orig, enum events event) 1307 + int thermal_generate_netlink_event(u32 orig, enum events event) 1308 1308 { 1309 1309 struct sk_buff *skb; 1310 1310 struct nlattr *attr; ··· 1363 1363 1364 1364 return result; 1365 1365 } 1366 - EXPORT_SYMBOL(generate_netlink_event); 1366 + EXPORT_SYMBOL(thermal_generate_netlink_event); 1367 1367 1368 1368 static int genetlink_init(void) 1369 1369 {
+4
include/acpi/acpiosxf.h
··· 218 218 */ 219 219 acpi_status 220 220 acpi_os_read_memory(acpi_physical_address address, u32 * value, u32 width); 221 + acpi_status 222 + acpi_os_read_memory64(acpi_physical_address address, u64 *value, u32 width); 221 223 222 224 acpi_status 223 225 acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width); 226 + acpi_status 227 + acpi_os_write_memory64(acpi_physical_address address, u64 value, u32 width); 224 228 225 229 /* 226 230 * Platform and hardware-independent PCI configuration space access
-10
include/acpi/atomicio.h
··· 1 - #ifndef ACPI_ATOMIC_IO_H 2 - #define ACPI_ATOMIC_IO_H 3 - 4 - int acpi_pre_map_gar(struct acpi_generic_address *reg); 5 - int acpi_post_unmap_gar(struct acpi_generic_address *reg); 6 - 7 - int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg); 8 - int acpi_atomic_write(u64 val, struct acpi_generic_address *reg); 9 - 10 - #endif
+1
include/acpi/processor.h
··· 195 195 u8 has_cst:1; 196 196 u8 power_setup_done:1; 197 197 u8 bm_rld_set:1; 198 + u8 need_hotplug_init:1; 198 199 }; 199 200 200 201 struct acpi_processor {
+2 -2
include/linux/thermal.h
··· 152 152 void thermal_cooling_device_unregister(struct thermal_cooling_device *); 153 153 154 154 #ifdef CONFIG_NET 155 - extern int generate_netlink_event(u32 orig, enum events event); 155 + extern int thermal_generate_netlink_event(u32 orig, enum events event); 156 156 #else 157 - static inline int generate_netlink_event(u32 orig, enum events event) 157 + static inline int thermal_generate_netlink_event(u32 orig, enum events event) 158 158 { 159 159 return 0; 160 160 }