Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (34 commits)
ACPI: processor: push file static MADT pointer into internal map_madt_entry()
ACPI: processor: refactor internal map_lsapic_id()
ACPI: processor: refactor internal map_x2apic_id()
ACPI: processor: refactor internal map_lapic_id()
ACPI: processor: driver doesn't need to evaluate _PDC
ACPI: processor: remove early _PDC optin quirks
ACPI: processor: add internal processor_physically_present()
ACPI: processor: move acpi_get_cpuid into processor_core.c
ACPI: processor: export acpi_get_cpuid()
ACPI: processor: mv processor_pdc.c processor_core.c
ACPI: processor: mv processor_core.c processor_driver.c
ACPI: plan to delete "acpi=ht" boot option
ACPI: remove "acpi=ht" DMI blacklist
PNPACPI: add bus number support
PNPACPI: add window support
resource: add window support
resource: add bus number support
resource: expand IORESOURCE_TYPE_BITS to make room for bus resource type
acpiphp: Execute ACPI _REG method for hotadded devices
ACPI video: Be more liberal in validating _BQC behaviour
...

+1515 -1447
+7
Documentation/feature-removal-schedule.txt
···
 582  582   Who:	Avi Kivity <avi@redhat.com>
 583  583   
 584  584   ----------------------------
      585 + 
      586 + What:	"acpi=ht" boot option
      587 + When:	2.6.35
      588 + Why:	Useful in 2003, implementation is a hack.
      589 + 	Generally invoked by accident today.
      590 + 	Seen as doing more harm than good.
      591 + Who:	Len Brown <len.brown@intel.com>
-4
Documentation/kernel-parameters.txt
···
 200  200   		acpi_display_output=video
 201  201   		See above.
 202  202   
 203      - 	acpi_early_pdc_eval	[HW,ACPI] Evaluate processor _PDC methods
 204      - 			early. Needed on some platforms to properly
 205      - 			initialize the EC.
 206      - 
 207  203   	acpi_irq_balance	[HW,ACPI]
 208  204   			ACPI will balance active IRQs
 209  205   			default in APIC mode
+3
arch/ia64/kernel/acpi.c
···
  44   44   #include <linux/efi.h>
  45   45   #include <linux/mmzone.h>
  46   46   #include <linux/nodemask.h>
       47 + #include <acpi/processor.h>
  47   48   #include <asm/io.h>
  48   49   #include <asm/iosapic.h>
  49   50   #include <asm/machvec.h>
···
 907  906   
 908  907   	cpu_set(cpu, cpu_present_map);
 909  908   	ia64_cpu_to_sapicid[cpu] = physid;
      909 + 
      910 + 	acpi_processor_set_pdc(handle);
 910  911   
 911  912   	*pcpu = cpu;
 912  913   	return (0);
+6 -94
arch/x86/kernel/acpi/boot.c
··· 490 490 * ACPI based hotplug support for CPU 491 491 */ 492 492 #ifdef CONFIG_ACPI_HOTPLUG_CPU 493 + #include <acpi/processor.h> 493 494 494 495 static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) 495 496 { ··· 567 566 retval = -EINVAL; 568 567 goto free_new_map; 569 568 } 569 + 570 + acpi_processor_set_pdc(handle); 570 571 571 572 cpu = cpumask_first(new_map); 572 573 acpi_map_cpu2node(handle, cpu, physid); ··· 1296 1293 } 1297 1294 1298 1295 /* 1299 - * Limit ACPI to CPU enumeration for HT 1300 - */ 1301 - static int __init force_acpi_ht(const struct dmi_system_id *d) 1302 - { 1303 - if (!acpi_force) { 1304 - printk(KERN_NOTICE "%s detected: force use of acpi=ht\n", 1305 - d->ident); 1306 - disable_acpi(); 1307 - acpi_ht = 1; 1308 - } else { 1309 - printk(KERN_NOTICE 1310 - "Warning: acpi=force overrules DMI blacklist: acpi=ht\n"); 1311 - } 1312 - return 0; 1313 - } 1314 - 1315 - /* 1316 1296 * Force ignoring BIOS IRQ0 pin2 override 1317 1297 */ 1318 1298 static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) ··· 1327 1341 .matches = { 1328 1342 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), 1329 1343 DMI_MATCH(DMI_BOARD_NAME, "2629H1G"), 1330 - }, 1331 - }, 1332 - 1333 - /* 1334 - * Boxes that need acpi=ht 1335 - */ 1336 - { 1337 - .callback = force_acpi_ht, 1338 - .ident = "FSC Primergy T850", 1339 - .matches = { 1340 - DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), 1341 - DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"), 1342 - }, 1343 - }, 1344 - { 1345 - .callback = force_acpi_ht, 1346 - .ident = "HP VISUALIZE NT Workstation", 1347 - .matches = { 1348 - DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), 1349 - DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"), 1350 - }, 1351 - }, 1352 - { 1353 - .callback = force_acpi_ht, 1354 - .ident = "Compaq Workstation W8000", 1355 - .matches = { 1356 - DMI_MATCH(DMI_SYS_VENDOR, "Compaq"), 1357 - DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"), 1358 - }, 1359 - }, 1360 - { 1361 - .callback = force_acpi_ht, 1362 - .ident = "ASUS CUR-DLS", 1363 - .matches = { 1364 - DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), 1365 - DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"), 1366 - }, 1367 - }, 1368 - { 1369 - .callback = force_acpi_ht, 1370 - .ident = "ABIT i440BX-W83977", 1371 - .matches = { 1372 - DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"), 1373 - DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"), 1374 - }, 1375 - }, 1376 - { 1377 - .callback = force_acpi_ht, 1378 - .ident = "IBM Bladecenter", 1379 - .matches = { 1380 - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), 1381 - DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"), 1382 - }, 1383 - }, 1384 - { 1385 - .callback = force_acpi_ht, 1386 - .ident = "IBM eServer xSeries 360", 1387 - .matches = { 1388 - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), 1389 - DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"), 1390 - }, 1391 - }, 1392 - { 1393 - .callback = force_acpi_ht, 1394 - .ident = "IBM eserver xSeries 330", 1395 - .matches = { 1396 - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), 1397 - DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"), 1398 - }, 1399 - }, 1400 - { 1401 - .callback = force_acpi_ht, 1402 - .ident = "IBM eserver xSeries 440", 1403 - .matches = { 1404 - DMI_MATCH(DMI_BOARD_VENDOR, "IBM"), 1405 - DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"), 1406 1344 }, 1407 1345 }, 1408 1346 ··· 1562 1652 } 1563 1653 /* Limit ACPI just to boot-time to enable HT */ 1564 1654 else if (strcmp(arg, "ht") == 0) { 1565 - if (!acpi_force) 1655 + if (!acpi_force) { 1656 + 
printk(KERN_WARNING "acpi=ht will be removed in Linux-2.6.35\n"); 1566 1657 disable_acpi(); 1658 + } 1567 1659 acpi_ht = 1; 1568 1660 } 1569 1661 /* acpi=rsdt use RSDT instead of XSDT */
+2 -2
drivers/acpi/Makefile
···
  32   32   #
  33   33   acpi-y += bus.o glue.o
  34   34   acpi-y += scan.o
  35      - acpi-y += processor_pdc.o
       35 + acpi-y += processor_core.o
  36   36   acpi-y += ec.o
  37   37   acpi-$(CONFIG_ACPI_DOCK) += dock.o
  38   38   acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
···
  61   61   obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
  62   62   
  63   63   # processor has its own "processor." module_param namespace
  64      - processor-y := processor_core.o processor_throttling.o
       64 + processor-y := processor_driver.o processor_throttling.o
  65   65   processor-y += processor_idle.o processor_thermal.o
  66   66   processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
  67   67   
+1 -2
drivers/acpi/acpica/exmutex.c
···
 375  375   		return_ACPI_STATUS(AE_AML_MUTEX_NOT_ACQUIRED);
 376  376   	}
 377  377   
 378      - 	/* Must have a valid thread ID */
 379      - 
      378 + 	/* Must have a valid thread. */
 380  379   	if (!walk_state->thread) {
 381  380   		ACPI_ERROR((AE_INFO,
 382  381   			    "Cannot release Mutex [%4.4s], null thread info",
+68 -18
drivers/acpi/battery.c
··· 54 54 #define ACPI_BATTERY_DEVICE_NAME "Battery" 55 55 #define ACPI_BATTERY_NOTIFY_STATUS 0x80 56 56 #define ACPI_BATTERY_NOTIFY_INFO 0x81 57 + #define ACPI_BATTERY_NOTIFY_THRESHOLD 0x82 57 58 58 59 #define _COMPONENT ACPI_BATTERY_COMPONENT 59 60 ··· 89 88 90 89 MODULE_DEVICE_TABLE(acpi, battery_device_ids); 91 90 92 - /* For buggy DSDTs that report negative 16-bit values for either charging 93 - * or discharging current and/or report 0 as 65536 due to bad math. 94 - */ 95 - #define QUIRK_SIGNED16_CURRENT 0x0001 91 + enum { 92 + ACPI_BATTERY_ALARM_PRESENT, 93 + ACPI_BATTERY_XINFO_PRESENT, 94 + /* For buggy DSDTs that report negative 16-bit values for either 95 + * charging or discharging current and/or report 0 as 65536 96 + * due to bad math. 97 + */ 98 + ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, 99 + }; 96 100 97 101 struct acpi_battery { 98 102 struct mutex lock; ··· 115 109 int design_voltage; 116 110 int design_capacity_warning; 117 111 int design_capacity_low; 112 + int cycle_count; 113 + int measurement_accuracy; 114 + int max_sampling_time; 115 + int min_sampling_time; 116 + int max_averaging_interval; 117 + int min_averaging_interval; 118 118 int capacity_granularity_1; 119 119 int capacity_granularity_2; 120 120 int alarm; ··· 130 118 char oem_info[32]; 131 119 int state; 132 120 int power_unit; 133 - u8 alarm_present; 134 - long quirks; 121 + unsigned long flags; 135 122 }; 136 123 137 124 #define to_acpi_battery(x) container_of(x, struct acpi_battery, bat); ··· 209 198 case POWER_SUPPLY_PROP_TECHNOLOGY: 210 199 val->intval = acpi_battery_technology(battery); 211 200 break; 201 + case POWER_SUPPLY_PROP_CYCLE_COUNT: 202 + val->intval = battery->cycle_count; 203 + break; 212 204 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: 213 205 val->intval = battery->design_voltage * 1000; 214 206 break; ··· 253 239 POWER_SUPPLY_PROP_STATUS, 254 240 POWER_SUPPLY_PROP_PRESENT, 255 241 POWER_SUPPLY_PROP_TECHNOLOGY, 242 + POWER_SUPPLY_PROP_CYCLE_COUNT, 256 243 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 257 244 POWER_SUPPLY_PROP_VOLTAGE_NOW, 258 245 POWER_SUPPLY_PROP_CURRENT_NOW, ··· 269 254 POWER_SUPPLY_PROP_STATUS, 270 255 POWER_SUPPLY_PROP_PRESENT, 271 256 POWER_SUPPLY_PROP_TECHNOLOGY, 257 + POWER_SUPPLY_PROP_CYCLE_COUNT, 272 258 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 273 259 POWER_SUPPLY_PROP_VOLTAGE_NOW, 274 260 POWER_SUPPLY_PROP_CURRENT_NOW, ··· 313 297 {offsetof(struct acpi_battery, design_voltage), 0}, 314 298 {offsetof(struct acpi_battery, design_capacity_warning), 0}, 315 299 {offsetof(struct acpi_battery, design_capacity_low), 0}, 300 + {offsetof(struct acpi_battery, capacity_granularity_1), 0}, 301 + {offsetof(struct acpi_battery, capacity_granularity_2), 0}, 302 + {offsetof(struct acpi_battery, model_number), 1}, 303 + {offsetof(struct acpi_battery, serial_number), 1}, 304 + {offsetof(struct acpi_battery, type), 1}, 305 + {offsetof(struct acpi_battery, oem_info), 1}, 306 + }; 307 + 308 + static struct acpi_offsets extended_info_offsets[] = { 309 + {offsetof(struct acpi_battery, power_unit), 0}, 310 + {offsetof(struct acpi_battery, design_capacity), 0}, 311 + {offsetof(struct acpi_battery, full_charge_capacity), 0}, 312 + {offsetof(struct acpi_battery, technology), 0}, 313 + {offsetof(struct acpi_battery, design_voltage), 0}, 314 + {offsetof(struct acpi_battery, design_capacity_warning), 0}, 315 + {offsetof(struct acpi_battery, design_capacity_low), 0}, 316 + {offsetof(struct acpi_battery, cycle_count), 0}, 317 + {offsetof(struct acpi_battery, measurement_accuracy), 0}, 318 + {offsetof(struct 
acpi_battery, max_sampling_time), 0}, 319 + {offsetof(struct acpi_battery, min_sampling_time), 0}, 320 + {offsetof(struct acpi_battery, max_averaging_interval), 0}, 321 + {offsetof(struct acpi_battery, min_averaging_interval), 0}, 316 322 {offsetof(struct acpi_battery, capacity_granularity_1), 0}, 317 323 {offsetof(struct acpi_battery, capacity_granularity_2), 0}, 318 324 {offsetof(struct acpi_battery, model_number), 1}, ··· 388 350 { 389 351 int result = -EFAULT; 390 352 acpi_status status = 0; 353 + char *name = test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)? 354 + "_BIX" : "_BIF"; 355 + 391 356 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 392 357 393 358 if (!acpi_battery_present(battery)) 394 359 return 0; 395 360 mutex_lock(&battery->lock); 396 - status = acpi_evaluate_object(battery->device->handle, "_BIF", 397 - NULL, &buffer); 361 + status = acpi_evaluate_object(battery->device->handle, name, 362 + NULL, &buffer); 398 363 mutex_unlock(&battery->lock); 399 364 400 365 if (ACPI_FAILURE(status)) { 401 - ACPI_EXCEPTION((AE_INFO, status, "Evaluating _BIF")); 366 + ACPI_EXCEPTION((AE_INFO, status, "Evaluating %s", name)); 402 367 return -ENODEV; 403 368 } 404 - 405 - result = extract_package(battery, buffer.pointer, 406 - info_offsets, ARRAY_SIZE(info_offsets)); 369 + if (test_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags)) 370 + result = extract_package(battery, buffer.pointer, 371 + extended_info_offsets, 372 + ARRAY_SIZE(extended_info_offsets)); 373 + else 374 + result = extract_package(battery, buffer.pointer, 375 + info_offsets, ARRAY_SIZE(info_offsets)); 407 376 kfree(buffer.pointer); 408 377 return result; 409 378 } ··· 444 399 battery->update_time = jiffies; 445 400 kfree(buffer.pointer); 446 401 447 - if ((battery->quirks & QUIRK_SIGNED16_CURRENT) && 402 + if (test_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags) && 448 403 battery->rate_now != -1) 449 404 battery->rate_now = abs((s16)battery->rate_now); 450 405 ··· 457 412 union acpi_object arg0 = { .type = ACPI_TYPE_INTEGER }; 458 413 struct acpi_object_list arg_list = { 1, &arg0 }; 459 414 460 - if (!acpi_battery_present(battery)|| !battery->alarm_present) 415 + if (!acpi_battery_present(battery) || 416 + !test_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags)) 461 417 return -ENODEV; 462 418 463 419 arg0.integer.value = battery->alarm; ··· 483 437 /* See if alarms are supported, and if so, set default */ 484 438 status = acpi_get_handle(battery->device->handle, "_BTP", &handle); 485 439 if (ACPI_FAILURE(status)) { 486 - battery->alarm_present = 0; 440 + clear_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags); 487 441 return 0; 488 442 } 489 - battery->alarm_present = 1; 443 + set_bit(ACPI_BATTERY_ALARM_PRESENT, &battery->flags); 490 444 if (!battery->alarm) 491 445 battery->alarm = battery->design_capacity_warning; 492 446 return acpi_battery_set_alarm(battery); ··· 556 510 557 511 static void acpi_battery_quirks(struct acpi_battery *battery) 558 512 { 559 - battery->quirks = 0; 560 513 if (dmi_name_in_vendors("Acer") && battery->power_unit) { 561 - battery->quirks |= QUIRK_SIGNED16_CURRENT; 514 + set_bit(ACPI_BATTERY_QUIRK_SIGNED16_CURRENT, &battery->flags); 562 515 } 563 516 } 564 517 ··· 635 590 seq_printf(seq, "design capacity low: %d %sh\n", 636 591 battery->design_capacity_low, 637 592 acpi_battery_units(battery)); 593 + seq_printf(seq, "cycle count: %i\n", battery->cycle_count); 638 594 seq_printf(seq, "capacity granularity 1: %d %sh\n", 639 595 battery->capacity_granularity_1, 640 596 
acpi_battery_units(battery)); ··· 887 841 { 888 842 int result = 0; 889 843 struct acpi_battery *battery = NULL; 844 + acpi_handle handle; 890 845 if (!device) 891 846 return -EINVAL; 892 847 battery = kzalloc(sizeof(struct acpi_battery), GFP_KERNEL); ··· 898 851 strcpy(acpi_device_class(device), ACPI_BATTERY_CLASS); 899 852 device->driver_data = battery; 900 853 mutex_init(&battery->lock); 854 + if (ACPI_SUCCESS(acpi_get_handle(battery->device->handle, 855 + "_BIX", &handle))) 856 + set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); 901 857 acpi_battery_update(battery); 902 858 #ifdef CONFIG_ACPI_PROCFS_POWER 903 859 result = acpi_battery_add_fs(device);
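
The battery.c change above is a probe-then-fall-back on the extended _BIX package. A minimal sketch of that pattern, with an illustrative helper name that is not part of this merge:

    /* Illustrative sketch only: prefer _BIX when the namespace exposes it,
     * otherwise keep evaluating the legacy _BIF -- the same decision the
     * ACPI_BATTERY_XINFO_PRESENT flag added above encodes. */
    #include <acpi/acpi.h>

    static const char *pick_battery_info_method(acpi_handle handle)
    {
    	acpi_handle dummy;

    	if (ACPI_SUCCESS(acpi_get_handle(handle, "_BIX", &dummy)))
    		return "_BIX";	/* extended info: adds cycle count, accuracy, ... */
    	return "_BIF";		/* legacy battery information */
    }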
+5 -5
drivers/acpi/bus.c
···
 190  190   	 * Get the device's power state either directly (via _PSC) or
 191  191   	 * indirectly (via power resources).
 192  192   	 */
 193      - 	if (device->power.flags.explicit_get) {
      193 + 	if (device->power.flags.power_resources) {
      194 + 		result = acpi_power_get_inferred_state(device);
      195 + 		if (result)
      196 + 			return result;
      197 + 	} else if (device->power.flags.explicit_get) {
 194  198   		status = acpi_evaluate_integer(device->handle, "_PSC",
 195  199   					       NULL, &psc);
 196  200   		if (ACPI_FAILURE(status))
 197  201   			return -ENODEV;
 198  202   		device->power.state = (int)psc;
 199      - 	} else if (device->power.flags.power_resources) {
 200      - 		result = acpi_power_get_inferred_state(device);
 201      - 		if (result)
 202      - 			return result;
 203  203   	}
 204  204   
 205  205   	*state = device->power.state;
+32 -1
drivers/acpi/ec.c
···
  76   76   enum {
  77   77   	EC_FLAGS_QUERY_PENDING,		/* Query is pending */
  78   78   	EC_FLAGS_GPE_STORM,		/* GPE storm detected */
  79      - 	EC_FLAGS_HANDLERS_INSTALLED	/* Handlers for GPE and
       79 + 	EC_FLAGS_HANDLERS_INSTALLED,	/* Handlers for GPE and
  80   80   					 * OpReg are installed */
       81 + 	EC_FLAGS_FROZEN,		/* Transactions are suspended */
  81   82   };
  82   83   
  83   84   /* If we find an EC via the ECDT, we need to keep a ptr to its context */
···
 292  291   	if (t->rdata)
 293  292   		memset(t->rdata, 0, t->rlen);
 294  293   	mutex_lock(&ec->lock);
      294 + 	if (test_bit(EC_FLAGS_FROZEN, &ec->flags)) {
      295 + 		status = -EINVAL;
      296 + 		goto unlock;
      297 + 	}
 295  298   	if (ec->global_lock) {
 296  299   		status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk);
 297  300   		if (ACPI_FAILURE(status)) {
···
 457  452   }
 458  453   
 459  454   EXPORT_SYMBOL(ec_transaction);
      455 + 
      456 + void acpi_ec_suspend_transactions(void)
      457 + {
      458 + 	struct acpi_ec *ec = first_ec;
      459 + 
      460 + 	if (!ec)
      461 + 		return;
      462 + 
      463 + 	mutex_lock(&ec->lock);
      464 + 	/* Prevent transactions from being carried out */
      465 + 	set_bit(EC_FLAGS_FROZEN, &ec->flags);
      466 + 	mutex_unlock(&ec->lock);
      467 + }
      468 + 
      469 + void acpi_ec_resume_transactions(void)
      470 + {
      471 + 	struct acpi_ec *ec = first_ec;
      472 + 
      473 + 	if (!ec)
      474 + 		return;
      475 + 
      476 + 	mutex_lock(&ec->lock);
      477 + 	/* Allow transactions to be carried out again */
      478 + 	clear_bit(EC_FLAGS_FROZEN, &ec->flags);
      479 + 	mutex_unlock(&ec->lock);
      480 + }
 460  481   
 461  482   static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
 462  483   {
+2
drivers/acpi/internal.h
···
  49   49   int acpi_ec_init(void);
  50   50   int acpi_ec_ecdt_probe(void);
  51   51   int acpi_boot_ec_enable(void);
       52 + void acpi_ec_suspend_transactions(void);
       53 + void acpi_ec_resume_transactions(void);
  52   54   
  53   55   /*--------------------------------------------------------------------------
  54   56                                     Suspend/Resume
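
The two new EC helpers exist to bracket a window in which EC transactions must be rejected (while EC_FLAGS_FROZEN is set, acpi_ec_transaction fails with -EINVAL). A minimal caller sketch with hypothetical suspend/resume hooks -- the actual call sites are not part of this merge:

    /* Hypothetical hooks in drivers/acpi (would #include "internal.h"):
     * freeze EC transactions before firmware takes over, thaw them once
     * the OS is back in control. */
    static int example_acpi_pm_freeze(void)
    {
    	acpi_ec_suspend_transactions();	/* transactions now return -EINVAL */
    	return 0;
    }

    static void example_acpi_pm_thaw(void)
    {
    	acpi_ec_resume_transactions();	/* transactions proceed again */
    }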
+178 -966
drivers/acpi/processor_core.c
··· 1 1 /* 2 - * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $) 2 + * Copyright (C) 2005 Intel Corporation 3 + * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. 3 4 * 4 - * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 - * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 - * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> 7 - * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 8 - * - Added processor hotplug support 9 - * 10 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 - * 12 - * This program is free software; you can redistribute it and/or modify 13 - * it under the terms of the GNU General Public License as published by 14 - * the Free Software Foundation; either version 2 of the License, or (at 15 - * your option) any later version. 16 - * 17 - * This program is distributed in the hope that it will be useful, but 18 - * WITHOUT ANY WARRANTY; without even the implied warranty of 19 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 - * General Public License for more details. 21 - * 22 - * You should have received a copy of the GNU General Public License along 23 - * with this program; if not, write to the Free Software Foundation, Inc., 24 - * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 25 - * 26 - * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 27 - * TBD: 28 - * 1. Make # power states dynamic. 29 - * 2. Support duty_cycle values that span bit 4. 30 - * 3. Optimize by having scheduler determine business instead of 31 - * having us try to calculate it here. 32 - * 4. Need C1 timing -- must modify kernel (IRQ handler) to get this. 5 + * Alex Chiang <achiang@hp.com> 6 + * - Unified x86/ia64 implementations 7 + * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 8 + * - Added _PDC for platforms with Intel CPUs 33 9 */ 34 - 35 - #include <linux/kernel.h> 36 - #include <linux/module.h> 37 - #include <linux/init.h> 38 - #include <linux/types.h> 39 - #include <linux/pci.h> 40 - #include <linux/pm.h> 41 - #include <linux/cpufreq.h> 42 - #include <linux/cpu.h> 43 - #include <linux/proc_fs.h> 44 - #include <linux/seq_file.h> 45 10 #include <linux/dmi.h> 46 - #include <linux/moduleparam.h> 47 - #include <linux/cpuidle.h> 48 11 49 - #include <asm/io.h> 50 - #include <asm/system.h> 51 - #include <asm/cpu.h> 52 - #include <asm/delay.h> 53 - #include <asm/uaccess.h> 54 - #include <asm/processor.h> 55 - #include <asm/smp.h> 56 - #include <asm/acpi.h> 57 - 58 - #include <acpi/acpi_bus.h> 59 12 #include <acpi/acpi_drivers.h> 60 13 #include <acpi/processor.h> 61 14 62 - #define PREFIX "ACPI: " 15 + #include "internal.h" 63 16 64 - #define ACPI_PROCESSOR_CLASS "processor" 65 - #define ACPI_PROCESSOR_DEVICE_NAME "Processor" 66 - #define ACPI_PROCESSOR_FILE_INFO "info" 67 - #define ACPI_PROCESSOR_FILE_THROTTLING "throttling" 68 - #define ACPI_PROCESSOR_FILE_LIMIT "limit" 69 - #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80 70 - #define ACPI_PROCESSOR_NOTIFY_POWER 0x81 71 - #define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82 72 - 73 - #define ACPI_PROCESSOR_LIMIT_USER 0 74 - #define ACPI_PROCESSOR_LIMIT_THERMAL 1 75 - 17 + #define PREFIX "ACPI: " 76 18 #define _COMPONENT ACPI_PROCESSOR_COMPONENT 77 19 ACPI_MODULE_NAME("processor_core"); 78 20 79 - MODULE_AUTHOR("Paul Diefenbaugh"); 80 - MODULE_DESCRIPTION("ACPI Processor Driver"); 81 - MODULE_LICENSE("GPL"); 21 + static int set_no_mwait(const struct dmi_system_id *id) 22 
+ { 23 + printk(KERN_NOTICE PREFIX "%s detected - " 24 + "disabling mwait for CPU C-states\n", id->ident); 25 + idle_nomwait = 1; 26 + return 0; 27 + } 82 28 83 - static int acpi_processor_add(struct acpi_device *device); 84 - static int acpi_processor_remove(struct acpi_device *device, int type); 85 - #ifdef CONFIG_ACPI_PROCFS 86 - static int acpi_processor_info_open_fs(struct inode *inode, struct file *file); 87 - #endif 88 - static void acpi_processor_notify(struct acpi_device *device, u32 event); 89 - static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu); 90 - static int acpi_processor_handle_eject(struct acpi_processor *pr); 91 - 92 - 93 - static const struct acpi_device_id processor_device_ids[] = { 94 - {ACPI_PROCESSOR_OBJECT_HID, 0}, 95 - {"ACPI0007", 0}, 96 - {"", 0}, 97 - }; 98 - MODULE_DEVICE_TABLE(acpi, processor_device_ids); 99 - 100 - static struct acpi_driver acpi_processor_driver = { 101 - .name = "processor", 102 - .class = ACPI_PROCESSOR_CLASS, 103 - .ids = processor_device_ids, 104 - .ops = { 105 - .add = acpi_processor_add, 106 - .remove = acpi_processor_remove, 107 - .suspend = acpi_processor_suspend, 108 - .resume = acpi_processor_resume, 109 - .notify = acpi_processor_notify, 110 - }, 29 + static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { 30 + { 31 + set_no_mwait, "IFL91 board", { 32 + DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), 33 + DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), 34 + DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), 35 + DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, 36 + { 37 + set_no_mwait, "Extensa 5220", { 38 + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 39 + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 40 + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), 41 + DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL}, 42 + {}, 111 43 }; 112 44 113 - #define INSTALL_NOTIFY_HANDLER 1 114 - #define UNINSTALL_NOTIFY_HANDLER 2 115 - #ifdef CONFIG_ACPI_PROCFS 116 - static const struct file_operations acpi_processor_info_fops = { 117 - .owner = THIS_MODULE, 118 - .open = acpi_processor_info_open_fs, 119 - .read = seq_read, 120 - .llseek = seq_lseek, 121 - .release = single_release, 122 - }; 123 - #endif 124 - 125 - DEFINE_PER_CPU(struct acpi_processor *, processors); 126 - EXPORT_PER_CPU_SYMBOL(processors); 127 - 128 - struct acpi_processor_errata errata __read_mostly; 129 - 130 - /* -------------------------------------------------------------------------- 131 - Errata Handling 132 - -------------------------------------------------------------------------- */ 133 - 134 - static int acpi_processor_errata_piix4(struct pci_dev *dev) 135 - { 136 - u8 value1 = 0; 137 - u8 value2 = 0; 138 - 139 - 140 - if (!dev) 141 - return -EINVAL; 142 - 143 - /* 144 - * Note that 'dev' references the PIIX4 ACPI Controller. 
145 - */ 146 - 147 - switch (dev->revision) { 148 - case 0: 149 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n")); 150 - break; 151 - case 1: 152 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n")); 153 - break; 154 - case 2: 155 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n")); 156 - break; 157 - case 3: 158 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n")); 159 - break; 160 - default: 161 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n")); 162 - break; 163 - } 164 - 165 - switch (dev->revision) { 166 - 167 - case 0: /* PIIX4 A-step */ 168 - case 1: /* PIIX4 B-step */ 169 - /* 170 - * See specification changes #13 ("Manual Throttle Duty Cycle") 171 - * and #14 ("Enabling and Disabling Manual Throttle"), plus 172 - * erratum #5 ("STPCLK# Deassertion Time") from the January 173 - * 2002 PIIX4 specification update. Applies to only older 174 - * PIIX4 models. 175 - */ 176 - errata.piix4.throttle = 1; 177 - 178 - case 2: /* PIIX4E */ 179 - case 3: /* PIIX4M */ 180 - /* 181 - * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA 182 - * Livelock") from the January 2002 PIIX4 specification update. 183 - * Applies to all PIIX4 models. 184 - */ 185 - 186 - /* 187 - * BM-IDE 188 - * ------ 189 - * Find the PIIX4 IDE Controller and get the Bus Master IDE 190 - * Status register address. We'll use this later to read 191 - * each IDE controller's DMA status to make sure we catch all 192 - * DMA activity. 193 - */ 194 - dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 195 - PCI_DEVICE_ID_INTEL_82371AB, 196 - PCI_ANY_ID, PCI_ANY_ID, NULL); 197 - if (dev) { 198 - errata.piix4.bmisx = pci_resource_start(dev, 4); 199 - pci_dev_put(dev); 200 - } 201 - 202 - /* 203 - * Type-F DMA 204 - * ---------- 205 - * Find the PIIX4 ISA Controller and read the Motherboard 206 - * DMA controller's status to see if Type-F (Fast) DMA mode 207 - * is enabled (bit 7) on either channel. Note that we'll 208 - * disable C3 support if this is enabled, as some legacy 209 - * devices won't operate well if fast DMA is disabled. 
210 - */ 211 - dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 212 - PCI_DEVICE_ID_INTEL_82371AB_0, 213 - PCI_ANY_ID, PCI_ANY_ID, NULL); 214 - if (dev) { 215 - pci_read_config_byte(dev, 0x76, &value1); 216 - pci_read_config_byte(dev, 0x77, &value2); 217 - if ((value1 & 0x80) || (value2 & 0x80)) 218 - errata.piix4.fdma = 1; 219 - pci_dev_put(dev); 220 - } 221 - 222 - break; 223 - } 224 - 225 - if (errata.piix4.bmisx) 226 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 227 - "Bus master activity detection (BM-IDE) erratum enabled\n")); 228 - if (errata.piix4.fdma) 229 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 230 - "Type-F DMA livelock erratum (C3 disabled)\n")); 231 - 232 - return 0; 233 - } 234 - 235 - static int acpi_processor_errata(struct acpi_processor *pr) 236 - { 237 - int result = 0; 238 - struct pci_dev *dev = NULL; 239 - 240 - 241 - if (!pr) 242 - return -EINVAL; 243 - 244 - /* 245 - * PIIX4 246 - */ 247 - dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 248 - PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, 249 - PCI_ANY_ID, NULL); 250 - if (dev) { 251 - result = acpi_processor_errata_piix4(dev); 252 - pci_dev_put(dev); 253 - } 254 - 255 - return result; 256 - } 257 - 258 - /* -------------------------------------------------------------------------- 259 - FS Interface (/proc) 260 - -------------------------------------------------------------------------- */ 261 - 262 - #ifdef CONFIG_ACPI_PROCFS 263 - static struct proc_dir_entry *acpi_processor_dir = NULL; 264 - 265 - static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset) 266 - { 267 - struct acpi_processor *pr = seq->private; 268 - 269 - 270 - if (!pr) 271 - goto end; 272 - 273 - seq_printf(seq, "processor id: %d\n" 274 - "acpi id: %d\n" 275 - "bus mastering control: %s\n" 276 - "power management: %s\n" 277 - "throttling control: %s\n" 278 - "limit interface: %s\n", 279 - pr->id, 280 - pr->acpi_id, 281 - pr->flags.bm_control ? "yes" : "no", 282 - pr->flags.power ? "yes" : "no", 283 - pr->flags.throttling ? "yes" : "no", 284 - pr->flags.limit ? 
"yes" : "no"); 285 - 286 - end: 287 - return 0; 288 - } 289 - 290 - static int acpi_processor_info_open_fs(struct inode *inode, struct file *file) 291 - { 292 - return single_open(file, acpi_processor_info_seq_show, 293 - PDE(inode)->data); 294 - } 295 - 296 - static int __cpuinit acpi_processor_add_fs(struct acpi_device *device) 297 - { 298 - struct proc_dir_entry *entry = NULL; 299 - 300 - 301 - if (!acpi_device_dir(device)) { 302 - acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), 303 - acpi_processor_dir); 304 - if (!acpi_device_dir(device)) 305 - return -ENODEV; 306 - } 307 - 308 - /* 'info' [R] */ 309 - entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO, 310 - S_IRUGO, acpi_device_dir(device), 311 - &acpi_processor_info_fops, 312 - acpi_driver_data(device)); 313 - if (!entry) 314 - return -EIO; 315 - 316 - /* 'throttling' [R/W] */ 317 - entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING, 318 - S_IFREG | S_IRUGO | S_IWUSR, 319 - acpi_device_dir(device), 320 - &acpi_processor_throttling_fops, 321 - acpi_driver_data(device)); 322 - if (!entry) 323 - return -EIO; 324 - 325 - /* 'limit' [R/W] */ 326 - entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT, 327 - S_IFREG | S_IRUGO | S_IWUSR, 328 - acpi_device_dir(device), 329 - &acpi_processor_limit_fops, 330 - acpi_driver_data(device)); 331 - if (!entry) 332 - return -EIO; 333 - return 0; 334 - } 335 - static int acpi_processor_remove_fs(struct acpi_device *device) 336 - { 337 - 338 - if (acpi_device_dir(device)) { 339 - remove_proc_entry(ACPI_PROCESSOR_FILE_INFO, 340 - acpi_device_dir(device)); 341 - remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, 342 - acpi_device_dir(device)); 343 - remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, 344 - acpi_device_dir(device)); 345 - remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); 346 - acpi_device_dir(device) = NULL; 347 - } 348 - 349 - return 0; 350 - } 351 - #else 352 - static inline int acpi_processor_add_fs(struct acpi_device *device) 353 - { 354 - return 0; 355 - } 356 - static inline int acpi_processor_remove_fs(struct acpi_device *device) 357 - { 358 - return 0; 359 - } 360 - #endif 361 - 362 - /* Use the acpiid in MADT to map cpus in case of SMP */ 363 - 364 - #ifndef CONFIG_SMP 365 - static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id) { return -1; } 366 - #else 367 - 368 - static struct acpi_table_madt *madt; 369 - 45 + #ifdef CONFIG_SMP 370 46 static int map_lapic_id(struct acpi_subtable_header *entry, 371 47 u32 acpi_id, int *apic_id) 372 48 { 373 49 struct acpi_madt_local_apic *lapic = 374 50 (struct acpi_madt_local_apic *)entry; 375 - if ((lapic->lapic_flags & ACPI_MADT_ENABLED) && 376 - lapic->processor_id == acpi_id) { 377 - *apic_id = lapic->id; 378 - return 1; 379 - } 380 - return 0; 51 + 52 + if (!(lapic->lapic_flags & ACPI_MADT_ENABLED)) 53 + return 0; 54 + 55 + if (lapic->processor_id != acpi_id) 56 + return 0; 57 + 58 + *apic_id = lapic->id; 59 + return 1; 381 60 } 382 61 383 62 static int map_x2apic_id(struct acpi_subtable_header *entry, ··· 64 385 { 65 386 struct acpi_madt_local_x2apic *apic = 66 387 (struct acpi_madt_local_x2apic *)entry; 67 - u32 tmp = apic->local_apic_id; 68 388 69 - /* Only check enabled APICs*/ 70 389 if (!(apic->lapic_flags & ACPI_MADT_ENABLED)) 71 390 return 0; 72 391 73 - /* Device statement declaration type */ 74 - if (device_declaration) { 75 - if (apic->uid == acpi_id) 76 - goto found; 392 + if (device_declaration && (apic->uid == acpi_id)) { 393 + *apic_id = apic->local_apic_id; 394 + return 1; 77 395 } 78 396 
79 397 return 0; 80 - found: 81 - *apic_id = tmp; 82 - return 1; 83 398 } 84 399 85 400 static int map_lsapic_id(struct acpi_subtable_header *entry, ··· 81 408 { 82 409 struct acpi_madt_local_sapic *lsapic = 83 410 (struct acpi_madt_local_sapic *)entry; 84 - u32 tmp = (lsapic->id << 8) | lsapic->eid; 85 411 86 - /* Only check enabled APICs*/ 87 412 if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED)) 88 413 return 0; 89 414 90 - /* Device statement declaration type */ 91 415 if (device_declaration) { 92 - if (entry->length < 16) 93 - printk(KERN_ERR PREFIX 94 - "Invalid LSAPIC with Device type processor (SAPIC ID %#x)\n", 95 - tmp); 96 - else if (lsapic->uid == acpi_id) 97 - goto found; 98 - /* Processor statement declaration type */ 99 - } else if (lsapic->processor_id == acpi_id) 100 - goto found; 416 + if ((entry->length < 16) || (lsapic->uid != acpi_id)) 417 + return 0; 418 + } else if (lsapic->processor_id != acpi_id) 419 + return 0; 101 420 102 - return 0; 103 - found: 104 - *apic_id = tmp; 421 + *apic_id = (lsapic->id << 8) | lsapic->eid; 105 422 return 1; 106 423 } 107 424 108 425 static int map_madt_entry(int type, u32 acpi_id) 109 426 { 110 427 unsigned long madt_end, entry; 428 + static struct acpi_table_madt *madt; 429 + static int read_madt; 111 430 int apic_id = -1; 431 + 432 + if (!read_madt) { 433 + if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, 434 + (struct acpi_table_header **)&madt))) 435 + madt = NULL; 436 + read_madt++; 437 + } 112 438 113 439 if (!madt) 114 440 return apic_id; ··· 168 496 return apic_id; 169 497 } 170 498 171 - static int get_cpu_id(acpi_handle handle, int type, u32 acpi_id) 499 + int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) 172 500 { 173 501 int i; 174 502 int apic_id = -1; ··· 185 513 } 186 514 return -1; 187 515 } 516 + EXPORT_SYMBOL_GPL(acpi_get_cpuid); 188 517 #endif 189 518 190 - /* -------------------------------------------------------------------------- 191 - Driver Interface 192 - -------------------------------------------------------------------------- */ 193 - 194 - static int acpi_processor_get_info(struct acpi_device *device) 519 + static bool processor_physically_present(acpi_handle handle) 195 520 { 196 - acpi_status status = 0; 521 + int cpuid, type; 522 + u32 acpi_id; 523 + acpi_status status; 524 + acpi_object_type acpi_type; 525 + unsigned long long tmp; 197 526 union acpi_object object = { 0 }; 198 527 struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 199 - struct acpi_processor *pr; 200 - int cpu_index, device_declaration = 0; 201 - static int cpu0_initialized; 202 528 203 - pr = acpi_driver_data(device); 204 - if (!pr) 205 - return -EINVAL; 206 - 207 - if (num_online_cpus() > 1) 208 - errata.smp = TRUE; 209 - 210 - acpi_processor_errata(pr); 211 - 212 - /* 213 - * Check to see if we have bus mastering arbitration control. This 214 - * is required for proper C3 usage (to maintain cache coherency). 
215 - */ 216 - if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) { 217 - pr->flags.bm_control = 1; 218 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 219 - "Bus mastering arbitration control present\n")); 220 - } else 221 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 222 - "No bus mastering arbitration control\n")); 223 - 224 - if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { 225 - /* Declared with "Processor" statement; match ProcessorID */ 226 - status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); 227 - if (ACPI_FAILURE(status)) { 228 - printk(KERN_ERR PREFIX "Evaluating processor object\n"); 229 - return -ENODEV; 230 - } 231 - 232 - /* 233 - * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. 234 - * >>> 'acpi_get_processor_id(acpi_id, &id)' in 235 - * arch/xxx/acpi.c 236 - */ 237 - pr->acpi_id = object.processor.proc_id; 238 - } else { 239 - /* 240 - * Declared with "Device" statement; match _UID. 241 - * Note that we don't handle string _UIDs yet. 242 - */ 243 - unsigned long long value; 244 - status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, 245 - NULL, &value); 246 - if (ACPI_FAILURE(status)) { 247 - printk(KERN_ERR PREFIX 248 - "Evaluating processor _UID [%#x]\n", status); 249 - return -ENODEV; 250 - } 251 - device_declaration = 1; 252 - pr->acpi_id = value; 253 - } 254 - cpu_index = get_cpu_id(pr->handle, device_declaration, pr->acpi_id); 255 - 256 - /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 257 - if (!cpu0_initialized && (cpu_index == -1) && 258 - (num_online_cpus() == 1)) { 259 - cpu_index = 0; 260 - } 261 - 262 - cpu0_initialized = 1; 263 - 264 - pr->id = cpu_index; 265 - 266 - /* 267 - * Extra Processor objects may be enumerated on MP systems with 268 - * less than the max # of CPUs. They should be ignored _iff 269 - * they are physically not present. 270 - */ 271 - if (pr->id == -1) { 272 - if (ACPI_FAILURE 273 - (acpi_processor_hotadd_init(pr->handle, &pr->id))) { 274 - return -ENODEV; 275 - } 276 - } 277 - /* 278 - * On some boxes several processors use the same processor bus id. 279 - * But they are located in different scope. For example: 280 - * \_SB.SCK0.CPU0 281 - * \_SB.SCK1.CPU0 282 - * Rename the processor device bus id. And the new bus id will be 283 - * generated as the following format: 284 - * CPU+CPU ID. 285 - */ 286 - sprintf(acpi_device_bid(device), "CPU%X", pr->id); 287 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, 288 - pr->acpi_id)); 289 - 290 - if (!object.processor.pblk_address) 291 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n")); 292 - else if (object.processor.pblk_length != 6) 293 - printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n", 294 - object.processor.pblk_length); 295 - else { 296 - pr->throttling.address = object.processor.pblk_address; 297 - pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset; 298 - pr->throttling.duty_width = acpi_gbl_FADT.duty_width; 299 - 300 - pr->pblk = object.processor.pblk_address; 301 - 302 - /* 303 - * We don't care about error returns - we just try to mark 304 - * these reserved so that nobody else is confused into thinking 305 - * that this region might be unused.. 
306 - * 307 - * (In particular, allocating the IO range for Cardbus) 308 - */ 309 - request_region(pr->throttling.address, 6, "ACPI CPU throttle"); 310 - } 311 - 312 - /* 313 - * If ACPI describes a slot number for this CPU, we can use it 314 - * ensure we get the right value in the "physical id" field 315 - * of /proc/cpuinfo 316 - */ 317 - status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer); 318 - if (ACPI_SUCCESS(status)) 319 - arch_fix_phys_package_id(pr->id, object.integer.value); 320 - 321 - return 0; 322 - } 323 - 324 - static DEFINE_PER_CPU(void *, processor_device_array); 325 - 326 - static void acpi_processor_notify(struct acpi_device *device, u32 event) 327 - { 328 - struct acpi_processor *pr = acpi_driver_data(device); 329 - int saved; 330 - 331 - if (!pr) 332 - return; 333 - 334 - switch (event) { 335 - case ACPI_PROCESSOR_NOTIFY_PERFORMANCE: 336 - saved = pr->performance_platform_limit; 337 - acpi_processor_ppc_has_changed(pr, 1); 338 - if (saved == pr->performance_platform_limit) 339 - break; 340 - acpi_bus_generate_proc_event(device, event, 341 - pr->performance_platform_limit); 342 - acpi_bus_generate_netlink_event(device->pnp.device_class, 343 - dev_name(&device->dev), event, 344 - pr->performance_platform_limit); 345 - break; 346 - case ACPI_PROCESSOR_NOTIFY_POWER: 347 - acpi_processor_cst_has_changed(pr); 348 - acpi_bus_generate_proc_event(device, event, 0); 349 - acpi_bus_generate_netlink_event(device->pnp.device_class, 350 - dev_name(&device->dev), event, 0); 351 - break; 352 - case ACPI_PROCESSOR_NOTIFY_THROTTLING: 353 - acpi_processor_tstate_has_changed(pr); 354 - acpi_bus_generate_proc_event(device, event, 0); 355 - acpi_bus_generate_netlink_event(device->pnp.device_class, 356 - dev_name(&device->dev), event, 0); 357 - default: 358 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 359 - "Unsupported event [0x%x]\n", event)); 360 - break; 361 - } 362 - 363 - return; 364 - } 365 - 366 - static int acpi_cpu_soft_notify(struct notifier_block *nfb, 367 - unsigned long action, void *hcpu) 368 - { 369 - unsigned int cpu = (unsigned long)hcpu; 370 - struct acpi_processor *pr = per_cpu(processors, cpu); 371 - 372 - if (action == CPU_ONLINE && pr) { 373 - acpi_processor_ppc_has_changed(pr, 0); 374 - acpi_processor_cst_has_changed(pr); 375 - acpi_processor_tstate_has_changed(pr); 376 - } 377 - return NOTIFY_OK; 378 - } 379 - 380 - static struct notifier_block acpi_cpu_notifier = 381 - { 382 - .notifier_call = acpi_cpu_soft_notify, 383 - }; 384 - 385 - static int __cpuinit acpi_processor_add(struct acpi_device *device) 386 - { 387 - struct acpi_processor *pr = NULL; 388 - int result = 0; 389 - struct sys_device *sysdev; 390 - 391 - pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); 392 - if (!pr) 393 - return -ENOMEM; 394 - 395 - if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 396 - kfree(pr); 397 - return -ENOMEM; 398 - } 399 - 400 - pr->handle = device->handle; 401 - strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 402 - strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); 403 - device->driver_data = pr; 404 - 405 - result = acpi_processor_get_info(device); 406 - if (result) { 407 - /* Processor is physically not present */ 408 - return 0; 409 - } 410 - 411 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); 412 - 413 - /* 414 - * Buggy BIOS check 415 - * ACPI id of processors can be reported wrongly by the BIOS. 
416 - * Don't trust it blindly 417 - */ 418 - if (per_cpu(processor_device_array, pr->id) != NULL && 419 - per_cpu(processor_device_array, pr->id) != device) { 420 - printk(KERN_WARNING "BIOS reported wrong ACPI id " 421 - "for the processor\n"); 422 - result = -ENODEV; 423 - goto err_free_cpumask; 424 - } 425 - per_cpu(processor_device_array, pr->id) = device; 426 - 427 - per_cpu(processors, pr->id) = pr; 428 - 429 - result = acpi_processor_add_fs(device); 430 - if (result) 431 - goto err_free_cpumask; 432 - 433 - sysdev = get_cpu_sysdev(pr->id); 434 - if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { 435 - result = -EFAULT; 436 - goto err_remove_fs; 437 - } 438 - 439 - /* _PDC call should be done before doing anything else (if reqd.). */ 440 - acpi_processor_set_pdc(pr->handle); 441 - 442 - #ifdef CONFIG_CPU_FREQ 443 - acpi_processor_ppc_has_changed(pr, 0); 444 - #endif 445 - acpi_processor_get_throttling_info(pr); 446 - acpi_processor_get_limit_info(pr); 447 - 448 - 449 - acpi_processor_power_init(pr, device); 450 - 451 - pr->cdev = thermal_cooling_device_register("Processor", device, 452 - &processor_cooling_ops); 453 - if (IS_ERR(pr->cdev)) { 454 - result = PTR_ERR(pr->cdev); 455 - goto err_power_exit; 456 - } 457 - 458 - dev_dbg(&device->dev, "registered as cooling_device%d\n", 459 - pr->cdev->id); 460 - 461 - result = sysfs_create_link(&device->dev.kobj, 462 - &pr->cdev->device.kobj, 463 - "thermal_cooling"); 464 - if (result) { 465 - printk(KERN_ERR PREFIX "Create sysfs link\n"); 466 - goto err_thermal_unregister; 467 - } 468 - result = sysfs_create_link(&pr->cdev->device.kobj, 469 - &device->dev.kobj, 470 - "device"); 471 - if (result) { 472 - printk(KERN_ERR PREFIX "Create sysfs link\n"); 473 - goto err_remove_sysfs; 474 - } 475 - 476 - return 0; 477 - 478 - err_remove_sysfs: 479 - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 480 - err_thermal_unregister: 481 - thermal_cooling_device_unregister(pr->cdev); 482 - err_power_exit: 483 - acpi_processor_power_exit(pr, device); 484 - err_remove_fs: 485 - acpi_processor_remove_fs(device); 486 - err_free_cpumask: 487 - free_cpumask_var(pr->throttling.shared_cpu_map); 488 - 489 - return result; 490 - } 491 - 492 - static int acpi_processor_remove(struct acpi_device *device, int type) 493 - { 494 - struct acpi_processor *pr = NULL; 495 - 496 - 497 - if (!device || !acpi_driver_data(device)) 498 - return -EINVAL; 499 - 500 - pr = acpi_driver_data(device); 501 - 502 - if (pr->id >= nr_cpu_ids) 503 - goto free; 504 - 505 - if (type == ACPI_BUS_REMOVAL_EJECT) { 506 - if (acpi_processor_handle_eject(pr)) 507 - return -EINVAL; 508 - } 509 - 510 - acpi_processor_power_exit(pr, device); 511 - 512 - sysfs_remove_link(&device->dev.kobj, "sysdev"); 513 - 514 - acpi_processor_remove_fs(device); 515 - 516 - if (pr->cdev) { 517 - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 518 - sysfs_remove_link(&pr->cdev->device.kobj, "device"); 519 - thermal_cooling_device_unregister(pr->cdev); 520 - pr->cdev = NULL; 521 - } 522 - 523 - per_cpu(processors, pr->id) = NULL; 524 - per_cpu(processor_device_array, pr->id) = NULL; 525 - 526 - free: 527 - free_cpumask_var(pr->throttling.shared_cpu_map); 528 - kfree(pr); 529 - 530 - return 0; 531 - } 532 - 533 - #ifdef CONFIG_ACPI_HOTPLUG_CPU 534 - /**************************************************************************** 535 - * Acpi processor hotplug support * 536 - ****************************************************************************/ 537 - 538 - static int 
is_processor_present(acpi_handle handle) 539 - { 540 - acpi_status status; 541 - unsigned long long sta = 0; 542 - 543 - 544 - status = acpi_evaluate_integer(handle, "_STA", NULL, &sta); 545 - 546 - if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT)) 547 - return 1; 548 - 549 - /* 550 - * _STA is mandatory for a processor that supports hot plug 551 - */ 552 - if (status == AE_NOT_FOUND) 553 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 554 - "Processor does not support hot plug\n")); 555 - else 556 - ACPI_EXCEPTION((AE_INFO, status, 557 - "Processor Device is not present")); 558 - return 0; 559 - } 560 - 561 - static 562 - int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device) 563 - { 564 - acpi_handle phandle; 565 - struct acpi_device *pdev; 566 - 567 - 568 - if (acpi_get_parent(handle, &phandle)) { 569 - return -ENODEV; 570 - } 571 - 572 - if (acpi_bus_get_device(phandle, &pdev)) { 573 - return -ENODEV; 574 - } 575 - 576 - if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) { 577 - return -ENODEV; 578 - } 579 - 580 - return 0; 581 - } 582 - 583 - static void __ref acpi_processor_hotplug_notify(acpi_handle handle, 584 - u32 event, void *data) 585 - { 586 - struct acpi_processor *pr; 587 - struct acpi_device *device = NULL; 588 - int result; 589 - 590 - 591 - switch (event) { 592 - case ACPI_NOTIFY_BUS_CHECK: 593 - case ACPI_NOTIFY_DEVICE_CHECK: 594 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 595 - "Processor driver received %s event\n", 596 - (event == ACPI_NOTIFY_BUS_CHECK) ? 597 - "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK")); 598 - 599 - if (!is_processor_present(handle)) 600 - break; 601 - 602 - if (acpi_bus_get_device(handle, &device)) { 603 - result = acpi_processor_device_add(handle, &device); 604 - if (result) 605 - printk(KERN_ERR PREFIX 606 - "Unable to add the device\n"); 607 - break; 608 - } 609 - break; 610 - case ACPI_NOTIFY_EJECT_REQUEST: 611 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 612 - "received ACPI_NOTIFY_EJECT_REQUEST\n")); 613 - 614 - if (acpi_bus_get_device(handle, &device)) { 615 - printk(KERN_ERR PREFIX 616 - "Device don't exist, dropping EJECT\n"); 617 - break; 618 - } 619 - pr = acpi_driver_data(device); 620 - if (!pr) { 621 - printk(KERN_ERR PREFIX 622 - "Driver data is NULL, dropping EJECT\n"); 623 - return; 624 - } 625 - break; 626 - default: 627 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 628 - "Unsupported event [0x%x]\n", event)); 629 - break; 630 - } 631 - 632 - return; 633 - } 634 - 635 - static acpi_status 636 - processor_walk_namespace_cb(acpi_handle handle, 637 - u32 lvl, void *context, void **rv) 638 - { 639 - acpi_status status; 640 - int *action = context; 641 - acpi_object_type type = 0; 642 - 643 - status = acpi_get_type(handle, &type); 529 + status = acpi_get_type(handle, &acpi_type); 644 530 if (ACPI_FAILURE(status)) 645 - return (AE_OK); 531 + return false; 646 532 647 - if (type != ACPI_TYPE_PROCESSOR) 648 - return (AE_OK); 649 - 650 - switch (*action) { 651 - case INSTALL_NOTIFY_HANDLER: 652 - acpi_install_notify_handler(handle, 653 - ACPI_SYSTEM_NOTIFY, 654 - acpi_processor_hotplug_notify, 655 - NULL); 533 + switch (acpi_type) { 534 + case ACPI_TYPE_PROCESSOR: 535 + status = acpi_evaluate_object(handle, NULL, NULL, &buffer); 536 + if (ACPI_FAILURE(status)) 537 + return false; 538 + acpi_id = object.processor.proc_id; 656 539 break; 657 - case UNINSTALL_NOTIFY_HANDLER: 658 - acpi_remove_notify_handler(handle, 659 - ACPI_SYSTEM_NOTIFY, 660 - acpi_processor_hotplug_notify); 540 + case ACPI_TYPE_DEVICE: 541 + status = 
acpi_evaluate_integer(handle, "_UID", NULL, &tmp); 542 + if (ACPI_FAILURE(status)) 543 + return false; 544 + acpi_id = tmp; 661 545 break; 662 546 default: 663 - break; 547 + return false; 664 548 } 665 549 666 - return (AE_OK); 550 + type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0; 551 + cpuid = acpi_get_cpuid(handle, type, acpi_id); 552 + 553 + if (cpuid == -1) 554 + return false; 555 + 556 + return true; 667 557 } 668 558 669 - static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) 559 + static void acpi_set_pdc_bits(u32 *buf) 670 560 { 561 + buf[0] = ACPI_PDC_REVISION_ID; 562 + buf[1] = 1; 671 563 672 - if (!is_processor_present(handle)) { 673 - return AE_ERROR; 564 + /* Enable coordination with firmware's _TSD info */ 565 + buf[2] = ACPI_PDC_SMP_T_SWCOORD; 566 + 567 + /* Twiddle arch-specific bits needed for _PDC */ 568 + arch_acpi_set_pdc_bits(buf); 569 + } 570 + 571 + static struct acpi_object_list *acpi_processor_alloc_pdc(void) 572 + { 573 + struct acpi_object_list *obj_list; 574 + union acpi_object *obj; 575 + u32 *buf; 576 + 577 + /* allocate and initialize pdc. It will be used later. */ 578 + obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL); 579 + if (!obj_list) { 580 + printk(KERN_ERR "Memory allocation error\n"); 581 + return NULL; 674 582 } 675 583 676 - if (acpi_map_lsapic(handle, p_cpu)) 677 - return AE_ERROR; 678 - 679 - if (arch_register_cpu(*p_cpu)) { 680 - acpi_unmap_lsapic(*p_cpu); 681 - return AE_ERROR; 584 + obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL); 585 + if (!obj) { 586 + printk(KERN_ERR "Memory allocation error\n"); 587 + kfree(obj_list); 588 + return NULL; 682 589 } 683 590 684 - return AE_OK; 685 - } 591 + buf = kmalloc(12, GFP_KERNEL); 592 + if (!buf) { 593 + printk(KERN_ERR "Memory allocation error\n"); 594 + kfree(obj); 595 + kfree(obj_list); 596 + return NULL; 597 + } 686 598 687 - static int acpi_processor_handle_eject(struct acpi_processor *pr) 688 - { 689 - if (cpu_online(pr->id)) 690 - cpu_down(pr->id); 599 + acpi_set_pdc_bits(buf); 691 600 692 - arch_unregister_cpu(pr->id); 693 - acpi_unmap_lsapic(pr->id); 694 - return (0); 695 - } 696 - #else 697 - static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) 698 - { 699 - return AE_ERROR; 700 - } 701 - static int acpi_processor_handle_eject(struct acpi_processor *pr) 702 - { 703 - return (-EINVAL); 704 - } 705 - #endif 601 + obj->type = ACPI_TYPE_BUFFER; 602 + obj->buffer.length = 12; 603 + obj->buffer.pointer = (u8 *) buf; 604 + obj_list->count = 1; 605 + obj_list->pointer = obj; 706 606 707 - static 708 - void acpi_processor_install_hotplug_notify(void) 709 - { 710 - #ifdef CONFIG_ACPI_HOTPLUG_CPU 711 - int action = INSTALL_NOTIFY_HANDLER; 712 - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, 713 - ACPI_ROOT_OBJECT, 714 - ACPI_UINT32_MAX, 715 - processor_walk_namespace_cb, NULL, &action, NULL); 716 - #endif 717 - register_hotcpu_notifier(&acpi_cpu_notifier); 718 - } 719 - 720 - static 721 - void acpi_processor_uninstall_hotplug_notify(void) 722 - { 723 - #ifdef CONFIG_ACPI_HOTPLUG_CPU 724 - int action = UNINSTALL_NOTIFY_HANDLER; 725 - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, 726 - ACPI_ROOT_OBJECT, 727 - ACPI_UINT32_MAX, 728 - processor_walk_namespace_cb, NULL, &action, NULL); 729 - #endif 730 - unregister_hotcpu_notifier(&acpi_cpu_notifier); 607 + return obj_list; 731 608 } 732 609 733 610 /* 734 - * We keep the driver loaded even when ACPI is not running. 
735 - * This is needed for the powernow-k8 driver, that works even without 736 - * ACPI, but needs symbols from this driver 611 + * _PDC is required for a BIOS-OS handshake for most of the newer 612 + * ACPI processor features. 737 613 */ 738 - 739 - static int __init acpi_processor_init(void) 614 + static int 615 + acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in) 740 616 { 741 - int result = 0; 617 + acpi_status status = AE_OK; 742 618 743 - if (acpi_disabled) 744 - return 0; 619 + if (idle_nomwait) { 620 + /* 621 + * If mwait is disabled for CPU C-states, the C2C3_FFH access 622 + * mode will be disabled in the parameter of _PDC object. 623 + * Of course C1_FFH access mode will also be disabled. 624 + */ 625 + union acpi_object *obj; 626 + u32 *buffer = NULL; 745 627 746 - memset(&errata, 0, sizeof(errata)); 628 + obj = pdc_in->pointer; 629 + buffer = (u32 *)(obj->buffer.pointer); 630 + buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH); 747 631 748 - #ifdef CONFIG_SMP 749 - if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, 750 - (struct acpi_table_header **)&madt))) 751 - madt = NULL; 752 - #endif 753 - #ifdef CONFIG_ACPI_PROCFS 754 - acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); 755 - if (!acpi_processor_dir) 756 - return -ENOMEM; 757 - #endif 758 - result = cpuidle_register_driver(&acpi_idle_driver); 759 - if (result < 0) 760 - goto out_proc; 632 + } 633 + status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL); 761 634 762 - result = acpi_bus_register_driver(&acpi_processor_driver); 763 - if (result < 0) 764 - goto out_cpuidle; 635 + if (ACPI_FAILURE(status)) 636 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 637 + "Could not evaluate _PDC, using legacy perf. control.\n")); 765 638 766 - acpi_processor_install_hotplug_notify(); 767 - 768 - acpi_thermal_cpufreq_init(); 769 - 770 - acpi_processor_ppc_init(); 771 - 772 - acpi_processor_throttling_init(); 773 - 774 - return 0; 775 - 776 - out_cpuidle: 777 - cpuidle_unregister_driver(&acpi_idle_driver); 778 - 779 - out_proc: 780 - #ifdef CONFIG_ACPI_PROCFS 781 - remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); 782 - #endif 783 - 784 - return result; 639 + return status; 785 640 } 786 641 787 - static void __exit acpi_processor_exit(void) 642 + void acpi_processor_set_pdc(acpi_handle handle) 788 643 { 789 - if (acpi_disabled) 644 + struct acpi_object_list *obj_list; 645 + 646 + if (arch_has_acpi_pdc() == false) 790 647 return; 791 648 792 - acpi_processor_ppc_exit(); 649 + obj_list = acpi_processor_alloc_pdc(); 650 + if (!obj_list) 651 + return; 793 652 794 - acpi_thermal_cpufreq_exit(); 653 + acpi_processor_eval_pdc(handle, obj_list); 795 654 796 - acpi_processor_uninstall_hotplug_notify(); 655 + kfree(obj_list->pointer->buffer.pointer); 656 + kfree(obj_list->pointer); 657 + kfree(obj_list); 658 + } 659 + EXPORT_SYMBOL_GPL(acpi_processor_set_pdc); 797 660 798 - acpi_bus_unregister_driver(&acpi_processor_driver); 661 + static acpi_status 662 + early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv) 663 + { 664 + if (processor_physically_present(handle) == false) 665 + return AE_OK; 799 666 800 - cpuidle_unregister_driver(&acpi_idle_driver); 801 - 802 - #ifdef CONFIG_ACPI_PROCFS 803 - remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); 804 - #endif 805 - 806 - return; 667 + acpi_processor_set_pdc(handle); 668 + return AE_OK; 807 669 } 808 670 809 - module_init(acpi_processor_init); 810 - module_exit(acpi_processor_exit); 671 + void __init 
acpi_early_processor_set_pdc(void) 672 + { 673 + /* 674 + * Check whether the system is DMI table. If yes, OSPM 675 + * should not use mwait for CPU-states. 676 + */ 677 + dmi_check_system(processor_idle_dmi_table); 811 678 812 - EXPORT_SYMBOL(acpi_processor_set_thermal_limit); 813 - 814 - MODULE_ALIAS("processor"); 679 + acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 680 + ACPI_UINT32_MAX, 681 + early_init_pdc, NULL, NULL, NULL); 682 + }
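
processor_core.c now exports the two pieces the architecture code needs: acpi_get_cpuid() maps an ACPI processor object to a logical CPU, and acpi_processor_set_pdc() performs the BIOS-OS _PDC handshake. A minimal sketch of combining them, with an illustrative function name (the real callers are the ia64/x86 hotplug paths shown earlier in this merge):

    /* Illustrative sketch only. */
    #include <linux/errno.h>
    #include <acpi/processor.h>

    static int example_map_and_handshake(acpi_handle handle, u32 acpi_id)
    {
    	/* type = 1: processor declared via a Device statement (match _UID);
    	 * type = 0: declared via a Processor statement (match ProcessorID). */
    	int cpu = acpi_get_cpuid(handle, 1, acpi_id);

    	if (cpu == -1)
    		return -ENODEV;		/* not enumerated in the MADT */

    	acpi_processor_set_pdc(handle);	/* advertise OS capabilities via _PDC */
    	return cpu;
    }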
+978
drivers/acpi/processor_driver.c
··· 1 + /* 2 + * acpi_processor.c - ACPI Processor Driver ($Revision: 71 $) 3 + * 4 + * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 5 + * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 6 + * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de> 7 + * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com> 8 + * - Added processor hotplug support 9 + * 10 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 11 + * 12 + * This program is free software; you can redistribute it and/or modify 13 + * it under the terms of the GNU General Public License as published by 14 + * the Free Software Foundation; either version 2 of the License, or (at 15 + * your option) any later version. 16 + * 17 + * This program is distributed in the hope that it will be useful, but 18 + * WITHOUT ANY WARRANTY; without even the implied warranty of 19 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 20 + * General Public License for more details. 21 + * 22 + * You should have received a copy of the GNU General Public License along 23 + * with this program; if not, write to the Free Software Foundation, Inc., 24 + * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 25 + * 26 + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 27 + * TBD: 28 + * 1. Make # power states dynamic. 29 + * 2. Support duty_cycle values that span bit 4. 30 + * 3. Optimize by having scheduler determine business instead of 31 + * having us try to calculate it here. 32 + * 4. Need C1 timing -- must modify kernel (IRQ handler) to get this. 33 + */ 34 + 35 + #include <linux/kernel.h> 36 + #include <linux/module.h> 37 + #include <linux/init.h> 38 + #include <linux/types.h> 39 + #include <linux/pci.h> 40 + #include <linux/pm.h> 41 + #include <linux/cpufreq.h> 42 + #include <linux/cpu.h> 43 + #include <linux/proc_fs.h> 44 + #include <linux/seq_file.h> 45 + #include <linux/dmi.h> 46 + #include <linux/moduleparam.h> 47 + #include <linux/cpuidle.h> 48 + 49 + #include <asm/io.h> 50 + #include <asm/system.h> 51 + #include <asm/cpu.h> 52 + #include <asm/delay.h> 53 + #include <asm/uaccess.h> 54 + #include <asm/processor.h> 55 + #include <asm/smp.h> 56 + #include <asm/acpi.h> 57 + 58 + #include <acpi/acpi_bus.h> 59 + #include <acpi/acpi_drivers.h> 60 + #include <acpi/processor.h> 61 + 62 + #define PREFIX "ACPI: " 63 + 64 + #define ACPI_PROCESSOR_CLASS "processor" 65 + #define ACPI_PROCESSOR_DEVICE_NAME "Processor" 66 + #define ACPI_PROCESSOR_FILE_INFO "info" 67 + #define ACPI_PROCESSOR_FILE_THROTTLING "throttling" 68 + #define ACPI_PROCESSOR_FILE_LIMIT "limit" 69 + #define ACPI_PROCESSOR_NOTIFY_PERFORMANCE 0x80 70 + #define ACPI_PROCESSOR_NOTIFY_POWER 0x81 71 + #define ACPI_PROCESSOR_NOTIFY_THROTTLING 0x82 72 + 73 + #define ACPI_PROCESSOR_LIMIT_USER 0 74 + #define ACPI_PROCESSOR_LIMIT_THERMAL 1 75 + 76 + #define _COMPONENT ACPI_PROCESSOR_COMPONENT 77 + ACPI_MODULE_NAME("processor_driver"); 78 + 79 + MODULE_AUTHOR("Paul Diefenbaugh"); 80 + MODULE_DESCRIPTION("ACPI Processor Driver"); 81 + MODULE_LICENSE("GPL"); 82 + 83 + static int acpi_processor_add(struct acpi_device *device); 84 + static int acpi_processor_remove(struct acpi_device *device, int type); 85 + #ifdef CONFIG_ACPI_PROCFS 86 + static int acpi_processor_info_open_fs(struct inode *inode, struct file *file); 87 + #endif 88 + static void acpi_processor_notify(struct acpi_device *device, u32 event); 89 + static acpi_status acpi_processor_hotadd_init(acpi_handle 
handle, int *p_cpu); 90 + static int acpi_processor_handle_eject(struct acpi_processor *pr); 91 + 92 + 93 + static const struct acpi_device_id processor_device_ids[] = { 94 + {ACPI_PROCESSOR_OBJECT_HID, 0}, 95 + {"ACPI0007", 0}, 96 + {"", 0}, 97 + }; 98 + MODULE_DEVICE_TABLE(acpi, processor_device_ids); 99 + 100 + static struct acpi_driver acpi_processor_driver = { 101 + .name = "processor", 102 + .class = ACPI_PROCESSOR_CLASS, 103 + .ids = processor_device_ids, 104 + .ops = { 105 + .add = acpi_processor_add, 106 + .remove = acpi_processor_remove, 107 + .suspend = acpi_processor_suspend, 108 + .resume = acpi_processor_resume, 109 + .notify = acpi_processor_notify, 110 + }, 111 + }; 112 + 113 + #define INSTALL_NOTIFY_HANDLER 1 114 + #define UNINSTALL_NOTIFY_HANDLER 2 115 + #ifdef CONFIG_ACPI_PROCFS 116 + static const struct file_operations acpi_processor_info_fops = { 117 + .owner = THIS_MODULE, 118 + .open = acpi_processor_info_open_fs, 119 + .read = seq_read, 120 + .llseek = seq_lseek, 121 + .release = single_release, 122 + }; 123 + #endif 124 + 125 + DEFINE_PER_CPU(struct acpi_processor *, processors); 126 + EXPORT_PER_CPU_SYMBOL(processors); 127 + 128 + struct acpi_processor_errata errata __read_mostly; 129 + 130 + /* -------------------------------------------------------------------------- 131 + Errata Handling 132 + -------------------------------------------------------------------------- */ 133 + 134 + static int acpi_processor_errata_piix4(struct pci_dev *dev) 135 + { 136 + u8 value1 = 0; 137 + u8 value2 = 0; 138 + 139 + 140 + if (!dev) 141 + return -EINVAL; 142 + 143 + /* 144 + * Note that 'dev' references the PIIX4 ACPI Controller. 145 + */ 146 + 147 + switch (dev->revision) { 148 + case 0: 149 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 A-step\n")); 150 + break; 151 + case 1: 152 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4 B-step\n")); 153 + break; 154 + case 2: 155 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4E\n")); 156 + break; 157 + case 3: 158 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found PIIX4M\n")); 159 + break; 160 + default: 161 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found unknown PIIX4\n")); 162 + break; 163 + } 164 + 165 + switch (dev->revision) { 166 + 167 + case 0: /* PIIX4 A-step */ 168 + case 1: /* PIIX4 B-step */ 169 + /* 170 + * See specification changes #13 ("Manual Throttle Duty Cycle") 171 + * and #14 ("Enabling and Disabling Manual Throttle"), plus 172 + * erratum #5 ("STPCLK# Deassertion Time") from the January 173 + * 2002 PIIX4 specification update. Applies to only older 174 + * PIIX4 models. 175 + */ 176 + errata.piix4.throttle = 1; 177 + 178 + case 2: /* PIIX4E */ 179 + case 3: /* PIIX4M */ 180 + /* 181 + * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA 182 + * Livelock") from the January 2002 PIIX4 specification update. 183 + * Applies to all PIIX4 models. 184 + */ 185 + 186 + /* 187 + * BM-IDE 188 + * ------ 189 + * Find the PIIX4 IDE Controller and get the Bus Master IDE 190 + * Status register address. We'll use this later to read 191 + * each IDE controller's DMA status to make sure we catch all 192 + * DMA activity. 
193 + */ 194 + dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 195 + PCI_DEVICE_ID_INTEL_82371AB, 196 + PCI_ANY_ID, PCI_ANY_ID, NULL); 197 + if (dev) { 198 + errata.piix4.bmisx = pci_resource_start(dev, 4); 199 + pci_dev_put(dev); 200 + } 201 + 202 + /* 203 + * Type-F DMA 204 + * ---------- 205 + * Find the PIIX4 ISA Controller and read the Motherboard 206 + * DMA controller's status to see if Type-F (Fast) DMA mode 207 + * is enabled (bit 7) on either channel. Note that we'll 208 + * disable C3 support if this is enabled, as some legacy 209 + * devices won't operate well if fast DMA is disabled. 210 + */ 211 + dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 212 + PCI_DEVICE_ID_INTEL_82371AB_0, 213 + PCI_ANY_ID, PCI_ANY_ID, NULL); 214 + if (dev) { 215 + pci_read_config_byte(dev, 0x76, &value1); 216 + pci_read_config_byte(dev, 0x77, &value2); 217 + if ((value1 & 0x80) || (value2 & 0x80)) 218 + errata.piix4.fdma = 1; 219 + pci_dev_put(dev); 220 + } 221 + 222 + break; 223 + } 224 + 225 + if (errata.piix4.bmisx) 226 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 227 + "Bus master activity detection (BM-IDE) erratum enabled\n")); 228 + if (errata.piix4.fdma) 229 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 230 + "Type-F DMA livelock erratum (C3 disabled)\n")); 231 + 232 + return 0; 233 + } 234 + 235 + static int acpi_processor_errata(struct acpi_processor *pr) 236 + { 237 + int result = 0; 238 + struct pci_dev *dev = NULL; 239 + 240 + 241 + if (!pr) 242 + return -EINVAL; 243 + 244 + /* 245 + * PIIX4 246 + */ 247 + dev = pci_get_subsys(PCI_VENDOR_ID_INTEL, 248 + PCI_DEVICE_ID_INTEL_82371AB_3, PCI_ANY_ID, 249 + PCI_ANY_ID, NULL); 250 + if (dev) { 251 + result = acpi_processor_errata_piix4(dev); 252 + pci_dev_put(dev); 253 + } 254 + 255 + return result; 256 + } 257 + 258 + /* -------------------------------------------------------------------------- 259 + FS Interface (/proc) 260 + -------------------------------------------------------------------------- */ 261 + 262 + #ifdef CONFIG_ACPI_PROCFS 263 + static struct proc_dir_entry *acpi_processor_dir = NULL; 264 + 265 + static int acpi_processor_info_seq_show(struct seq_file *seq, void *offset) 266 + { 267 + struct acpi_processor *pr = seq->private; 268 + 269 + 270 + if (!pr) 271 + goto end; 272 + 273 + seq_printf(seq, "processor id: %d\n" 274 + "acpi id: %d\n" 275 + "bus mastering control: %s\n" 276 + "power management: %s\n" 277 + "throttling control: %s\n" 278 + "limit interface: %s\n", 279 + pr->id, 280 + pr->acpi_id, 281 + pr->flags.bm_control ? "yes" : "no", 282 + pr->flags.power ? "yes" : "no", 283 + pr->flags.throttling ? "yes" : "no", 284 + pr->flags.limit ? 
"yes" : "no"); 285 + 286 + end: 287 + return 0; 288 + } 289 + 290 + static int acpi_processor_info_open_fs(struct inode *inode, struct file *file) 291 + { 292 + return single_open(file, acpi_processor_info_seq_show, 293 + PDE(inode)->data); 294 + } 295 + 296 + static int __cpuinit acpi_processor_add_fs(struct acpi_device *device) 297 + { 298 + struct proc_dir_entry *entry = NULL; 299 + 300 + 301 + if (!acpi_device_dir(device)) { 302 + acpi_device_dir(device) = proc_mkdir(acpi_device_bid(device), 303 + acpi_processor_dir); 304 + if (!acpi_device_dir(device)) 305 + return -ENODEV; 306 + } 307 + 308 + /* 'info' [R] */ 309 + entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO, 310 + S_IRUGO, acpi_device_dir(device), 311 + &acpi_processor_info_fops, 312 + acpi_driver_data(device)); 313 + if (!entry) 314 + return -EIO; 315 + 316 + /* 'throttling' [R/W] */ 317 + entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING, 318 + S_IFREG | S_IRUGO | S_IWUSR, 319 + acpi_device_dir(device), 320 + &acpi_processor_throttling_fops, 321 + acpi_driver_data(device)); 322 + if (!entry) 323 + return -EIO; 324 + 325 + /* 'limit' [R/W] */ 326 + entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT, 327 + S_IFREG | S_IRUGO | S_IWUSR, 328 + acpi_device_dir(device), 329 + &acpi_processor_limit_fops, 330 + acpi_driver_data(device)); 331 + if (!entry) 332 + return -EIO; 333 + return 0; 334 + } 335 + static int acpi_processor_remove_fs(struct acpi_device *device) 336 + { 337 + 338 + if (acpi_device_dir(device)) { 339 + remove_proc_entry(ACPI_PROCESSOR_FILE_INFO, 340 + acpi_device_dir(device)); 341 + remove_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, 342 + acpi_device_dir(device)); 343 + remove_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, 344 + acpi_device_dir(device)); 345 + remove_proc_entry(acpi_device_bid(device), acpi_processor_dir); 346 + acpi_device_dir(device) = NULL; 347 + } 348 + 349 + return 0; 350 + } 351 + #else 352 + static inline int acpi_processor_add_fs(struct acpi_device *device) 353 + { 354 + return 0; 355 + } 356 + static inline int acpi_processor_remove_fs(struct acpi_device *device) 357 + { 358 + return 0; 359 + } 360 + #endif 361 + 362 + /* -------------------------------------------------------------------------- 363 + Driver Interface 364 + -------------------------------------------------------------------------- */ 365 + 366 + static int acpi_processor_get_info(struct acpi_device *device) 367 + { 368 + acpi_status status = 0; 369 + union acpi_object object = { 0 }; 370 + struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; 371 + struct acpi_processor *pr; 372 + int cpu_index, device_declaration = 0; 373 + static int cpu0_initialized; 374 + 375 + pr = acpi_driver_data(device); 376 + if (!pr) 377 + return -EINVAL; 378 + 379 + if (num_online_cpus() > 1) 380 + errata.smp = TRUE; 381 + 382 + acpi_processor_errata(pr); 383 + 384 + /* 385 + * Check to see if we have bus mastering arbitration control. This 386 + * is required for proper C3 usage (to maintain cache coherency). 
387 + */ 388 + if (acpi_gbl_FADT.pm2_control_block && acpi_gbl_FADT.pm2_control_length) { 389 + pr->flags.bm_control = 1; 390 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 391 + "Bus mastering arbitration control present\n")); 392 + } else 393 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 394 + "No bus mastering arbitration control\n")); 395 + 396 + if (!strcmp(acpi_device_hid(device), ACPI_PROCESSOR_OBJECT_HID)) { 397 + /* Declared with "Processor" statement; match ProcessorID */ 398 + status = acpi_evaluate_object(pr->handle, NULL, NULL, &buffer); 399 + if (ACPI_FAILURE(status)) { 400 + printk(KERN_ERR PREFIX "Evaluating processor object\n"); 401 + return -ENODEV; 402 + } 403 + 404 + /* 405 + * TBD: Synch processor ID (via LAPIC/LSAPIC structures) on SMP. 406 + * >>> 'acpi_get_processor_id(acpi_id, &id)' in 407 + * arch/xxx/acpi.c 408 + */ 409 + pr->acpi_id = object.processor.proc_id; 410 + } else { 411 + /* 412 + * Declared with "Device" statement; match _UID. 413 + * Note that we don't handle string _UIDs yet. 414 + */ 415 + unsigned long long value; 416 + status = acpi_evaluate_integer(pr->handle, METHOD_NAME__UID, 417 + NULL, &value); 418 + if (ACPI_FAILURE(status)) { 419 + printk(KERN_ERR PREFIX 420 + "Evaluating processor _UID [%#x]\n", status); 421 + return -ENODEV; 422 + } 423 + device_declaration = 1; 424 + pr->acpi_id = value; 425 + } 426 + cpu_index = acpi_get_cpuid(pr->handle, device_declaration, pr->acpi_id); 427 + 428 + /* Handle UP system running SMP kernel, with no LAPIC in MADT */ 429 + if (!cpu0_initialized && (cpu_index == -1) && 430 + (num_online_cpus() == 1)) { 431 + cpu_index = 0; 432 + } 433 + 434 + cpu0_initialized = 1; 435 + 436 + pr->id = cpu_index; 437 + 438 + /* 439 + * Extra Processor objects may be enumerated on MP systems with 440 + * less than the max # of CPUs. They should be ignored _iff 441 + * they are physically not present. 442 + */ 443 + if (pr->id == -1) { 444 + if (ACPI_FAILURE 445 + (acpi_processor_hotadd_init(pr->handle, &pr->id))) { 446 + return -ENODEV; 447 + } 448 + } 449 + /* 450 + * On some boxes several processors use the same processor bus id. 451 + * But they are located in different scope. For example: 452 + * \_SB.SCK0.CPU0 453 + * \_SB.SCK1.CPU0 454 + * Rename the processor device bus id. And the new bus id will be 455 + * generated as the following format: 456 + * CPU+CPU ID. 457 + */ 458 + sprintf(acpi_device_bid(device), "CPU%X", pr->id); 459 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Processor [%d:%d]\n", pr->id, 460 + pr->acpi_id)); 461 + 462 + if (!object.processor.pblk_address) 463 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No PBLK (NULL address)\n")); 464 + else if (object.processor.pblk_length != 6) 465 + printk(KERN_ERR PREFIX "Invalid PBLK length [%d]\n", 466 + object.processor.pblk_length); 467 + else { 468 + pr->throttling.address = object.processor.pblk_address; 469 + pr->throttling.duty_offset = acpi_gbl_FADT.duty_offset; 470 + pr->throttling.duty_width = acpi_gbl_FADT.duty_width; 471 + 472 + pr->pblk = object.processor.pblk_address; 473 + 474 + /* 475 + * We don't care about error returns - we just try to mark 476 + * these reserved so that nobody else is confused into thinking 477 + * that this region might be unused.. 
478 + * 479 + * (In particular, allocating the IO range for Cardbus) 480 + */ 481 + request_region(pr->throttling.address, 6, "ACPI CPU throttle"); 482 + } 483 + 484 + /* 485 + * If ACPI describes a slot number for this CPU, we can use it 486 + * ensure we get the right value in the "physical id" field 487 + * of /proc/cpuinfo 488 + */ 489 + status = acpi_evaluate_object(pr->handle, "_SUN", NULL, &buffer); 490 + if (ACPI_SUCCESS(status)) 491 + arch_fix_phys_package_id(pr->id, object.integer.value); 492 + 493 + return 0; 494 + } 495 + 496 + static DEFINE_PER_CPU(void *, processor_device_array); 497 + 498 + static void acpi_processor_notify(struct acpi_device *device, u32 event) 499 + { 500 + struct acpi_processor *pr = acpi_driver_data(device); 501 + int saved; 502 + 503 + if (!pr) 504 + return; 505 + 506 + switch (event) { 507 + case ACPI_PROCESSOR_NOTIFY_PERFORMANCE: 508 + saved = pr->performance_platform_limit; 509 + acpi_processor_ppc_has_changed(pr, 1); 510 + if (saved == pr->performance_platform_limit) 511 + break; 512 + acpi_bus_generate_proc_event(device, event, 513 + pr->performance_platform_limit); 514 + acpi_bus_generate_netlink_event(device->pnp.device_class, 515 + dev_name(&device->dev), event, 516 + pr->performance_platform_limit); 517 + break; 518 + case ACPI_PROCESSOR_NOTIFY_POWER: 519 + acpi_processor_cst_has_changed(pr); 520 + acpi_bus_generate_proc_event(device, event, 0); 521 + acpi_bus_generate_netlink_event(device->pnp.device_class, 522 + dev_name(&device->dev), event, 0); 523 + break; 524 + case ACPI_PROCESSOR_NOTIFY_THROTTLING: 525 + acpi_processor_tstate_has_changed(pr); 526 + acpi_bus_generate_proc_event(device, event, 0); 527 + acpi_bus_generate_netlink_event(device->pnp.device_class, 528 + dev_name(&device->dev), event, 0); 529 + default: 530 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 531 + "Unsupported event [0x%x]\n", event)); 532 + break; 533 + } 534 + 535 + return; 536 + } 537 + 538 + static int acpi_cpu_soft_notify(struct notifier_block *nfb, 539 + unsigned long action, void *hcpu) 540 + { 541 + unsigned int cpu = (unsigned long)hcpu; 542 + struct acpi_processor *pr = per_cpu(processors, cpu); 543 + 544 + if (action == CPU_ONLINE && pr) { 545 + acpi_processor_ppc_has_changed(pr, 0); 546 + acpi_processor_cst_has_changed(pr); 547 + acpi_processor_tstate_has_changed(pr); 548 + } 549 + return NOTIFY_OK; 550 + } 551 + 552 + static struct notifier_block acpi_cpu_notifier = 553 + { 554 + .notifier_call = acpi_cpu_soft_notify, 555 + }; 556 + 557 + static int __cpuinit acpi_processor_add(struct acpi_device *device) 558 + { 559 + struct acpi_processor *pr = NULL; 560 + int result = 0; 561 + struct sys_device *sysdev; 562 + 563 + pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL); 564 + if (!pr) 565 + return -ENOMEM; 566 + 567 + if (!zalloc_cpumask_var(&pr->throttling.shared_cpu_map, GFP_KERNEL)) { 568 + kfree(pr); 569 + return -ENOMEM; 570 + } 571 + 572 + pr->handle = device->handle; 573 + strcpy(acpi_device_name(device), ACPI_PROCESSOR_DEVICE_NAME); 574 + strcpy(acpi_device_class(device), ACPI_PROCESSOR_CLASS); 575 + device->driver_data = pr; 576 + 577 + result = acpi_processor_get_info(device); 578 + if (result) { 579 + /* Processor is physically not present */ 580 + return 0; 581 + } 582 + 583 + BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); 584 + 585 + /* 586 + * Buggy BIOS check 587 + * ACPI id of processors can be reported wrongly by the BIOS. 
588 + * Don't trust it blindly 589 + */ 590 + if (per_cpu(processor_device_array, pr->id) != NULL && 591 + per_cpu(processor_device_array, pr->id) != device) { 592 + printk(KERN_WARNING "BIOS reported wrong ACPI id " 593 + "for the processor\n"); 594 + result = -ENODEV; 595 + goto err_free_cpumask; 596 + } 597 + per_cpu(processor_device_array, pr->id) = device; 598 + 599 + per_cpu(processors, pr->id) = pr; 600 + 601 + result = acpi_processor_add_fs(device); 602 + if (result) 603 + goto err_free_cpumask; 604 + 605 + sysdev = get_cpu_sysdev(pr->id); 606 + if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) { 607 + result = -EFAULT; 608 + goto err_remove_fs; 609 + } 610 + 611 + #ifdef CONFIG_CPU_FREQ 612 + acpi_processor_ppc_has_changed(pr, 0); 613 + #endif 614 + acpi_processor_get_throttling_info(pr); 615 + acpi_processor_get_limit_info(pr); 616 + 617 + 618 + acpi_processor_power_init(pr, device); 619 + 620 + pr->cdev = thermal_cooling_device_register("Processor", device, 621 + &processor_cooling_ops); 622 + if (IS_ERR(pr->cdev)) { 623 + result = PTR_ERR(pr->cdev); 624 + goto err_power_exit; 625 + } 626 + 627 + dev_dbg(&device->dev, "registered as cooling_device%d\n", 628 + pr->cdev->id); 629 + 630 + result = sysfs_create_link(&device->dev.kobj, 631 + &pr->cdev->device.kobj, 632 + "thermal_cooling"); 633 + if (result) { 634 + printk(KERN_ERR PREFIX "Create sysfs link\n"); 635 + goto err_thermal_unregister; 636 + } 637 + result = sysfs_create_link(&pr->cdev->device.kobj, 638 + &device->dev.kobj, 639 + "device"); 640 + if (result) { 641 + printk(KERN_ERR PREFIX "Create sysfs link\n"); 642 + goto err_remove_sysfs; 643 + } 644 + 645 + return 0; 646 + 647 + err_remove_sysfs: 648 + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 649 + err_thermal_unregister: 650 + thermal_cooling_device_unregister(pr->cdev); 651 + err_power_exit: 652 + acpi_processor_power_exit(pr, device); 653 + err_remove_fs: 654 + acpi_processor_remove_fs(device); 655 + err_free_cpumask: 656 + free_cpumask_var(pr->throttling.shared_cpu_map); 657 + 658 + return result; 659 + } 660 + 661 + static int acpi_processor_remove(struct acpi_device *device, int type) 662 + { 663 + struct acpi_processor *pr = NULL; 664 + 665 + 666 + if (!device || !acpi_driver_data(device)) 667 + return -EINVAL; 668 + 669 + pr = acpi_driver_data(device); 670 + 671 + if (pr->id >= nr_cpu_ids) 672 + goto free; 673 + 674 + if (type == ACPI_BUS_REMOVAL_EJECT) { 675 + if (acpi_processor_handle_eject(pr)) 676 + return -EINVAL; 677 + } 678 + 679 + acpi_processor_power_exit(pr, device); 680 + 681 + sysfs_remove_link(&device->dev.kobj, "sysdev"); 682 + 683 + acpi_processor_remove_fs(device); 684 + 685 + if (pr->cdev) { 686 + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 687 + sysfs_remove_link(&pr->cdev->device.kobj, "device"); 688 + thermal_cooling_device_unregister(pr->cdev); 689 + pr->cdev = NULL; 690 + } 691 + 692 + per_cpu(processors, pr->id) = NULL; 693 + per_cpu(processor_device_array, pr->id) = NULL; 694 + 695 + free: 696 + free_cpumask_var(pr->throttling.shared_cpu_map); 697 + kfree(pr); 698 + 699 + return 0; 700 + } 701 + 702 + #ifdef CONFIG_ACPI_HOTPLUG_CPU 703 + /**************************************************************************** 704 + * Acpi processor hotplug support * 705 + ****************************************************************************/ 706 + 707 + static int is_processor_present(acpi_handle handle) 708 + { 709 + acpi_status status; 710 + unsigned long long sta = 0; 711 + 712 + 713 + status = 
acpi_evaluate_integer(handle, "_STA", NULL, &sta); 714 + 715 + if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_PRESENT)) 716 + return 1; 717 + 718 + /* 719 + * _STA is mandatory for a processor that supports hot plug 720 + */ 721 + if (status == AE_NOT_FOUND) 722 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 723 + "Processor does not support hot plug\n")); 724 + else 725 + ACPI_EXCEPTION((AE_INFO, status, 726 + "Processor Device is not present")); 727 + return 0; 728 + } 729 + 730 + static 731 + int acpi_processor_device_add(acpi_handle handle, struct acpi_device **device) 732 + { 733 + acpi_handle phandle; 734 + struct acpi_device *pdev; 735 + 736 + 737 + if (acpi_get_parent(handle, &phandle)) { 738 + return -ENODEV; 739 + } 740 + 741 + if (acpi_bus_get_device(phandle, &pdev)) { 742 + return -ENODEV; 743 + } 744 + 745 + if (acpi_bus_add(device, pdev, handle, ACPI_BUS_TYPE_PROCESSOR)) { 746 + return -ENODEV; 747 + } 748 + 749 + return 0; 750 + } 751 + 752 + static void __ref acpi_processor_hotplug_notify(acpi_handle handle, 753 + u32 event, void *data) 754 + { 755 + struct acpi_processor *pr; 756 + struct acpi_device *device = NULL; 757 + int result; 758 + 759 + 760 + switch (event) { 761 + case ACPI_NOTIFY_BUS_CHECK: 762 + case ACPI_NOTIFY_DEVICE_CHECK: 763 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 764 + "Processor driver received %s event\n", 765 + (event == ACPI_NOTIFY_BUS_CHECK) ? 766 + "ACPI_NOTIFY_BUS_CHECK" : "ACPI_NOTIFY_DEVICE_CHECK")); 767 + 768 + if (!is_processor_present(handle)) 769 + break; 770 + 771 + if (acpi_bus_get_device(handle, &device)) { 772 + result = acpi_processor_device_add(handle, &device); 773 + if (result) 774 + printk(KERN_ERR PREFIX 775 + "Unable to add the device\n"); 776 + break; 777 + } 778 + break; 779 + case ACPI_NOTIFY_EJECT_REQUEST: 780 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 781 + "received ACPI_NOTIFY_EJECT_REQUEST\n")); 782 + 783 + if (acpi_bus_get_device(handle, &device)) { 784 + printk(KERN_ERR PREFIX 785 + "Device don't exist, dropping EJECT\n"); 786 + break; 787 + } 788 + pr = acpi_driver_data(device); 789 + if (!pr) { 790 + printk(KERN_ERR PREFIX 791 + "Driver data is NULL, dropping EJECT\n"); 792 + return; 793 + } 794 + break; 795 + default: 796 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 797 + "Unsupported event [0x%x]\n", event)); 798 + break; 799 + } 800 + 801 + return; 802 + } 803 + 804 + static acpi_status 805 + processor_walk_namespace_cb(acpi_handle handle, 806 + u32 lvl, void *context, void **rv) 807 + { 808 + acpi_status status; 809 + int *action = context; 810 + acpi_object_type type = 0; 811 + 812 + status = acpi_get_type(handle, &type); 813 + if (ACPI_FAILURE(status)) 814 + return (AE_OK); 815 + 816 + if (type != ACPI_TYPE_PROCESSOR) 817 + return (AE_OK); 818 + 819 + switch (*action) { 820 + case INSTALL_NOTIFY_HANDLER: 821 + acpi_install_notify_handler(handle, 822 + ACPI_SYSTEM_NOTIFY, 823 + acpi_processor_hotplug_notify, 824 + NULL); 825 + break; 826 + case UNINSTALL_NOTIFY_HANDLER: 827 + acpi_remove_notify_handler(handle, 828 + ACPI_SYSTEM_NOTIFY, 829 + acpi_processor_hotplug_notify); 830 + break; 831 + default: 832 + break; 833 + } 834 + 835 + return (AE_OK); 836 + } 837 + 838 + static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) 839 + { 840 + 841 + if (!is_processor_present(handle)) { 842 + return AE_ERROR; 843 + } 844 + 845 + if (acpi_map_lsapic(handle, p_cpu)) 846 + return AE_ERROR; 847 + 848 + if (arch_register_cpu(*p_cpu)) { 849 + acpi_unmap_lsapic(*p_cpu); 850 + return AE_ERROR; 851 + } 852 + 853 + return AE_OK; 854 + } 855 + 
856 + static int acpi_processor_handle_eject(struct acpi_processor *pr) 857 + { 858 + if (cpu_online(pr->id)) 859 + cpu_down(pr->id); 860 + 861 + arch_unregister_cpu(pr->id); 862 + acpi_unmap_lsapic(pr->id); 863 + return (0); 864 + } 865 + #else 866 + static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu) 867 + { 868 + return AE_ERROR; 869 + } 870 + static int acpi_processor_handle_eject(struct acpi_processor *pr) 871 + { 872 + return (-EINVAL); 873 + } 874 + #endif 875 + 876 + static 877 + void acpi_processor_install_hotplug_notify(void) 878 + { 879 + #ifdef CONFIG_ACPI_HOTPLUG_CPU 880 + int action = INSTALL_NOTIFY_HANDLER; 881 + acpi_walk_namespace(ACPI_TYPE_PROCESSOR, 882 + ACPI_ROOT_OBJECT, 883 + ACPI_UINT32_MAX, 884 + processor_walk_namespace_cb, NULL, &action, NULL); 885 + #endif 886 + register_hotcpu_notifier(&acpi_cpu_notifier); 887 + } 888 + 889 + static 890 + void acpi_processor_uninstall_hotplug_notify(void) 891 + { 892 + #ifdef CONFIG_ACPI_HOTPLUG_CPU 893 + int action = UNINSTALL_NOTIFY_HANDLER; 894 + acpi_walk_namespace(ACPI_TYPE_PROCESSOR, 895 + ACPI_ROOT_OBJECT, 896 + ACPI_UINT32_MAX, 897 + processor_walk_namespace_cb, NULL, &action, NULL); 898 + #endif 899 + unregister_hotcpu_notifier(&acpi_cpu_notifier); 900 + } 901 + 902 + /* 903 + * We keep the driver loaded even when ACPI is not running. 904 + * This is needed for the powernow-k8 driver, that works even without 905 + * ACPI, but needs symbols from this driver 906 + */ 907 + 908 + static int __init acpi_processor_init(void) 909 + { 910 + int result = 0; 911 + 912 + if (acpi_disabled) 913 + return 0; 914 + 915 + memset(&errata, 0, sizeof(errata)); 916 + 917 + #ifdef CONFIG_ACPI_PROCFS 918 + acpi_processor_dir = proc_mkdir(ACPI_PROCESSOR_CLASS, acpi_root_dir); 919 + if (!acpi_processor_dir) 920 + return -ENOMEM; 921 + #endif 922 + result = cpuidle_register_driver(&acpi_idle_driver); 923 + if (result < 0) 924 + goto out_proc; 925 + 926 + result = acpi_bus_register_driver(&acpi_processor_driver); 927 + if (result < 0) 928 + goto out_cpuidle; 929 + 930 + acpi_processor_install_hotplug_notify(); 931 + 932 + acpi_thermal_cpufreq_init(); 933 + 934 + acpi_processor_ppc_init(); 935 + 936 + acpi_processor_throttling_init(); 937 + 938 + return 0; 939 + 940 + out_cpuidle: 941 + cpuidle_unregister_driver(&acpi_idle_driver); 942 + 943 + out_proc: 944 + #ifdef CONFIG_ACPI_PROCFS 945 + remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); 946 + #endif 947 + 948 + return result; 949 + } 950 + 951 + static void __exit acpi_processor_exit(void) 952 + { 953 + if (acpi_disabled) 954 + return; 955 + 956 + acpi_processor_ppc_exit(); 957 + 958 + acpi_thermal_cpufreq_exit(); 959 + 960 + acpi_processor_uninstall_hotplug_notify(); 961 + 962 + acpi_bus_unregister_driver(&acpi_processor_driver); 963 + 964 + cpuidle_unregister_driver(&acpi_idle_driver); 965 + 966 + #ifdef CONFIG_ACPI_PROCFS 967 + remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); 968 + #endif 969 + 970 + return; 971 + } 972 + 973 + module_init(acpi_processor_init); 974 + module_exit(acpi_processor_exit); 975 + 976 + EXPORT_SYMBOL(acpi_processor_set_thermal_limit); 977 + 978 + MODULE_ALIAS("processor");
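Note on the PIIX4 errata switch in the hunk above: the A-step/B-step cases intentionally fall through into the PIIX4E/PIIX4M cases, so the throttle erratum is set only for the older steppings while the BM-IDE and Type-F DMA checks run for every revision. A minimal stand-alone C sketch of that accumulate-by-fallthrough pattern (hypothetical ERRATUM_* flags, not the kernel's structures):

#include <stdio.h>

/* Hypothetical erratum flags, used only for this illustration. */
#define ERRATUM_THROTTLE 0x1   /* only A/B-step parts */
#define ERRATUM_BMIDE    0x2   /* all revisions */

static unsigned int errata_for_revision(int revision)
{
    unsigned int errata = 0;

    switch (revision) {
    case 0:                     /* A-step */
    case 1:                     /* B-step */
        errata |= ERRATUM_THROTTLE;
        /* deliberate fall through: older parts also need the common checks */
    case 2:                     /* PIIX4E */
    case 3:                     /* PIIX4M */
        errata |= ERRATUM_BMIDE;
        break;
    }
    return errata;
}

int main(void)
{
    for (int rev = 0; rev <= 3; rev++)
        printf("rev %d -> errata 0x%x\n", rev, errata_for_revision(rev));
    return 0;
}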
-209
drivers/acpi/processor_pdc.c
··· 1 - /* 2 - * Copyright (C) 2005 Intel Corporation 3 - * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. 4 - * 5 - * Alex Chiang <achiang@hp.com> 6 - * - Unified x86/ia64 implementations 7 - * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> 8 - * - Added _PDC for platforms with Intel CPUs 9 - */ 10 - #include <linux/dmi.h> 11 - 12 - #include <acpi/acpi_drivers.h> 13 - #include <acpi/processor.h> 14 - 15 - #include "internal.h" 16 - 17 - #define PREFIX "ACPI: " 18 - #define _COMPONENT ACPI_PROCESSOR_COMPONENT 19 - ACPI_MODULE_NAME("processor_pdc"); 20 - 21 - static int set_no_mwait(const struct dmi_system_id *id) 22 - { 23 - printk(KERN_NOTICE PREFIX "%s detected - " 24 - "disabling mwait for CPU C-states\n", id->ident); 25 - idle_nomwait = 1; 26 - return 0; 27 - } 28 - 29 - static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = { 30 - { 31 - set_no_mwait, "IFL91 board", { 32 - DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"), 33 - DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"), 34 - DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"), 35 - DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL}, 36 - { 37 - set_no_mwait, "Extensa 5220", { 38 - DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"), 39 - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 40 - DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), 41 - DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL}, 42 - {}, 43 - }; 44 - 45 - static void acpi_set_pdc_bits(u32 *buf) 46 - { 47 - buf[0] = ACPI_PDC_REVISION_ID; 48 - buf[1] = 1; 49 - 50 - /* Enable coordination with firmware's _TSD info */ 51 - buf[2] = ACPI_PDC_SMP_T_SWCOORD; 52 - 53 - /* Twiddle arch-specific bits needed for _PDC */ 54 - arch_acpi_set_pdc_bits(buf); 55 - } 56 - 57 - static struct acpi_object_list *acpi_processor_alloc_pdc(void) 58 - { 59 - struct acpi_object_list *obj_list; 60 - union acpi_object *obj; 61 - u32 *buf; 62 - 63 - /* allocate and initialize pdc. It will be used later. */ 64 - obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL); 65 - if (!obj_list) { 66 - printk(KERN_ERR "Memory allocation error\n"); 67 - return NULL; 68 - } 69 - 70 - obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL); 71 - if (!obj) { 72 - printk(KERN_ERR "Memory allocation error\n"); 73 - kfree(obj_list); 74 - return NULL; 75 - } 76 - 77 - buf = kmalloc(12, GFP_KERNEL); 78 - if (!buf) { 79 - printk(KERN_ERR "Memory allocation error\n"); 80 - kfree(obj); 81 - kfree(obj_list); 82 - return NULL; 83 - } 84 - 85 - acpi_set_pdc_bits(buf); 86 - 87 - obj->type = ACPI_TYPE_BUFFER; 88 - obj->buffer.length = 12; 89 - obj->buffer.pointer = (u8 *) buf; 90 - obj_list->count = 1; 91 - obj_list->pointer = obj; 92 - 93 - return obj_list; 94 - } 95 - 96 - /* 97 - * _PDC is required for a BIOS-OS handshake for most of the newer 98 - * ACPI processor features. 99 - */ 100 - static int 101 - acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in) 102 - { 103 - acpi_status status = AE_OK; 104 - 105 - if (idle_nomwait) { 106 - /* 107 - * If mwait is disabled for CPU C-states, the C2C3_FFH access 108 - * mode will be disabled in the parameter of _PDC object. 109 - * Of course C1_FFH access mode will also be disabled. 
110 - */ 111 - union acpi_object *obj; 112 - u32 *buffer = NULL; 113 - 114 - obj = pdc_in->pointer; 115 - buffer = (u32 *)(obj->buffer.pointer); 116 - buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH); 117 - 118 - } 119 - status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL); 120 - 121 - if (ACPI_FAILURE(status)) 122 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, 123 - "Could not evaluate _PDC, using legacy perf. control.\n")); 124 - 125 - return status; 126 - } 127 - 128 - static int early_pdc_done; 129 - 130 - void acpi_processor_set_pdc(acpi_handle handle) 131 - { 132 - struct acpi_object_list *obj_list; 133 - 134 - if (arch_has_acpi_pdc() == false) 135 - return; 136 - 137 - if (early_pdc_done) 138 - return; 139 - 140 - obj_list = acpi_processor_alloc_pdc(); 141 - if (!obj_list) 142 - return; 143 - 144 - acpi_processor_eval_pdc(handle, obj_list); 145 - 146 - kfree(obj_list->pointer->buffer.pointer); 147 - kfree(obj_list->pointer); 148 - kfree(obj_list); 149 - } 150 - EXPORT_SYMBOL_GPL(acpi_processor_set_pdc); 151 - 152 - static int early_pdc_optin; 153 - static int set_early_pdc_optin(const struct dmi_system_id *id) 154 - { 155 - early_pdc_optin = 1; 156 - return 0; 157 - } 158 - 159 - static int param_early_pdc_optin(char *s) 160 - { 161 - early_pdc_optin = 1; 162 - return 1; 163 - } 164 - __setup("acpi_early_pdc_eval", param_early_pdc_optin); 165 - 166 - static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = { 167 - { 168 - set_early_pdc_optin, "HP Envy", { 169 - DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"), 170 - DMI_MATCH(DMI_PRODUCT_NAME, "HP Envy") }, NULL}, 171 - { 172 - set_early_pdc_optin, "HP Pavilion dv6", { 173 - DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"), 174 - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6") }, NULL}, 175 - { 176 - set_early_pdc_optin, "HP Pavilion dv7", { 177 - DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"), 178 - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7") }, NULL}, 179 - {}, 180 - }; 181 - 182 - static acpi_status 183 - early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv) 184 - { 185 - acpi_processor_set_pdc(handle); 186 - return AE_OK; 187 - } 188 - 189 - void __init acpi_early_processor_set_pdc(void) 190 - { 191 - /* 192 - * Check whether the system is DMI table. If yes, OSPM 193 - * should not use mwait for CPU-states. 194 - */ 195 - dmi_check_system(processor_idle_dmi_table); 196 - 197 - /* 198 - * Allow systems to opt-in to early _PDC evaluation. 199 - */ 200 - dmi_check_system(early_pdc_optin_table); 201 - if (!early_pdc_optin) 202 - return; 203 - 204 - acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT, 205 - ACPI_UINT32_MAX, 206 - early_init_pdc, NULL, NULL, NULL); 207 - 208 - early_pdc_done = 1; 209 - }
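For readers following the _PDC handshake as it moves into processor_core.c: the object passed to _PDC is a single 12-byte buffer, i.e. three 32-bit words holding the revision, the number of capability DWORDs (1), and the capability bits, and the idle_nomwait quirk simply clears the FFH C-state bits in the third word. A rough stand-alone sketch of that layout, using made-up PDC_* values in place of the real ACPI_PDC_* definitions:

#include <stdio.h>

/* Illustrative stand-ins for the ACPI_PDC_* capability bits. */
#define PDC_REVISION_ID   0x1
#define PDC_SMP_T_SWCOORD (1u << 0)
#define PDC_C_C1_FFH      (1u << 1)
#define PDC_C_C2C3_FFH    (1u << 2)

int main(void)
{
    unsigned int pdc[3];        /* the 12-byte _PDC buffer */
    int mwait_disabled = 1;     /* e.g. the idle_nomwait DMI quirk */

    pdc[0] = PDC_REVISION_ID;                   /* word 0: revision */
    pdc[1] = 1;                                 /* word 1: one capability DWORD follows */
    pdc[2] = PDC_SMP_T_SWCOORD |                /* word 2: capability bits */
             PDC_C_C1_FFH | PDC_C_C2C3_FFH;

    /* Mirrors the idle_nomwait fixup: drop the FFH (MWAIT) C-state bits. */
    if (mwait_disabled)
        pdc[2] &= ~(PDC_C_C1_FFH | PDC_C_C2C3_FFH);

    printf("_PDC buffer: %#x %#x %#x\n", pdc[0], pdc[1], pdc[2]);
    return 0;
}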
-3
drivers/acpi/processor_throttling.c
··· 1133 1133 int result = 0; 1134 1134 struct acpi_processor_throttling *pthrottling; 1135 1135 1136 - if (!pr) 1137 - return -EINVAL; 1138 - 1139 1136 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1140 1137 "pblk_address[0x%08x] duty_offset[%d] duty_width[%d]\n", 1141 1138 pr->throttling.address,
+5
drivers/acpi/sbs.c
··· 217 217 case POWER_SUPPLY_PROP_TECHNOLOGY: 218 218 val->intval = acpi_battery_technology(battery); 219 219 break; 220 + case POWER_SUPPLY_PROP_CYCLE_COUNT: 221 + val->intval = battery->cycle_count; 222 + break; 220 223 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: 221 224 val->intval = battery->design_voltage * 222 225 acpi_battery_vscale(battery) * 1000; ··· 279 276 POWER_SUPPLY_PROP_STATUS, 280 277 POWER_SUPPLY_PROP_PRESENT, 281 278 POWER_SUPPLY_PROP_TECHNOLOGY, 279 + POWER_SUPPLY_PROP_CYCLE_COUNT, 282 280 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 283 281 POWER_SUPPLY_PROP_VOLTAGE_NOW, 284 282 POWER_SUPPLY_PROP_CURRENT_NOW, ··· 564 560 battery->design_voltage * acpi_battery_vscale(battery)); 565 561 seq_printf(seq, "design capacity warning: unknown\n"); 566 562 seq_printf(seq, "design capacity low: unknown\n"); 563 + seq_printf(seq, "cycle count: %i\n", battery->cycle_count); 567 564 seq_printf(seq, "capacity granularity 1: unknown\n"); 568 565 seq_printf(seq, "capacity granularity 2: unknown\n"); 569 566 seq_printf(seq, "model number: %s\n", battery->device_name);
+14 -5
drivers/acpi/sleep.c
··· 552 552 hibernate_nvs_restore(); 553 553 } 554 554 555 - static void acpi_pm_enable_gpes(void) 555 + static int acpi_pm_pre_restore(void) 556 556 { 557 + acpi_disable_all_gpes(); 558 + acpi_os_wait_events_complete(NULL); 559 + acpi_ec_suspend_transactions(); 560 + return 0; 561 + } 562 + 563 + static void acpi_pm_restore_cleanup(void) 564 + { 565 + acpi_ec_resume_transactions(); 557 566 acpi_enable_all_runtime_gpes(); 558 567 } 559 568 ··· 574 565 .prepare = acpi_pm_prepare, 575 566 .enter = acpi_hibernation_enter, 576 567 .leave = acpi_hibernation_leave, 577 - .pre_restore = acpi_pm_disable_gpes, 578 - .restore_cleanup = acpi_pm_enable_gpes, 568 + .pre_restore = acpi_pm_pre_restore, 569 + .restore_cleanup = acpi_pm_restore_cleanup, 579 570 }; 580 571 581 572 /** ··· 627 618 .prepare = acpi_pm_disable_gpes, 628 619 .enter = acpi_hibernation_enter, 629 620 .leave = acpi_hibernation_leave, 630 - .pre_restore = acpi_pm_disable_gpes, 631 - .restore_cleanup = acpi_pm_enable_gpes, 621 + .pre_restore = acpi_pm_pre_restore, 622 + .restore_cleanup = acpi_pm_restore_cleanup, 632 623 .recover = acpi_pm_finish, 633 624 }; 634 625 #endif /* CONFIG_HIBERNATION */
+27 -9
drivers/acpi/thermal.c
··· 368 368 int valid = 0; 369 369 int i; 370 370 371 - /* Critical Shutdown (required) */ 371 + /* Critical Shutdown */ 372 372 if (flag & ACPI_TRIPS_CRITICAL) { 373 373 status = acpi_evaluate_integer(tz->device->handle, 374 374 "_CRT", NULL, &tmp); ··· 379 379 * Below zero (Celsius) values clearly aren't right for sure.. 380 380 * ... so lets discard those as invalid. 381 381 */ 382 - if (ACPI_FAILURE(status) || 383 - tz->trips.critical.temperature <= 2732) { 382 + if (ACPI_FAILURE(status)) { 384 383 tz->trips.critical.flags.valid = 0; 385 - ACPI_EXCEPTION((AE_INFO, status, 386 - "No or invalid critical threshold")); 387 - return -ENODEV; 384 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 385 + "No critical threshold\n")); 386 + } else if (tmp <= 2732) { 387 + printk(KERN_WARNING FW_BUG "Invalid critical threshold " 388 + "(%llu)\n", tmp); 389 + tz->trips.critical.flags.valid = 0; 388 390 } else { 389 391 tz->trips.critical.flags.valid = 1; 390 392 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 391 - "Found critical threshold [%lu]\n", 392 - tz->trips.critical.temperature)); 393 + "Found critical threshold [%lu]\n", 394 + tz->trips.critical.temperature)); 393 395 } 394 396 if (tz->trips.critical.flags.valid == 1) { 395 397 if (crt == -1) { ··· 577 575 578 576 static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) 579 577 { 580 - return acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT); 578 + int i, valid, ret = acpi_thermal_trips_update(tz, ACPI_TRIPS_INIT); 579 + 580 + if (ret) 581 + return ret; 582 + 583 + valid = tz->trips.critical.flags.valid | 584 + tz->trips.hot.flags.valid | 585 + tz->trips.passive.flags.valid; 586 + 587 + for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE; i++) 588 + valid |= tz->trips.active[i].flags.valid; 589 + 590 + if (!valid) { 591 + printk(KERN_WARNING FW_BUG "No valid trip found\n"); 592 + return -ENODEV; 593 + } 594 + return 0; 581 595 } 582 596 583 597 static void acpi_thermal_check(void *data)
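The magic number 2732 in the critical-trip validation above is 0 °C in ACPI's native unit, tenths of a degree Kelvin (273.2 K), so the new check only rejects sub-zero critical thresholds instead of failing the whole thermal zone. A small stand-alone conversion helper makes the threshold readable (illustrative sample values, not kernel code):

#include <stdio.h>

/* ACPI thermal objects report temperature in tenths of degrees Kelvin. */
static long deci_kelvin_to_celsius(unsigned long long dk)
{
    return ((long)dk - 2732) / 10;
}

int main(void)
{
    unsigned long long samples[] = { 2732, 3582, 3882 }; /* 0, 85, 115 degC */

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("_CRT = %llu  ->  %ld C\n",
               samples[i], deci_kelvin_to_celsius(samples[i]));
    return 0;
}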
-45
drivers/acpi/utils.c
··· 289 289 290 290 EXPORT_SYMBOL(acpi_evaluate_integer); 291 291 292 - #if 0 293 - acpi_status 294 - acpi_evaluate_string(acpi_handle handle, 295 - acpi_string pathname, 296 - acpi_object_list * arguments, acpi_string * data) 297 - { 298 - acpi_status status = AE_OK; 299 - acpi_object *element = NULL; 300 - acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; 301 - 302 - 303 - if (!data) 304 - return AE_BAD_PARAMETER; 305 - 306 - status = acpi_evaluate_object(handle, pathname, arguments, &buffer); 307 - if (ACPI_FAILURE(status)) { 308 - acpi_util_eval_error(handle, pathname, status); 309 - return status; 310 - } 311 - 312 - element = (acpi_object *) buffer.pointer; 313 - 314 - if ((element->type != ACPI_TYPE_STRING) 315 - || (element->type != ACPI_TYPE_BUFFER) 316 - || !element->string.length) { 317 - acpi_util_eval_error(handle, pathname, AE_BAD_DATA); 318 - return AE_BAD_DATA; 319 - } 320 - 321 - *data = kzalloc(element->string.length + 1, GFP_KERNEL); 322 - if (!data) { 323 - printk(KERN_ERR PREFIX "Memory allocation\n"); 324 - return -ENOMEM; 325 - } 326 - 327 - memcpy(*data, element->string.pointer, element->string.length); 328 - 329 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Return value [%s]\n", *data)); 330 - 331 - kfree(buffer.pointer); 332 - 333 - return AE_OK; 334 - } 335 - #endif 336 - 337 292 acpi_status 338 293 acpi_evaluate_reference(acpi_handle handle, 339 294 acpi_string pathname,
+17 -11
drivers/acpi/video.c
··· 327 327 int level); 328 328 static int acpi_video_device_lcd_get_level_current( 329 329 struct acpi_video_device *device, 330 - unsigned long long *level); 330 + unsigned long long *level, int init); 331 331 static int acpi_video_get_next_level(struct acpi_video_device *device, 332 332 u32 level_current, u32 event); 333 333 static int acpi_video_switch_brightness(struct acpi_video_device *device, ··· 345 345 struct acpi_video_device *vd = 346 346 (struct acpi_video_device *)bl_get_data(bd); 347 347 348 - if (acpi_video_device_lcd_get_level_current(vd, &cur_level)) 348 + if (acpi_video_device_lcd_get_level_current(vd, &cur_level, 0)) 349 349 return -EINVAL; 350 350 for (i = 2; i < vd->brightness->count; i++) { 351 351 if (vd->brightness->levels[i] == cur_level) ··· 414 414 unsigned long long level; 415 415 int offset; 416 416 417 - if (acpi_video_device_lcd_get_level_current(video, &level)) 417 + if (acpi_video_device_lcd_get_level_current(video, &level, 0)) 418 418 return -EINVAL; 419 419 for (offset = 2; offset < video->brightness->count; offset++) 420 420 if (level == video->brightness->levels[offset]) { ··· 609 609 610 610 static int 611 611 acpi_video_device_lcd_get_level_current(struct acpi_video_device *device, 612 - unsigned long long *level) 612 + unsigned long long *level, int init) 613 613 { 614 614 acpi_status status = AE_OK; 615 615 int i; ··· 633 633 device->brightness->curr = *level; 634 634 return 0; 635 635 } 636 - /* BQC returned an invalid level. Stop using it. */ 637 - ACPI_WARNING((AE_INFO, "%s returned an invalid level", 638 - buf)); 639 - device->cap._BQC = device->cap._BCQ = 0; 636 + if (!init) { 637 + /* 638 + * BQC returned an invalid level. 639 + * Stop using it. 640 + */ 641 + ACPI_WARNING((AE_INFO, 642 + "%s returned an invalid level", 643 + buf)); 644 + device->cap._BQC = device->cap._BCQ = 0; 645 + } 640 646 } else { 641 647 /* Fixme: 642 648 * should we return an error or ignore this failure? ··· 898 892 if (!device->cap._BQC) 899 893 goto set_level; 900 894 901 - result = acpi_video_device_lcd_get_level_current(device, &level_old); 895 + result = acpi_video_device_lcd_get_level_current(device, &level_old, 1); 902 896 if (result) 903 897 goto out_free_levels; 904 898 ··· 909 903 if (result) 910 904 goto out_free_levels; 911 905 912 - result = acpi_video_device_lcd_get_level_current(device, &level); 906 + result = acpi_video_device_lcd_get_level_current(device, &level, 0); 913 907 if (result) 914 908 goto out_free_levels; 915 909 ··· 2002 1996 goto out; 2003 1997 2004 1998 result = acpi_video_device_lcd_get_level_current(device, 2005 - &level_current); 1999 + &level_current, 0); 2006 2000 if (result) 2007 2001 goto out; 2008 2002
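The brightness loops above start at index 2 because a _BCL package reserves its first two entries for the full-power levels used on AC and on battery; the user-selectable levels begin at levels[2], and the backlight index reported to userspace is therefore i - 2. A toy lookup showing that convention (made-up level values, not real firmware data):

#include <stdio.h>

int main(void)
{
    /* levels[0] = full power on AC, levels[1] = full power on battery,
     * levels[2..] = the selectable brightness values (_BCL convention). */
    unsigned int levels[] = { 100, 50, 20, 40, 60, 80, 100 };
    unsigned int count = sizeof(levels) / sizeof(levels[0]);
    unsigned int cur_level = 60;    /* e.g. what _BQC returned */
    int found = -1;

    for (unsigned int i = 2; i < count; i++)
        if (levels[i] == cur_level)
            found = (int)(i - 2);   /* 0-based backlight index */

    printf("current level %u -> backlight index %d\n", cur_level, found);
    return 0;
}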
+19
drivers/pci/hotplug/acpiphp_glue.c
··· 749 749 return retval; 750 750 } 751 751 752 + static void acpiphp_set_acpi_region(struct acpiphp_slot *slot) 753 + { 754 + struct acpiphp_func *func; 755 + union acpi_object params[2]; 756 + struct acpi_object_list arg_list; 757 + 758 + list_for_each_entry(func, &slot->funcs, sibling) { 759 + arg_list.count = 2; 760 + arg_list.pointer = params; 761 + params[0].type = ACPI_TYPE_INTEGER; 762 + params[0].integer.value = ACPI_ADR_SPACE_PCI_CONFIG; 763 + params[1].type = ACPI_TYPE_INTEGER; 764 + params[1].integer.value = 1; 765 + /* _REG is optional, we don't care about if there is failure */ 766 + acpi_evaluate_object(func->handle, "_REG", &arg_list, NULL); 767 + } 768 + } 769 + 752 770 /** 753 771 * enable_device - enable, configure a slot 754 772 * @slot: slot to be enabled ··· 823 805 pci_bus_assign_resources(bus); 824 806 acpiphp_sanitize_bus(bus); 825 807 acpiphp_set_hpp_values(bus); 808 + acpiphp_set_acpi_region(slot); 826 809 pci_enable_bridges(bus); 827 810 pci_bus_add_devices(bus); 828 811
+47 -44
drivers/platform/x86/sony-laptop.c
··· 145 145 struct input_dev *key_dev; 146 146 struct kfifo fifo; 147 147 spinlock_t fifo_lock; 148 - struct workqueue_struct *wq; 148 + struct timer_list release_key_timer; 149 149 }; 150 150 151 151 static struct sony_laptop_input_s sony_laptop_input = { ··· 299 299 }; 300 300 301 301 /* release buttons after a short delay if pressed */ 302 - static void do_sony_laptop_release_key(struct work_struct *work) 302 + static void do_sony_laptop_release_key(unsigned long unused) 303 303 { 304 304 struct sony_laptop_keypress kp; 305 + unsigned long flags; 305 306 306 - while (kfifo_out_locked(&sony_laptop_input.fifo, (unsigned char *)&kp, 307 - sizeof(kp), &sony_laptop_input.fifo_lock) 308 - == sizeof(kp)) { 309 - msleep(10); 307 + spin_lock_irqsave(&sony_laptop_input.fifo_lock, flags); 308 + 309 + if (kfifo_out(&sony_laptop_input.fifo, 310 + (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) { 310 311 input_report_key(kp.dev, kp.key, 0); 311 312 input_sync(kp.dev); 312 313 } 314 + 315 + /* If there is something in the fifo schedule next release. */ 316 + if (kfifo_len(&sony_laptop_input.fifo) != 0) 317 + mod_timer(&sony_laptop_input.release_key_timer, 318 + jiffies + msecs_to_jiffies(10)); 319 + 320 + spin_unlock_irqrestore(&sony_laptop_input.fifo_lock, flags); 313 321 } 314 - static DECLARE_WORK(sony_laptop_release_key_work, 315 - do_sony_laptop_release_key); 316 322 317 323 /* forward event to the input subsystem */ 318 324 static void sony_laptop_report_input_event(u8 event) ··· 372 366 /* we emit the scancode so we can always remap the key */ 373 367 input_event(kp.dev, EV_MSC, MSC_SCAN, event); 374 368 input_sync(kp.dev); 375 - kfifo_in_locked(&sony_laptop_input.fifo, 376 - (unsigned char *)&kp, sizeof(kp), 377 - &sony_laptop_input.fifo_lock); 378 369 379 - if (!work_pending(&sony_laptop_release_key_work)) 380 - queue_work(sony_laptop_input.wq, 381 - &sony_laptop_release_key_work); 370 + /* schedule key release */ 371 + kfifo_in_locked(&sony_laptop_input.fifo, 372 + (unsigned char *)&kp, sizeof(kp), 373 + &sony_laptop_input.fifo_lock); 374 + mod_timer(&sony_laptop_input.release_key_timer, 375 + jiffies + msecs_to_jiffies(10)); 382 376 } else 383 377 dprintk("unknown input event %.2x\n", event); 384 378 } ··· 396 390 397 391 /* kfifo */ 398 392 spin_lock_init(&sony_laptop_input.fifo_lock); 399 - error = 400 - kfifo_alloc(&sony_laptop_input.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); 393 + error = kfifo_alloc(&sony_laptop_input.fifo, 394 + SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); 401 395 if (error) { 402 396 printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); 403 397 goto err_dec_users; 404 398 } 405 399 406 - /* init workqueue */ 407 - sony_laptop_input.wq = create_singlethread_workqueue("sony-laptop"); 408 - if (!sony_laptop_input.wq) { 409 - printk(KERN_ERR DRV_PFX 410 - "Unable to create workqueue.\n"); 411 - error = -ENXIO; 412 - goto err_free_kfifo; 413 - } 400 + setup_timer(&sony_laptop_input.release_key_timer, 401 + do_sony_laptop_release_key, 0); 414 402 415 403 /* input keys */ 416 404 key_dev = input_allocate_device(); 417 405 if (!key_dev) { 418 406 error = -ENOMEM; 419 - goto err_destroy_wq; 407 + goto err_free_kfifo; 420 408 } 421 409 422 410 key_dev->name = "Sony Vaio Keys"; ··· 419 419 key_dev->dev.parent = &acpi_device->dev; 420 420 421 421 /* Initialize the Input Drivers: special keys */ 422 - set_bit(EV_KEY, key_dev->evbit); 423 - set_bit(EV_MSC, key_dev->evbit); 424 - set_bit(MSC_SCAN, key_dev->mscbit); 422 + input_set_capability(key_dev, EV_MSC, MSC_SCAN); 423 + 424 + 
__set_bit(EV_KEY, key_dev->evbit); 425 425 key_dev->keycodesize = sizeof(sony_laptop_input_keycode_map[0]); 426 426 key_dev->keycodemax = ARRAY_SIZE(sony_laptop_input_keycode_map); 427 427 key_dev->keycode = &sony_laptop_input_keycode_map; 428 - for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++) { 429 - if (sony_laptop_input_keycode_map[i] != KEY_RESERVED) { 430 - set_bit(sony_laptop_input_keycode_map[i], 431 - key_dev->keybit); 432 - } 433 - } 428 + for (i = 0; i < ARRAY_SIZE(sony_laptop_input_keycode_map); i++) 429 + __set_bit(sony_laptop_input_keycode_map[i], key_dev->keybit); 430 + __clear_bit(KEY_RESERVED, key_dev->keybit); 434 431 435 432 error = input_register_device(key_dev); 436 433 if (error) ··· 447 450 jog_dev->id.vendor = PCI_VENDOR_ID_SONY; 448 451 key_dev->dev.parent = &acpi_device->dev; 449 452 450 - jog_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); 451 - jog_dev->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_MIDDLE); 452 - jog_dev->relbit[0] = BIT_MASK(REL_WHEEL); 453 + input_set_capability(jog_dev, EV_KEY, BTN_MIDDLE); 454 + input_set_capability(jog_dev, EV_REL, REL_WHEEL); 453 455 454 456 error = input_register_device(jog_dev); 455 457 if (error) ··· 469 473 err_free_keydev: 470 474 input_free_device(key_dev); 471 475 472 - err_destroy_wq: 473 - destroy_workqueue(sony_laptop_input.wq); 474 - 475 476 err_free_kfifo: 476 477 kfifo_free(&sony_laptop_input.fifo); 477 478 ··· 479 486 480 487 static void sony_laptop_remove_input(void) 481 488 { 482 - /* cleanup only after the last user has gone */ 489 + struct sony_laptop_keypress kp = { NULL }; 490 + 491 + /* Cleanup only after the last user has gone */ 483 492 if (!atomic_dec_and_test(&sony_laptop_input.users)) 484 493 return; 485 494 486 - /* flush workqueue first */ 487 - flush_workqueue(sony_laptop_input.wq); 495 + del_timer_sync(&sony_laptop_input.release_key_timer); 496 + 497 + /* 498 + * Generate key-up events for remaining keys. Note that we don't 499 + * need locking since nobody is adding new events to the kfifo. 500 + */ 501 + while (kfifo_out(&sony_laptop_input.fifo, 502 + (unsigned char *)&kp, sizeof(kp)) == sizeof(kp)) { 503 + input_report_key(kp.dev, kp.key, 0); 504 + input_sync(kp.dev); 505 + } 488 506 489 507 /* destroy input devs */ 490 508 input_unregister_device(sony_laptop_input.key_dev); ··· 506 502 sony_laptop_input.jog_dev = NULL; 507 503 } 508 504 509 - destroy_workqueue(sony_laptop_input.wq); 510 505 kfifo_free(&sony_laptop_input.fifo); 511 506 } 512 507
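The sony-laptop change above swaps the dedicated workqueue for a kfifo drained by a self-rearming 10 ms timer: each press queues a pending release and arms the timer, and the timer handler releases one key per tick and re-arms itself while the fifo is non-empty. A compact userspace analogue of that scheme (plain C, no kernel kfifo/timer APIs, purely illustrative):

#include <stdio.h>

#define FIFO_SIZE 8

/* Userspace analogue of the kfifo of pending key releases. */
static int fifo[FIFO_SIZE];
static unsigned int head, tail;

static unsigned int fifo_len(void) { return head - tail; }
static void fifo_put(int key)      { fifo[head++ % FIFO_SIZE] = key; }
static int  fifo_get(void)         { return fifo[tail++ % FIFO_SIZE]; }

static int timer_armed;
static void arm_timer(void)        { timer_armed = 1; }

/* Analogue of do_sony_laptop_release_key(): release one key per tick,
 * then re-arm while work remains. */
static void timer_fired(void)
{
    timer_armed = 0;
    if (fifo_len())
        printf("release key %d\n", fifo_get());
    if (fifo_len())
        arm_timer();
}

static void key_pressed(int key)
{
    printf("press key %d\n", key);
    fifo_put(key);
    arm_timer();                    /* mod_timer(..., +10ms) in the driver */
}

int main(void)
{
    key_pressed(30);
    key_pressed(31);
    while (timer_armed)             /* simulate the 10 ms ticks */
        timer_fired();
    return 0;
}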
+3
drivers/pnp/base.h
··· 166 166 struct pnp_resource *pnp_add_mem_resource(struct pnp_dev *dev, 167 167 resource_size_t start, 168 168 resource_size_t end, int flags); 169 + struct pnp_resource *pnp_add_bus_resource(struct pnp_dev *dev, 170 + resource_size_t start, 171 + resource_size_t end); 169 172 170 173 extern int pnp_debug; 171 174
+5 -2
drivers/pnp/interface.c
··· 278 278 switch (pnp_resource_type(res)) { 279 279 case IORESOURCE_IO: 280 280 case IORESOURCE_MEM: 281 - pnp_printf(buffer, " %#llx-%#llx\n", 281 + case IORESOURCE_BUS: 282 + pnp_printf(buffer, " %#llx-%#llx%s\n", 282 283 (unsigned long long) res->start, 283 - (unsigned long long) res->end); 284 + (unsigned long long) res->end, 285 + res->flags & IORESOURCE_WINDOW ? 286 + " window" : ""); 284 287 break; 285 288 case IORESOURCE_IRQ: 286 289 case IORESOURCE_DMA:
+34 -15
drivers/pnp/pnpacpi/rsparser.c
··· 177 177 } 178 178 179 179 static void pnpacpi_parse_allocated_ioresource(struct pnp_dev *dev, u64 start, 180 - u64 len, int io_decode) 180 + u64 len, int io_decode, 181 + int window) 181 182 { 182 183 int flags = 0; 183 184 u64 end = start + len - 1; ··· 187 186 flags |= IORESOURCE_IO_16BIT_ADDR; 188 187 if (len == 0 || end >= 0x10003) 189 188 flags |= IORESOURCE_DISABLED; 189 + if (window) 190 + flags |= IORESOURCE_WINDOW; 190 191 191 192 pnp_add_io_resource(dev, start, end, flags); 192 193 } ··· 250 247 251 248 static void pnpacpi_parse_allocated_memresource(struct pnp_dev *dev, 252 249 u64 start, u64 len, 253 - int write_protect) 250 + int write_protect, int window) 254 251 { 255 252 int flags = 0; 256 253 u64 end = start + len - 1; ··· 259 256 flags |= IORESOURCE_DISABLED; 260 257 if (write_protect == ACPI_READ_WRITE_MEMORY) 261 258 flags |= IORESOURCE_MEM_WRITEABLE; 259 + if (window) 260 + flags |= IORESOURCE_WINDOW; 262 261 263 262 pnp_add_mem_resource(dev, start, end, flags); 263 + } 264 + 265 + static void pnpacpi_parse_allocated_busresource(struct pnp_dev *dev, 266 + u64 start, u64 len) 267 + { 268 + u64 end = start + len - 1; 269 + 270 + pnp_add_bus_resource(dev, start, end); 264 271 } 265 272 266 273 static void pnpacpi_parse_allocated_address_space(struct pnp_dev *dev, ··· 278 265 { 279 266 struct acpi_resource_address64 addr, *p = &addr; 280 267 acpi_status status; 268 + int window; 281 269 282 270 status = acpi_resource_to_address64(res, p); 283 271 if (!ACPI_SUCCESS(status)) { ··· 287 273 return; 288 274 } 289 275 290 - if (p->producer_consumer == ACPI_PRODUCER) 291 - return; 276 + window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; 292 277 293 278 if (p->resource_type == ACPI_MEMORY_RANGE) 294 279 pnpacpi_parse_allocated_memresource(dev, 295 280 p->minimum, p->address_length, 296 - p->info.mem.write_protect); 281 + p->info.mem.write_protect, window); 297 282 else if (p->resource_type == ACPI_IO_RANGE) 298 283 pnpacpi_parse_allocated_ioresource(dev, 299 284 p->minimum, p->address_length, 300 285 p->granularity == 0xfff ? ACPI_DECODE_10 : 301 - ACPI_DECODE_16); 286 + ACPI_DECODE_16, window); 287 + else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) 288 + pnpacpi_parse_allocated_busresource(dev, p->minimum, 289 + p->address_length); 302 290 } 303 291 304 292 static void pnpacpi_parse_allocated_ext_address_space(struct pnp_dev *dev, 305 293 struct acpi_resource *res) 306 294 { 307 295 struct acpi_resource_extended_address64 *p = &res->data.ext_address64; 296 + int window; 308 297 309 - if (p->producer_consumer == ACPI_PRODUCER) 310 - return; 298 + window = (p->producer_consumer == ACPI_PRODUCER) ? 1 : 0; 311 299 312 300 if (p->resource_type == ACPI_MEMORY_RANGE) 313 301 pnpacpi_parse_allocated_memresource(dev, 314 302 p->minimum, p->address_length, 315 - p->info.mem.write_protect); 303 + p->info.mem.write_protect, window); 316 304 else if (p->resource_type == ACPI_IO_RANGE) 317 305 pnpacpi_parse_allocated_ioresource(dev, 318 306 p->minimum, p->address_length, 319 307 p->granularity == 0xfff ? 
ACPI_DECODE_10 : 320 - ACPI_DECODE_16); 308 + ACPI_DECODE_16, window); 309 + else if (p->resource_type == ACPI_BUS_NUMBER_RANGE) 310 + pnpacpi_parse_allocated_busresource(dev, p->minimum, 311 + p->address_length); 321 312 } 322 313 323 314 static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, ··· 387 368 pnpacpi_parse_allocated_ioresource(dev, 388 369 io->minimum, 389 370 io->address_length, 390 - io->io_decode); 371 + io->io_decode, 0); 391 372 break; 392 373 393 374 case ACPI_RESOURCE_TYPE_START_DEPENDENT: ··· 399 380 pnpacpi_parse_allocated_ioresource(dev, 400 381 fixed_io->address, 401 382 fixed_io->address_length, 402 - ACPI_DECODE_10); 383 + ACPI_DECODE_10, 0); 403 384 break; 404 385 405 386 case ACPI_RESOURCE_TYPE_VENDOR: ··· 415 396 pnpacpi_parse_allocated_memresource(dev, 416 397 memory24->minimum, 417 398 memory24->address_length, 418 - memory24->write_protect); 399 + memory24->write_protect, 0); 419 400 break; 420 401 case ACPI_RESOURCE_TYPE_MEMORY32: 421 402 memory32 = &res->data.memory32; 422 403 pnpacpi_parse_allocated_memresource(dev, 423 404 memory32->minimum, 424 405 memory32->address_length, 425 - memory32->write_protect); 406 + memory32->write_protect, 0); 426 407 break; 427 408 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 428 409 fixed_memory32 = &res->data.fixed_memory32; 429 410 pnpacpi_parse_allocated_memresource(dev, 430 411 fixed_memory32->address, 431 412 fixed_memory32->address_length, 432 - fixed_memory32->write_protect); 413 + fixed_memory32->write_protect, 0); 433 414 break; 434 415 case ACPI_RESOURCE_TYPE_ADDRESS16: 435 416 case ACPI_RESOURCE_TYPE_ADDRESS32:
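In _CRS terms, a "producer" address range is one the device forwards downstream (a bridge window) while a "consumer" range is decoded by the device itself; the hunk above stops discarding producer ranges and tags them IORESOURCE_WINDOW instead, and bus-number ranges now become their own resource type. A toy decode of that mapping (illustrative flag values, not the real ioport.h constants):

#include <stdio.h>

#define RES_MEM    0x200
#define RES_WINDOW 0x200000

enum producer_consumer { CONSUMER, PRODUCER };

static unsigned int flags_for(enum producer_consumer pc)
{
    /* Previously PRODUCER ranges were skipped entirely; now they are
     * kept and simply marked as bridge windows. */
    return RES_MEM | (pc == PRODUCER ? RES_WINDOW : 0);
}

int main(void)
{
    printf("consumer: %#x\nproducer: %#x\n",
           flags_for(CONSUMER), flags_for(PRODUCER));
    return 0;
}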
+26 -1
drivers/pnp/resource.c
··· 470 470 unsigned long pnp_resource_type(struct resource *res) 471 471 { 472 472 return res->flags & (IORESOURCE_IO | IORESOURCE_MEM | 473 - IORESOURCE_IRQ | IORESOURCE_DMA); 473 + IORESOURCE_IRQ | IORESOURCE_DMA | 474 + IORESOURCE_BUS); 474 475 } 475 476 476 477 struct resource *pnp_get_resource(struct pnp_dev *dev, ··· 584 583 585 584 res = &pnp_res->res; 586 585 res->flags = IORESOURCE_MEM | flags; 586 + res->start = start; 587 + res->end = end; 588 + 589 + pnp_dbg(&dev->dev, " add %pr\n", res); 590 + return pnp_res; 591 + } 592 + 593 + struct pnp_resource *pnp_add_bus_resource(struct pnp_dev *dev, 594 + resource_size_t start, 595 + resource_size_t end) 596 + { 597 + struct pnp_resource *pnp_res; 598 + struct resource *res; 599 + 600 + pnp_res = pnp_new_resource(dev); 601 + if (!pnp_res) { 602 + dev_err(&dev->dev, "can't add resource for BUS %#llx-%#llx\n", 603 + (unsigned long long) start, 604 + (unsigned long long) end); 605 + return NULL; 606 + } 607 + 608 + res = &pnp_res->res; 609 + res->flags = IORESOURCE_BUS; 587 610 res->start = start; 588 611 res->end = end; 589 612
+3 -1
drivers/pnp/support.c
··· 69 69 return "irq"; 70 70 case IORESOURCE_DMA: 71 71 return "dma"; 72 + case IORESOURCE_BUS: 73 + return "bus"; 72 74 } 73 - return NULL; 75 + return "unknown"; 74 76 } 75 77 76 78 void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc)
+1
drivers/power/power_supply_sysfs.c
··· 99 99 POWER_SUPPLY_ATTR(present), 100 100 POWER_SUPPLY_ATTR(online), 101 101 POWER_SUPPLY_ATTR(technology), 102 + POWER_SUPPLY_ATTR(cycle_count), 102 103 POWER_SUPPLY_ATTR(voltage_max), 103 104 POWER_SUPPLY_ATTR(voltage_min), 104 105 POWER_SUPPLY_ATTR(voltage_max_design),
+9 -1
include/acpi/processor.h
··· 320 320 321 321 #endif /* CONFIG_CPU_FREQ */ 322 322 323 - /* in processor_pdc.c */ 323 + /* in processor_core.c */ 324 324 void acpi_processor_set_pdc(acpi_handle handle); 325 + #ifdef CONFIG_SMP 326 + int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); 327 + #else 328 + static inline int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) 329 + { 330 + return -1; 331 + } 332 + #endif 325 333 326 334 /* in processor_throttling.c */ 327 335 int acpi_processor_tstate_has_changed(struct acpi_processor *pr);
+10 -8
include/linux/ioport.h
··· 34 34 */ 35 35 #define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */ 36 36 37 - #define IORESOURCE_TYPE_BITS 0x00000f00 /* Resource type */ 37 + #define IORESOURCE_TYPE_BITS 0x00001f00 /* Resource type */ 38 38 #define IORESOURCE_IO 0x00000100 39 39 #define IORESOURCE_MEM 0x00000200 40 40 #define IORESOURCE_IRQ 0x00000400 41 41 #define IORESOURCE_DMA 0x00000800 42 + #define IORESOURCE_BUS 0x00001000 42 43 43 - #define IORESOURCE_PREFETCH 0x00001000 /* No side effects */ 44 - #define IORESOURCE_READONLY 0x00002000 45 - #define IORESOURCE_CACHEABLE 0x00004000 46 - #define IORESOURCE_RANGELENGTH 0x00008000 47 - #define IORESOURCE_SHADOWABLE 0x00010000 44 + #define IORESOURCE_PREFETCH 0x00002000 /* No side effects */ 45 + #define IORESOURCE_READONLY 0x00004000 46 + #define IORESOURCE_CACHEABLE 0x00008000 47 + #define IORESOURCE_RANGELENGTH 0x00010000 48 + #define IORESOURCE_SHADOWABLE 0x00020000 48 49 49 - #define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */ 50 - #define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */ 50 + #define IORESOURCE_SIZEALIGN 0x00040000 /* size indicates alignment */ 51 + #define IORESOURCE_STARTALIGN 0x00080000 /* start field is alignment */ 51 52 52 53 #define IORESOURCE_MEM_64 0x00100000 54 + #define IORESOURCE_WINDOW 0x00200000 /* forwarded by bridge */ 53 55 54 56 #define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */ 55 57 #define IORESOURCE_DISABLED 0x10000000
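The renumbering above widens IORESOURCE_TYPE_BITS from 0x00000f00 to 0x00001f00 so that IORESOURCE_BUS fits inside the type mask, which is why every attribute flag above it shifts up by one bit. A quick stand-alone check that the new values stay consistent (constants copied from the hunk; the assertions are only an illustration):

#include <assert.h>
#include <stdio.h>

#define IORESOURCE_TYPE_BITS 0x00001f00
#define IORESOURCE_IO        0x00000100
#define IORESOURCE_MEM       0x00000200
#define IORESOURCE_IRQ       0x00000400
#define IORESOURCE_DMA       0x00000800
#define IORESOURCE_BUS       0x00001000
#define IORESOURCE_PREFETCH  0x00002000
#define IORESOURCE_WINDOW    0x00200000

int main(void)
{
    unsigned long types = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_IRQ |
                          IORESOURCE_DMA | IORESOURCE_BUS;

    /* All five resource types fit under the widened type mask ... */
    assert((types & ~IORESOURCE_TYPE_BITS) == 0);
    /* ... and the shifted attribute flags no longer collide with it. */
    assert((IORESOURCE_PREFETCH & IORESOURCE_TYPE_BITS) == 0);
    assert((IORESOURCE_WINDOW & IORESOURCE_TYPE_BITS) == 0);

    printf("type mask %#lx covers types %#lx\n",
           (unsigned long)IORESOURCE_TYPE_BITS, types);
    return 0;
}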
+1
include/linux/power_supply.h
··· 82 82 POWER_SUPPLY_PROP_PRESENT, 83 83 POWER_SUPPLY_PROP_ONLINE, 84 84 POWER_SUPPLY_PROP_TECHNOLOGY, 85 + POWER_SUPPLY_PROP_CYCLE_COUNT, 85 86 POWER_SUPPLY_PROP_VOLTAGE_MAX, 86 87 POWER_SUPPLY_PROP_VOLTAGE_MIN, 87 88 POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
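POWER_SUPPLY_PROP_CYCLE_COUNT has to land at the same position here as the new POWER_SUPPLY_ATTR(cycle_count) entry in power_supply_sysfs.c above, since the sysfs code uses the attribute's offset in that table as the property enum value. A stripped-down illustration of that enum/table coupling (hypothetical names, not the real framework):

#include <stdio.h>

/* Enum and name table must stay in the same order. */
enum psy_property {
    PSY_PROP_TECHNOLOGY,
    PSY_PROP_CYCLE_COUNT,       /* new entry, inserted in both places */
    PSY_PROP_VOLTAGE_MAX,
};

static const char * const psy_prop_name[] = {
    "technology",
    "cycle_count",              /* must match the enum position above */
    "voltage_max",
};

int main(void)
{
    printf("property %d is exposed as .../%s\n",
           PSY_PROP_CYCLE_COUNT, psy_prop_name[PSY_PROP_CYCLE_COUNT]);
    return 0;
}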
+12 -1
lib/vsprintf.c
··· 609 609 .precision = -1, 610 610 .flags = SPECIAL | SMALL | ZEROPAD, 611 611 }; 612 + static const struct printf_spec bus_spec = { 613 + .base = 16, 614 + .field_width = 2, 615 + .precision = -1, 616 + .flags = SMALL | ZEROPAD, 617 + }; 612 618 static const struct printf_spec dec_spec = { 613 619 .base = 10, 614 620 .precision = -1, ··· 635 629 * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ 636 630 #define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) 637 631 #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) 638 - #define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]") 632 + #define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]") 639 633 #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") 640 634 char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, 641 635 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; ··· 657 651 } else if (res->flags & IORESOURCE_DMA) { 658 652 p = string(p, pend, "dma ", str_spec); 659 653 specp = &dec_spec; 654 + } else if (res->flags & IORESOURCE_BUS) { 655 + p = string(p, pend, "bus ", str_spec); 656 + specp = &bus_spec; 660 657 } else { 661 658 p = string(p, pend, "??? ", str_spec); 662 659 specp = &mem_spec; ··· 675 666 p = string(p, pend, " 64bit", str_spec); 676 667 if (res->flags & IORESOURCE_PREFETCH) 677 668 p = string(p, pend, " pref", str_spec); 669 + if (res->flags & IORESOURCE_WINDOW) 670 + p = string(p, pend, " window", str_spec); 678 671 if (res->flags & IORESOURCE_DISABLED) 679 672 p = string(p, pend, " disabled", str_spec); 680 673 } else {
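With the vsprintf additions, %pR can now decode the new type and flag, printing something like [bus 00-3f] for a bus-number range (two-digit zero-padded hex per bus_spec) and appending " window" to forwarded ranges. A minimal userspace imitation of that decode, assuming the flag values from the ioport.h hunk; the real kernel formatting path differs in detail:

#include <stdio.h>

#define IORESOURCE_MEM    0x00000200
#define IORESOURCE_BUS    0x00001000
#define IORESOURCE_WINDOW 0x00200000

struct resource { unsigned long long start, end; unsigned long flags; };

static void print_resource(const struct resource *r)
{
    if (r->flags & IORESOURCE_BUS)
        printf("[bus %02llx-%02llx]\n", r->start, r->end);
    else if (r->flags & IORESOURCE_MEM)
        printf("[mem %#010llx-%#010llx%s]\n", r->start, r->end,
               r->flags & IORESOURCE_WINDOW ? " window" : "");
}

int main(void)
{
    struct resource bus  = { 0x00, 0x3f, IORESOURCE_BUS };
    struct resource wind = { 0x80000000, 0x8fffffff,
                             IORESOURCE_MEM | IORESOURCE_WINDOW };

    print_resource(&bus);   /* [bus 00-3f] */
    print_resource(&wind);  /* [mem 0x80000000-0x8fffffff window] */
    return 0;
}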