Merge master.kernel.org:/pub/scm/linux/kernel/git/lenb/to-linus

+971 -232
+7
arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
··· 442 (u32) data->acpi_data.states[i].transition_latency); 443 444 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); 445 return (result); 446 447 err_freqfree:
··· 442 (u32) data->acpi_data.states[i].transition_latency); 443 444 cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); 445 + 446 + /* 447 + * the first call to ->target() should result in us actually 448 + * writing something to the appropriate registers. 449 + */ 450 + data->resume = 1; 451 + 452 return (result); 453 454 err_freqfree:
+1
arch/i386/pci/acpi.c
··· 30 acpi_irq_penalty_init(); 31 pcibios_scanned++; 32 pcibios_enable_irq = acpi_pci_irq_enable; 33 34 if (pci_routeirq) { 35 /*
··· 30 acpi_irq_penalty_init(); 31 pcibios_scanned++; 32 pcibios_enable_irq = acpi_pci_irq_enable; 33 + pcibios_disable_irq = acpi_pci_irq_disable; 34 35 if (pci_routeirq) { 36 /*
+6
arch/i386/pci/common.c
··· 254 255 return pcibios_enable_irq(dev); 256 }
··· 254 255 return pcibios_enable_irq(dev); 256 } 257 + 258 + void pcibios_disable_device (struct pci_dev *dev) 259 + { 260 + if (pcibios_disable_irq) 261 + pcibios_disable_irq(dev); 262 + }
+1
arch/i386/pci/irq.c
··· 56 }; 57 58 int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL; 59 60 /* 61 * Check passed address for the PCI IRQ Routing Table signature
··· 56 }; 57 58 int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL; 59 + void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL; 60 61 /* 62 * Check passed address for the PCI IRQ Routing Table signature
+1
arch/i386/pci/pci.h
··· 73 extern spinlock_t pci_config_lock; 74 75 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
··· 73 extern spinlock_t pci_config_lock; 74 75 extern int (*pcibios_enable_irq)(struct pci_dev *dev); 76 + extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+721 -170
drivers/acpi/ec.c
··· 59 #define ACPI_EC_DELAY 50 /* Wait 50ms max. during EC ops */ 60 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 61 62 #define ACPI_EC_COMMAND_READ 0x80 63 #define ACPI_EC_COMMAND_WRITE 0x81 64 #define ACPI_EC_BURST_ENABLE 0x82 65 #define ACPI_EC_BURST_DISABLE 0x83 66 #define ACPI_EC_COMMAND_QUERY 0x84 67 68 - static int acpi_ec_add (struct acpi_device *device); 69 static int acpi_ec_remove (struct acpi_device *device, int type); 70 static int acpi_ec_start (struct acpi_device *device); 71 static int acpi_ec_stop (struct acpi_device *device, int type); 72 73 static struct acpi_driver acpi_ec_driver = { 74 .name = ACPI_EC_DRIVER_NAME, 75 .class = ACPI_EC_CLASS, 76 .ids = ACPI_EC_HID, 77 .ops = { 78 - .add = acpi_ec_add, 79 .remove = acpi_ec_remove, 80 .start = acpi_ec_start, 81 .stop = acpi_ec_stop, 82 }, 83 }; 84 85 - struct acpi_ec { 86 - acpi_handle handle; 87 - unsigned long uid; 88 - unsigned long gpe_bit; 89 - struct acpi_generic_address status_addr; 90 - struct acpi_generic_address command_addr; 91 - struct acpi_generic_address data_addr; 92 - unsigned long global_lock; 93 - unsigned int expect_event; 94 - atomic_t leaving_burst; /* 0 : No, 1 : Yes, 2: abort*/ 95 - atomic_t pending_gpe; 96 - struct semaphore sem; 97 - wait_queue_head_t wait; 98 }; 99 100 /* If we find an EC via the ECDT, we need to keep a ptr to its context */ 101 - static struct acpi_ec *ec_ecdt; 102 103 /* External interfaces use first EC only, so remember */ 104 static struct acpi_device *first_ec; 105 106 /* -------------------------------------------------------------------------- 107 Transaction Management 108 -------------------------------------------------------------------------- */ 109 110 - static inline u32 acpi_ec_read_status(struct acpi_ec *ec) 111 { 112 u32 status = 0; 113 114 - acpi_hw_low_level_read(8, &status, &ec->status_addr); 115 return status; 116 } 117 118 - static int acpi_ec_wait(struct acpi_ec *ec, unsigned int event) 119 { 120 int result 
= 0; 121 122 ACPI_FUNCTION_TRACE("acpi_ec_wait"); 123 124 - ec->expect_event = event; 125 smp_mb(); 126 127 - result = wait_event_interruptible_timeout(ec->wait, 128 - !ec->expect_event, 129 msecs_to_jiffies(ACPI_EC_DELAY)); 130 131 - ec->expect_event = 0; 132 smp_mb(); 133 134 if (result < 0){ ··· 269 270 static int 271 acpi_ec_enter_burst_mode ( 272 - struct acpi_ec *ec) 273 { 274 u32 tmp = 0; 275 int status = 0; ··· 279 status = acpi_ec_read_status(ec); 280 if (status != -EINVAL && 281 !(status & ACPI_EC_FLAG_BURST)){ 282 - acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, &ec->command_addr); 283 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 284 if (status){ 285 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 286 return_VALUE(-EINVAL); 287 } 288 - acpi_hw_low_level_read(8, &tmp, &ec->data_addr); 289 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 290 if(tmp != 0x90 ) {/* Burst ACK byte*/ 291 return_VALUE(-EINVAL); 292 } 293 } 294 295 - atomic_set(&ec->leaving_burst , 0); 296 return_VALUE(0); 297 } 298 299 static int 300 acpi_ec_leave_burst_mode ( 301 - struct acpi_ec *ec) 302 { 303 int status =0; 304 305 ACPI_FUNCTION_TRACE("acpi_ec_leave_burst_mode"); 306 307 - atomic_set(&ec->leaving_burst , 1); 308 status = acpi_ec_read_status(ec); 309 if (status != -EINVAL && 310 (status & ACPI_EC_FLAG_BURST)){ 311 - acpi_hw_low_level_write(8, ACPI_EC_BURST_DISABLE, &ec->command_addr); 312 status = acpi_ec_wait(ec, ACPI_EC_FLAG_IBF); 313 if (status){ 314 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 315 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,"------->wait fail\n")); 316 return_VALUE(-EINVAL); 317 } 318 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 319 status = acpi_ec_read_status(ec); 320 } 321 ··· 324 325 static int 326 acpi_ec_read ( 327 - struct acpi_ec *ec, 328 u8 address, 329 u32 *data) 330 { ··· 463 retry: 464 *data = 0; 465 466 - if (ec->global_lock) { 467 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 468 if (ACPI_FAILURE(status)) 469 
return_VALUE(-ENODEV); 470 } 471 472 WARN_ON(in_interrupt()); 473 - down(&ec->sem); 474 475 if(acpi_ec_enter_burst_mode(ec)) 476 goto end; 477 478 - acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ, &ec->command_addr); 479 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 480 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 481 if (status) { 482 goto end; 483 } 484 485 - acpi_hw_low_level_write(8, address, &ec->data_addr); 486 status= acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 487 if (status){ 488 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 489 goto end; 490 } 491 492 - acpi_hw_low_level_read(8, data, &ec->data_addr); 493 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 494 495 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Read [%02x] from address [%02x]\n", 496 *data, address)); 497 498 end: 499 acpi_ec_leave_burst_mode(ec); 500 - up(&ec->sem); 501 502 - if (ec->global_lock) 503 acpi_release_global_lock(glk); 504 505 - if(atomic_read(&ec->leaving_burst) == 2){ 506 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 507 - while(atomic_read(&ec->pending_gpe)){ 508 msleep(1); 509 } 510 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 511 goto retry; 512 } 513 ··· 516 517 518 static int 519 - acpi_ec_write ( 520 - struct acpi_ec *ec, 521 u8 address, 522 u8 data) 523 { ··· 530 if (!ec) 531 return_VALUE(-EINVAL); 532 retry: 533 - if (ec->global_lock) { 534 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 535 if (ACPI_FAILURE(status)) 536 return_VALUE(-ENODEV); 537 } 538 539 WARN_ON(in_interrupt()); 540 - down(&ec->sem); 541 542 if(acpi_ec_enter_burst_mode(ec)) 543 goto end; ··· 545 status = acpi_ec_read_status(ec); 546 if (status != -EINVAL && 547 !(status & ACPI_EC_FLAG_BURST)){ 548 - acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, &ec->command_addr); 549 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 550 if (status) 551 goto end; 552 - acpi_hw_low_level_read(8, &tmp, &ec->data_addr); 553 if(tmp != 0x90 ) /* Burst ACK byte*/ 554 goto end; 555 } 556 /*Now we 
are in burst mode*/ 557 558 - acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE, &ec->command_addr); 559 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 560 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 561 if (status){ 562 goto end; 563 } 564 565 - acpi_hw_low_level_write(8, address, &ec->data_addr); 566 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 567 if (status){ 568 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 569 goto end; 570 } 571 572 - acpi_hw_low_level_write(8, data, &ec->data_addr); 573 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 574 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 575 if (status) 576 goto end; 577 ··· 580 581 end: 582 acpi_ec_leave_burst_mode(ec); 583 - up(&ec->sem); 584 585 - if (ec->global_lock) 586 acpi_release_global_lock(glk); 587 588 - if(atomic_read(&ec->leaving_burst) == 2){ 589 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 590 - while(atomic_read(&ec->pending_gpe)){ 591 msleep(1); 592 } 593 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 594 goto retry; 595 } 596 ··· 603 int 604 ec_read(u8 addr, u8 *val) 605 { 606 - struct acpi_ec *ec; 607 int err; 608 u32 temp_data; 609 ··· 626 int 627 ec_write(u8 addr, u8 val) 628 { 629 - struct acpi_ec *ec; 630 int err; 631 632 if (!first_ec) ··· 640 } 641 EXPORT_SYMBOL(ec_write); 642 643 - 644 static int 645 acpi_ec_query ( 646 - struct acpi_ec *ec, 647 u32 *data) 648 { 649 int status = 0; ··· 711 return_VALUE(-EINVAL); 712 *data = 0; 713 714 - if (ec->global_lock) { 715 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 716 if (ACPI_FAILURE(status)) 717 return_VALUE(-ENODEV); 718 } 719 720 - down(&ec->sem); 721 if(acpi_ec_enter_burst_mode(ec)) 722 goto end; 723 /* ··· 725 * Note that successful completion of the query causes the ACPI_EC_SCI 726 * bit to be cleared (and thus clearing the interrupt source). 
727 */ 728 - acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY, &ec->command_addr); 729 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 730 if (status){ 731 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 732 goto end; 733 } 734 735 - acpi_hw_low_level_read(8, data, &ec->data_addr); 736 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 737 if (!*data) 738 status = -ENODATA; 739 740 end: 741 acpi_ec_leave_burst_mode(ec); 742 - up(&ec->sem); 743 744 - if (ec->global_lock) 745 acpi_release_global_lock(glk); 746 747 - if(atomic_read(&ec->leaving_burst) == 2){ 748 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 749 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 750 status = -ENODATA; 751 } 752 return_VALUE(status); ··· 757 Event Management 758 -------------------------------------------------------------------------- */ 759 760 - struct acpi_ec_query_data { 761 acpi_handle handle; 762 u8 data; 763 }; ··· 766 acpi_ec_gpe_query ( 767 void *ec_cxt) 768 { 769 - struct acpi_ec *ec = (struct acpi_ec *) ec_cxt; 770 u32 value; 771 int result = -ENODATA; 772 static char object_name[5] = {'_','Q','0','0','\0'}; ··· 838 839 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s\n", object_name)); 840 841 - acpi_evaluate_object(ec->handle, object_name, NULL, NULL); 842 end: 843 - atomic_dec(&ec->pending_gpe); 844 return; 845 } 846 ··· 848 acpi_ec_gpe_handler ( 849 void *data) 850 { 851 acpi_status status = AE_OK; 852 - u32 value; 853 - struct acpi_ec *ec = (struct acpi_ec *) data; 854 855 if (!ec) 856 return ACPI_INTERRUPT_NOT_HANDLED; 857 858 - acpi_disable_gpe(NULL, ec->gpe_bit, ACPI_ISR); 859 860 value = acpi_ec_read_status(ec); 861 862 if((value & ACPI_EC_FLAG_IBF) && 863 !(value & ACPI_EC_FLAG_BURST) && 864 - (atomic_read(&ec->leaving_burst) == 0)) { 865 /* 866 * the embedded controller disables 867 * burst mode for any reason other 868 * than the burst disable command 869 * to process critical event. 
870 */ 871 - atomic_set(&ec->leaving_burst , 2); /* block current pending transaction 872 and retry */ 873 - wake_up(&ec->wait); 874 }else { 875 - if ((ec->expect_event == ACPI_EC_EVENT_OBF && 876 (value & ACPI_EC_FLAG_OBF)) || 877 - (ec->expect_event == ACPI_EC_EVENT_IBE && 878 !(value & ACPI_EC_FLAG_IBF))) { 879 - ec->expect_event = 0; 880 - wake_up(&ec->wait); 881 return ACPI_INTERRUPT_HANDLED; 882 } 883 } 884 885 if (value & ACPI_EC_FLAG_SCI){ 886 - atomic_add(1, &ec->pending_gpe) ; 887 status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, 888 acpi_ec_gpe_query, ec); 889 return status == AE_OK ? 890 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; 891 } 892 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_ISR); 893 return status == AE_OK ? 894 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; 895 } ··· 955 void *region_context) 956 { 957 int result = 0; 958 - struct acpi_ec *ec = NULL; 959 u64 temp = *value; 960 acpi_integer f_v = 0; 961 int i = 0; ··· 970 return_VALUE(AE_BAD_PARAMETER); 971 } 972 973 - ec = (struct acpi_ec *) handler_context; 974 975 next_byte: 976 switch (function) { ··· 1031 static int 1032 acpi_ec_read_info (struct seq_file *seq, void *offset) 1033 { 1034 - struct acpi_ec *ec = (struct acpi_ec *) seq->private; 1035 1036 ACPI_FUNCTION_TRACE("acpi_ec_read_info"); 1037 ··· 1039 goto end; 1040 1041 seq_printf(seq, "gpe bit: 0x%02x\n", 1042 - (u32) ec->gpe_bit); 1043 seq_printf(seq, "ports: 0x%02x, 0x%02x\n", 1044 - (u32) ec->status_addr.address, (u32) ec->data_addr.address); 1045 seq_printf(seq, "use global lock: %s\n", 1046 - ec->global_lock?"yes":"no"); 1047 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 1048 1049 end: 1050 return_VALUE(0); ··· 1067 acpi_ec_add_fs ( 1068 struct acpi_device *device) 1069 { 1070 - struct proc_dir_entry *entry; 1071 1072 ACPI_FUNCTION_TRACE("acpi_ec_add_fs"); 1073 ··· 1114 Driver Interface 1115 -------------------------------------------------------------------------- */ 1116 1117 static int 1118 - 
acpi_ec_add ( 1119 struct acpi_device *device) 1120 { 1121 - int result; 1122 - acpi_status status; 1123 - struct acpi_ec *ec; 1124 unsigned long uid; 1125 1126 ACPI_FUNCTION_TRACE("acpi_ec_add"); ··· 1129 if (!device) 1130 return_VALUE(-EINVAL); 1131 1132 - ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 1133 if (!ec) 1134 return_VALUE(-ENOMEM); 1135 - memset(ec, 0, sizeof(struct acpi_ec)); 1136 1137 - ec->handle = device->handle; 1138 - ec->uid = -1; 1139 - atomic_set(&ec->pending_gpe, 0); 1140 - atomic_set(&ec->leaving_burst , 1); 1141 - init_MUTEX(&ec->sem); 1142 - init_waitqueue_head(&ec->wait); 1143 strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); 1144 strcpy(acpi_device_class(device), ACPI_EC_CLASS); 1145 acpi_driver_data(device) = ec; 1146 1147 /* Use the global lock for all EC transactions? */ 1148 - acpi_evaluate_integer(ec->handle, "_GLK", NULL, &ec->global_lock); 1149 1150 /* If our UID matches the UID for the ECDT-enumerated EC, 1151 we now have the *real* EC info, so kill the makeshift one.*/ 1152 - acpi_evaluate_integer(ec->handle, "_UID", NULL, &uid); 1153 - if (ec_ecdt && ec_ecdt->uid == uid) { 1154 acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, 1155 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 1156 - 1157 - acpi_remove_gpe_handler(NULL, ec_ecdt->gpe_bit, &acpi_ec_gpe_handler); 1158 1159 kfree(ec_ecdt); 1160 } 1161 1162 /* Get GPE bit assignment (EC events). 
*/ 1163 /* TODO: Add support for _GPE returning a package */ 1164 - status = acpi_evaluate_integer(ec->handle, "_GPE", NULL, &ec->gpe_bit); 1165 if (ACPI_FAILURE(status)) { 1166 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 1167 "Error obtaining GPE bit assignment\n")); ··· 1172 1173 printk(KERN_INFO PREFIX "%s [%s] (gpe %d)\n", 1174 acpi_device_name(device), acpi_device_bid(device), 1175 - (u32) ec->gpe_bit); 1176 1177 if (!first_ec) 1178 first_ec = device; ··· 1261 struct acpi_device *device, 1262 int type) 1263 { 1264 - struct acpi_ec *ec; 1265 1266 ACPI_FUNCTION_TRACE("acpi_ec_remove"); 1267 ··· 1283 struct acpi_resource *resource, 1284 void *context) 1285 { 1286 - struct acpi_ec *ec = (struct acpi_ec *) context; 1287 struct acpi_generic_address *addr; 1288 1289 if (resource->id != ACPI_RSTYPE_IO) { ··· 1295 * the second address region returned is the status/command 1296 * port. 1297 */ 1298 - if (ec->data_addr.register_bit_width == 0) { 1299 - addr = &ec->data_addr; 1300 - } else if (ec->command_addr.register_bit_width == 0) { 1301 - addr = &ec->command_addr; 1302 } else { 1303 return AE_CTRL_TERMINATE; 1304 } ··· 1316 acpi_ec_start ( 1317 struct acpi_device *device) 1318 { 1319 - acpi_status status; 1320 - struct acpi_ec *ec; 1321 1322 ACPI_FUNCTION_TRACE("acpi_ec_start"); 1323 ··· 1332 /* 1333 * Get I/O port addresses. Convert to GAS format. 
1334 */ 1335 - status = acpi_walk_resources(ec->handle, METHOD_NAME__CRS, 1336 acpi_ec_io_ports, ec); 1337 - if (ACPI_FAILURE(status) || ec->command_addr.register_bit_width == 0) { 1338 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error getting I/O port addresses")); 1339 return_VALUE(-ENODEV); 1340 } 1341 1342 - ec->status_addr = ec->command_addr; 1343 1344 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "gpe=0x%02x, ports=0x%2x,0x%2x\n", 1345 - (u32) ec->gpe_bit, (u32) ec->command_addr.address, 1346 - (u32) ec->data_addr.address)); 1347 1348 /* 1349 * Install GPE handler 1350 */ 1351 - status = acpi_install_gpe_handler(NULL, ec->gpe_bit, 1352 ACPI_GPE_EDGE_TRIGGERED, &acpi_ec_gpe_handler, ec); 1353 if (ACPI_FAILURE(status)) { 1354 return_VALUE(-ENODEV); 1355 } 1356 - acpi_set_gpe_type (NULL, ec->gpe_bit, ACPI_GPE_TYPE_RUNTIME); 1357 - acpi_enable_gpe (NULL, ec->gpe_bit, ACPI_NOT_ISR); 1358 1359 - status = acpi_install_address_space_handler (ec->handle, 1360 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, 1361 &acpi_ec_space_setup, ec); 1362 if (ACPI_FAILURE(status)) { 1363 - acpi_remove_gpe_handler(NULL, ec->gpe_bit, &acpi_ec_gpe_handler); 1364 return_VALUE(-ENODEV); 1365 } 1366 ··· 1374 struct acpi_device *device, 1375 int type) 1376 { 1377 - acpi_status status; 1378 - struct acpi_ec *ec; 1379 1380 ACPI_FUNCTION_TRACE("acpi_ec_stop"); 1381 ··· 1384 1385 ec = acpi_driver_data(device); 1386 1387 - status = acpi_remove_address_space_handler(ec->handle, 1388 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 1389 if (ACPI_FAILURE(status)) 1390 return_VALUE(-ENODEV); 1391 1392 - status = acpi_remove_gpe_handler(NULL, ec->gpe_bit, &acpi_ec_gpe_handler); 1393 if (ACPI_FAILURE(status)) 1394 return_VALUE(-ENODEV); 1395 ··· 1403 void *context, 1404 void **retval) 1405 { 1406 acpi_status status; 1407 1408 status = acpi_walk_resources(handle, METHOD_NAME__CRS, 1409 acpi_ec_io_ports, ec_ecdt); 1410 if (ACPI_FAILURE(status)) 1411 return status; 1412 - ec_ecdt->status_addr = ec_ecdt->command_addr; 1413 1414 - 
ec_ecdt->uid = -1; 1415 - acpi_evaluate_integer(handle, "_UID", NULL, &ec_ecdt->uid); 1416 1417 - status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec_ecdt->gpe_bit); 1418 if (ACPI_FAILURE(status)) 1419 return status; 1420 - ec_ecdt->global_lock = TRUE; 1421 - ec_ecdt->handle = handle; 1422 1423 printk(KERN_INFO PREFIX "GPE=0x%02x, ports=0x%2x, 0x%2x\n", 1424 - (u32) ec_ecdt->gpe_bit, (u32) ec_ecdt->command_addr.address, 1425 - (u32) ec_ecdt->data_addr.address); 1426 1427 return AE_CTRL_TERMINATE; 1428 } ··· 1495 1496 printk(KERN_INFO PREFIX "Try to make an fake ECDT\n"); 1497 1498 - ec_ecdt = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 1499 if (!ec_ecdt) { 1500 ret = -ENOMEM; 1501 goto error; 1502 } 1503 - memset(ec_ecdt, 0, sizeof(struct acpi_ec)); 1504 1505 status = acpi_get_devices (ACPI_EC_HID, 1506 acpi_fake_ecdt_callback, ··· 1521 static int __init 1522 acpi_ec_get_real_ecdt(void) 1523 { 1524 acpi_status status; 1525 struct acpi_table_ecdt *ecdt_ptr; 1526 ··· 1588 /* 1589 * Generate a temporary ec context to use until the namespace is scanned 1590 */ 1591 - ec_ecdt = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 1592 if (!ec_ecdt) 1593 return -ENOMEM; 1594 - memset(ec_ecdt, 0, sizeof(struct acpi_ec)); 1595 1596 - init_MUTEX(&ec_ecdt->sem); 1597 - init_waitqueue_head(&ec_ecdt->wait); 1598 - ec_ecdt->command_addr = ecdt_ptr->ec_control; 1599 - ec_ecdt->status_addr = ecdt_ptr->ec_control; 1600 - ec_ecdt->data_addr = ecdt_ptr->ec_data; 1601 - ec_ecdt->gpe_bit = ecdt_ptr->gpe_bit; 1602 /* use the GL just to be safe */ 1603 - ec_ecdt->global_lock = TRUE; 1604 - ec_ecdt->uid = ecdt_ptr->uid; 1605 1606 - status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle); 1607 if (ACPI_FAILURE(status)) { 1608 goto error; 1609 } ··· 1636 /* 1637 * Install GPE handler 1638 */ 1639 - status = acpi_install_gpe_handler(NULL, ec_ecdt->gpe_bit, 1640 ACPI_GPE_EDGE_TRIGGERED, &acpi_ec_gpe_handler, 1641 ec_ecdt); 1642 if (ACPI_FAILURE(status)) { 1643 goto error; 1644 } 1645 
- acpi_set_gpe_type (NULL, ec_ecdt->gpe_bit, ACPI_GPE_TYPE_RUNTIME); 1646 - acpi_enable_gpe (NULL, ec_ecdt->gpe_bit, ACPI_NOT_ISR); 1647 1648 status = acpi_install_address_space_handler (ACPI_ROOT_OBJECT, 1649 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, 1650 &acpi_ec_space_setup, ec_ecdt); 1651 if (ACPI_FAILURE(status)) { 1652 - acpi_remove_gpe_handler(NULL, ec_ecdt->gpe_bit, 1653 &acpi_ec_gpe_handler); 1654 goto error; 1655 } ··· 1667 1668 static int __init acpi_ec_init (void) 1669 { 1670 - int result; 1671 1672 ACPI_FUNCTION_TRACE("acpi_ec_init"); 1673 ··· 1711 return 0; 1712 } 1713 __setup("acpi_fake_ecdt", acpi_fake_ecdt_setup);
··· 59 #define ACPI_EC_DELAY 50 /* Wait 50ms max. during EC ops */ 60 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 61 62 + #define ACPI_EC_UDELAY 100 /* Poll @ 100us increments */ 63 + #define ACPI_EC_UDELAY_COUNT 1000 /* Wait 10ms max. during EC ops */ 64 + 65 #define ACPI_EC_COMMAND_READ 0x80 66 #define ACPI_EC_COMMAND_WRITE 0x81 67 #define ACPI_EC_BURST_ENABLE 0x82 68 #define ACPI_EC_BURST_DISABLE 0x83 69 #define ACPI_EC_COMMAND_QUERY 0x84 70 71 + #define EC_POLLING 0xFF 72 + #define EC_BURST 0x00 73 + 74 + 75 static int acpi_ec_remove (struct acpi_device *device, int type); 76 static int acpi_ec_start (struct acpi_device *device); 77 static int acpi_ec_stop (struct acpi_device *device, int type); 78 + static int acpi_ec_burst_add ( struct acpi_device *device); 79 80 static struct acpi_driver acpi_ec_driver = { 81 .name = ACPI_EC_DRIVER_NAME, 82 .class = ACPI_EC_CLASS, 83 .ids = ACPI_EC_HID, 84 .ops = { 85 + .add = acpi_ec_burst_add, 86 .remove = acpi_ec_remove, 87 .start = acpi_ec_start, 88 .stop = acpi_ec_stop, 89 }, 90 }; 91 + union acpi_ec { 92 + struct { 93 + u32 mode; 94 + acpi_handle handle; 95 + unsigned long uid; 96 + unsigned long gpe_bit; 97 + struct acpi_generic_address status_addr; 98 + struct acpi_generic_address command_addr; 99 + struct acpi_generic_address data_addr; 100 + unsigned long global_lock; 101 + } common; 102 103 + struct { 104 + u32 mode; 105 + acpi_handle handle; 106 + unsigned long uid; 107 + unsigned long gpe_bit; 108 + struct acpi_generic_address status_addr; 109 + struct acpi_generic_address command_addr; 110 + struct acpi_generic_address data_addr; 111 + unsigned long global_lock; 112 + unsigned int expect_event; 113 + atomic_t leaving_burst; /* 0 : No, 1 : Yes, 2: abort*/ 114 + atomic_t pending_gpe; 115 + struct semaphore sem; 116 + wait_queue_head_t wait; 117 + }burst; 118 + 119 + struct { 120 + u32 mode; 121 + acpi_handle handle; 122 + unsigned long uid; 123 + unsigned long gpe_bit; 124 + struct 
acpi_generic_address status_addr; 125 + struct acpi_generic_address command_addr; 126 + struct acpi_generic_address data_addr; 127 + unsigned long global_lock; 128 + spinlock_t lock; 129 + }polling; 130 }; 131 132 + static int acpi_ec_polling_wait ( union acpi_ec *ec, u8 event); 133 + static int acpi_ec_burst_wait(union acpi_ec *ec, unsigned int event); 134 + static int acpi_ec_polling_read ( union acpi_ec *ec, u8 address, u32 *data); 135 + static int acpi_ec_burst_read( union acpi_ec *ec, u8 address, u32 *data); 136 + static int acpi_ec_polling_write ( union acpi_ec *ec, u8 address, u8 data); 137 + static int acpi_ec_burst_write ( union acpi_ec *ec, u8 address, u8 data); 138 + static int acpi_ec_polling_query ( union acpi_ec *ec, u32 *data); 139 + static int acpi_ec_burst_query ( union acpi_ec *ec, u32 *data); 140 + static void acpi_ec_gpe_polling_query ( void *ec_cxt); 141 + static void acpi_ec_gpe_burst_query ( void *ec_cxt); 142 + static u32 acpi_ec_gpe_polling_handler ( void *data); 143 + static u32 acpi_ec_gpe_burst_handler ( void *data); 144 + static acpi_status __init 145 + acpi_fake_ecdt_polling_callback ( 146 + acpi_handle handle, 147 + u32 Level, 148 + void *context, 149 + void **retval); 150 + 151 + static acpi_status __init 152 + acpi_fake_ecdt_burst_callback ( 153 + acpi_handle handle, 154 + u32 Level, 155 + void *context, 156 + void **retval); 157 + 158 + static int __init 159 + acpi_ec_polling_get_real_ecdt(void); 160 + static int __init 161 + acpi_ec_burst_get_real_ecdt(void); 162 /* If we find an EC via the ECDT, we need to keep a ptr to its context */ 163 + static union acpi_ec *ec_ecdt; 164 165 /* External interfaces use first EC only, so remember */ 166 static struct acpi_device *first_ec; 167 + static int acpi_ec_polling_mode; 168 169 /* -------------------------------------------------------------------------- 170 Transaction Management 171 -------------------------------------------------------------------------- */ 172 173 + static inline 
u32 acpi_ec_read_status(union acpi_ec *ec) 174 { 175 u32 status = 0; 176 177 + acpi_hw_low_level_read(8, &status, &ec->common.status_addr); 178 return status; 179 } 180 181 + static int 182 + acpi_ec_wait ( 183 + union acpi_ec *ec, 184 + u8 event) 185 + { 186 + if (acpi_ec_polling_mode) 187 + return acpi_ec_polling_wait (ec, event); 188 + else 189 + return acpi_ec_burst_wait (ec, event); 190 + } 191 + 192 + static int 193 + acpi_ec_polling_wait ( 194 + union acpi_ec *ec, 195 + u8 event) 196 + { 197 + u32 acpi_ec_status = 0; 198 + u32 i = ACPI_EC_UDELAY_COUNT; 199 + 200 + if (!ec) 201 + return -EINVAL; 202 + 203 + /* Poll the EC status register waiting for the event to occur. */ 204 + switch (event) { 205 + case ACPI_EC_EVENT_OBF: 206 + do { 207 + acpi_hw_low_level_read(8, &acpi_ec_status, &ec->common.status_addr); 208 + if (acpi_ec_status & ACPI_EC_FLAG_OBF) 209 + return 0; 210 + udelay(ACPI_EC_UDELAY); 211 + } while (--i>0); 212 + break; 213 + case ACPI_EC_EVENT_IBE: 214 + do { 215 + acpi_hw_low_level_read(8, &acpi_ec_status, &ec->common.status_addr); 216 + if (!(acpi_ec_status & ACPI_EC_FLAG_IBF)) 217 + return 0; 218 + udelay(ACPI_EC_UDELAY); 219 + } while (--i>0); 220 + break; 221 + default: 222 + return -EINVAL; 223 + } 224 + 225 + return -ETIME; 226 + } 227 + static int acpi_ec_burst_wait(union acpi_ec *ec, unsigned int event) 228 { 229 int result = 0; 230 231 ACPI_FUNCTION_TRACE("acpi_ec_wait"); 232 233 + ec->burst.expect_event = event; 234 smp_mb(); 235 236 + result = wait_event_interruptible_timeout(ec->burst.wait, 237 + !ec->burst.expect_event, 238 msecs_to_jiffies(ACPI_EC_DELAY)); 239 240 + ec->burst.expect_event = 0; 241 smp_mb(); 242 243 if (result < 0){ ··· 160 161 static int 162 acpi_ec_enter_burst_mode ( 163 + union acpi_ec *ec) 164 { 165 u32 tmp = 0; 166 int status = 0; ··· 170 status = acpi_ec_read_status(ec); 171 if (status != -EINVAL && 172 !(status & ACPI_EC_FLAG_BURST)){ 173 + acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, 
&ec->common.command_addr); 174 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 175 if (status){ 176 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 177 return_VALUE(-EINVAL); 178 } 179 + acpi_hw_low_level_read(8, &tmp, &ec->common.data_addr); 180 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 181 if(tmp != 0x90 ) {/* Burst ACK byte*/ 182 return_VALUE(-EINVAL); 183 } 184 } 185 186 + atomic_set(&ec->burst.leaving_burst , 0); 187 return_VALUE(0); 188 } 189 190 static int 191 acpi_ec_leave_burst_mode ( 192 + union acpi_ec *ec) 193 { 194 int status =0; 195 196 ACPI_FUNCTION_TRACE("acpi_ec_leave_burst_mode"); 197 198 + atomic_set(&ec->burst.leaving_burst , 1); 199 status = acpi_ec_read_status(ec); 200 if (status != -EINVAL && 201 (status & ACPI_EC_FLAG_BURST)){ 202 + acpi_hw_low_level_write(8, ACPI_EC_BURST_DISABLE, &ec->common.command_addr); 203 status = acpi_ec_wait(ec, ACPI_EC_FLAG_IBF); 204 if (status){ 205 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 206 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,"------->wait fail\n")); 207 return_VALUE(-EINVAL); 208 } 209 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 210 status = acpi_ec_read_status(ec); 211 } 212 ··· 215 216 static int 217 acpi_ec_read ( 218 + union acpi_ec *ec, 219 + u8 address, 220 + u32 *data) 221 + { 222 + if (acpi_ec_polling_mode) 223 + return acpi_ec_polling_read(ec, address, data); 224 + else 225 + return acpi_ec_burst_read(ec, address, data); 226 + } 227 + static int 228 + acpi_ec_write ( 229 + union acpi_ec *ec, 230 + u8 address, 231 + u8 data) 232 + { 233 + if (acpi_ec_polling_mode) 234 + return acpi_ec_polling_write(ec, address, data); 235 + else 236 + return acpi_ec_burst_write(ec, address, data); 237 + } 238 + static int 239 + acpi_ec_polling_read ( 240 + union acpi_ec *ec, 241 + u8 address, 242 + u32 *data) 243 + { 244 + acpi_status status = AE_OK; 245 + int result = 0; 246 + unsigned long flags = 0; 247 + u32 glk = 0; 248 + 249 + 
ACPI_FUNCTION_TRACE("acpi_ec_read"); 250 + 251 + if (!ec || !data) 252 + return_VALUE(-EINVAL); 253 + 254 + *data = 0; 255 + 256 + if (ec->common.global_lock) { 257 + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 258 + if (ACPI_FAILURE(status)) 259 + return_VALUE(-ENODEV); 260 + } 261 + 262 + spin_lock_irqsave(&ec->polling.lock, flags); 263 + 264 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ, &ec->common.command_addr); 265 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 266 + if (result) 267 + goto end; 268 + 269 + acpi_hw_low_level_write(8, address, &ec->common.data_addr); 270 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 271 + if (result) 272 + goto end; 273 + 274 + acpi_hw_low_level_read(8, data, &ec->common.data_addr); 275 + 276 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Read [%02x] from address [%02x]\n", 277 + *data, address)); 278 + 279 + end: 280 + spin_unlock_irqrestore(&ec->polling.lock, flags); 281 + 282 + if (ec->common.global_lock) 283 + acpi_release_global_lock(glk); 284 + 285 + return_VALUE(result); 286 + } 287 + 288 + 289 + static int 290 + acpi_ec_polling_write ( 291 + union acpi_ec *ec, 292 + u8 address, 293 + u8 data) 294 + { 295 + int result = 0; 296 + acpi_status status = AE_OK; 297 + unsigned long flags = 0; 298 + u32 glk = 0; 299 + 300 + ACPI_FUNCTION_TRACE("acpi_ec_write"); 301 + 302 + if (!ec) 303 + return_VALUE(-EINVAL); 304 + 305 + if (ec->common.global_lock) { 306 + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 307 + if (ACPI_FAILURE(status)) 308 + return_VALUE(-ENODEV); 309 + } 310 + 311 + spin_lock_irqsave(&ec->polling.lock, flags); 312 + 313 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE, &ec->common.command_addr); 314 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 315 + if (result) 316 + goto end; 317 + 318 + acpi_hw_low_level_write(8, address, &ec->common.data_addr); 319 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 320 + if (result) 321 + goto end; 322 + 323 + acpi_hw_low_level_write(8, data, 
&ec->common.data_addr); 324 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 325 + if (result) 326 + goto end; 327 + 328 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Wrote [%02x] to address [%02x]\n", 329 + data, address)); 330 + 331 + end: 332 + spin_unlock_irqrestore(&ec->polling.lock, flags); 333 + 334 + if (ec->common.global_lock) 335 + acpi_release_global_lock(glk); 336 + 337 + return_VALUE(result); 338 + } 339 + 340 + static int 341 + acpi_ec_burst_read ( 342 + union acpi_ec *ec, 343 u8 address, 344 u32 *data) 345 { ··· 230 retry: 231 *data = 0; 232 233 + if (ec->common.global_lock) { 234 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 235 if (ACPI_FAILURE(status)) 236 return_VALUE(-ENODEV); 237 } 238 239 WARN_ON(in_interrupt()); 240 + down(&ec->burst.sem); 241 242 if(acpi_ec_enter_burst_mode(ec)) 243 goto end; 244 245 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ, &ec->common.command_addr); 246 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 247 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 248 if (status) { 249 goto end; 250 } 251 252 + acpi_hw_low_level_write(8, address, &ec->common.data_addr); 253 status= acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 254 if (status){ 255 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 256 goto end; 257 } 258 259 + acpi_hw_low_level_read(8, data, &ec->common.data_addr); 260 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 261 262 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Read [%02x] from address [%02x]\n", 263 *data, address)); 264 265 end: 266 acpi_ec_leave_burst_mode(ec); 267 + up(&ec->burst.sem); 268 269 + if (ec->common.global_lock) 270 acpi_release_global_lock(glk); 271 272 + if(atomic_read(&ec->burst.leaving_burst) == 2){ 273 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 274 + while(atomic_read(&ec->burst.pending_gpe)){ 275 msleep(1); 276 } 277 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 278 goto retry; 279 } 280 ··· 283 284 285 static int 286 + acpi_ec_burst_write ( 287 
+ union acpi_ec *ec, 288 u8 address, 289 u8 data) 290 { ··· 297 if (!ec) 298 return_VALUE(-EINVAL); 299 retry: 300 + if (ec->common.global_lock) { 301 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 302 if (ACPI_FAILURE(status)) 303 return_VALUE(-ENODEV); 304 } 305 306 WARN_ON(in_interrupt()); 307 + down(&ec->burst.sem); 308 309 if(acpi_ec_enter_burst_mode(ec)) 310 goto end; ··· 312 status = acpi_ec_read_status(ec); 313 if (status != -EINVAL && 314 !(status & ACPI_EC_FLAG_BURST)){ 315 + acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, &ec->common.command_addr); 316 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 317 if (status) 318 goto end; 319 + acpi_hw_low_level_read(8, &tmp, &ec->common.data_addr); 320 if(tmp != 0x90 ) /* Burst ACK byte*/ 321 goto end; 322 } 323 /*Now we are in burst mode*/ 324 325 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE, &ec->common.command_addr); 326 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 327 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 328 if (status){ 329 goto end; 330 } 331 332 + acpi_hw_low_level_write(8, address, &ec->common.data_addr); 333 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 334 if (status){ 335 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 336 goto end; 337 } 338 339 + acpi_hw_low_level_write(8, data, &ec->common.data_addr); 340 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 341 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 342 if (status) 343 goto end; 344 ··· 347 348 end: 349 acpi_ec_leave_burst_mode(ec); 350 + up(&ec->burst.sem); 351 352 + if (ec->common.global_lock) 353 acpi_release_global_lock(glk); 354 355 + if(atomic_read(&ec->burst.leaving_burst) == 2){ 356 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 357 + while(atomic_read(&ec->burst.pending_gpe)){ 358 msleep(1); 359 } 360 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 361 goto retry; 362 } 363 ··· 370 int 371 ec_read(u8 addr, u8 *val) 372 { 373 + union acpi_ec *ec; 374 int 
err; 375 u32 temp_data; 376 ··· 393 int 394 ec_write(u8 addr, u8 val) 395 { 396 + union acpi_ec *ec; 397 int err; 398 399 if (!first_ec) ··· 407 } 408 EXPORT_SYMBOL(ec_write); 409 410 static int 411 acpi_ec_query ( 412 + union acpi_ec *ec, 413 + u32 *data) 414 + { 415 + if (acpi_ec_polling_mode) 416 + return acpi_ec_polling_query(ec, data); 417 + else 418 + return acpi_ec_burst_query(ec, data); 419 + } 420 + static int 421 + acpi_ec_polling_query ( 422 + union acpi_ec *ec, 423 + u32 *data) 424 + { 425 + int result = 0; 426 + acpi_status status = AE_OK; 427 + unsigned long flags = 0; 428 + u32 glk = 0; 429 + 430 + ACPI_FUNCTION_TRACE("acpi_ec_query"); 431 + 432 + if (!ec || !data) 433 + return_VALUE(-EINVAL); 434 + 435 + *data = 0; 436 + 437 + if (ec->common.global_lock) { 438 + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 439 + if (ACPI_FAILURE(status)) 440 + return_VALUE(-ENODEV); 441 + } 442 + 443 + /* 444 + * Query the EC to find out which _Qxx method we need to evaluate. 445 + * Note that successful completion of the query causes the ACPI_EC_SCI 446 + * bit to be cleared (and thus clearing the interrupt source). 
447 + */ 448 + spin_lock_irqsave(&ec->polling.lock, flags); 449 + 450 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY, &ec->common.command_addr); 451 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 452 + if (result) 453 + goto end; 454 + 455 + acpi_hw_low_level_read(8, data, &ec->common.data_addr); 456 + if (!*data) 457 + result = -ENODATA; 458 + 459 + end: 460 + spin_unlock_irqrestore(&ec->polling.lock, flags); 461 + 462 + if (ec->common.global_lock) 463 + acpi_release_global_lock(glk); 464 + 465 + return_VALUE(result); 466 + } 467 + static int 468 + acpi_ec_burst_query ( 469 + union acpi_ec *ec, 470 u32 *data) 471 { 472 int status = 0; ··· 422 return_VALUE(-EINVAL); 423 *data = 0; 424 425 + if (ec->common.global_lock) { 426 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 427 if (ACPI_FAILURE(status)) 428 return_VALUE(-ENODEV); 429 } 430 431 + down(&ec->burst.sem); 432 if(acpi_ec_enter_burst_mode(ec)) 433 goto end; 434 /* ··· 436 * Note that successful completion of the query causes the ACPI_EC_SCI 437 * bit to be cleared (and thus clearing the interrupt source). 
438 */ 439 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY, &ec->common.command_addr); 440 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 441 if (status){ 442 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 443 goto end; 444 } 445 446 + acpi_hw_low_level_read(8, data, &ec->common.data_addr); 447 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 448 if (!*data) 449 status = -ENODATA; 450 451 end: 452 acpi_ec_leave_burst_mode(ec); 453 + up(&ec->burst.sem); 454 455 + if (ec->common.global_lock) 456 acpi_release_global_lock(glk); 457 458 + if(atomic_read(&ec->burst.leaving_burst) == 2){ 459 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 460 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 461 status = -ENODATA; 462 } 463 return_VALUE(status); ··· 468 Event Management 469 -------------------------------------------------------------------------- */ 470 471 + union acpi_ec_query_data { 472 acpi_handle handle; 473 u8 data; 474 }; ··· 477 acpi_ec_gpe_query ( 478 void *ec_cxt) 479 { 480 + if (acpi_ec_polling_mode) 481 + acpi_ec_gpe_polling_query(ec_cxt); 482 + else 483 + acpi_ec_gpe_burst_query(ec_cxt); 484 + } 485 + 486 + static void 487 + acpi_ec_gpe_polling_query ( 488 + void *ec_cxt) 489 + { 490 + union acpi_ec *ec = (union acpi_ec *) ec_cxt; 491 + u32 value = 0; 492 + unsigned long flags = 0; 493 + static char object_name[5] = {'_','Q','0','0','\0'}; 494 + const char hex[] = {'0','1','2','3','4','5','6','7', 495 + '8','9','A','B','C','D','E','F'}; 496 + 497 + ACPI_FUNCTION_TRACE("acpi_ec_gpe_query"); 498 + 499 + if (!ec_cxt) 500 + goto end; 501 + 502 + spin_lock_irqsave(&ec->polling.lock, flags); 503 + acpi_hw_low_level_read(8, &value, &ec->common.command_addr); 504 + spin_unlock_irqrestore(&ec->polling.lock, flags); 505 + 506 + /* TBD: Implement asynch events! 507 + * NOTE: All we care about are EC-SCI's. Other EC events are 508 + * handled via polling (yuck!). 
This is because some systems 509 + * treat EC-SCIs as level (versus EDGE!) triggered, preventing 510 + * a purely interrupt-driven approach (grumble, grumble). 511 + */ 512 + if (!(value & ACPI_EC_FLAG_SCI)) 513 + goto end; 514 + 515 + if (acpi_ec_query(ec, &value)) 516 + goto end; 517 + 518 + object_name[2] = hex[((value >> 4) & 0x0F)]; 519 + object_name[3] = hex[(value & 0x0F)]; 520 + 521 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s\n", object_name)); 522 + 523 + acpi_evaluate_object(ec->common.handle, object_name, NULL, NULL); 524 + 525 + end: 526 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 527 + } 528 + static void 529 + acpi_ec_gpe_burst_query ( 530 + void *ec_cxt) 531 + { 532 + union acpi_ec *ec = (union acpi_ec *) ec_cxt; 533 u32 value; 534 int result = -ENODATA; 535 static char object_name[5] = {'_','Q','0','0','\0'}; ··· 497 498 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s\n", object_name)); 499 500 + acpi_evaluate_object(ec->common.handle, object_name, NULL, NULL); 501 end: 502 + atomic_dec(&ec->burst.pending_gpe); 503 return; 504 } 505 ··· 507 acpi_ec_gpe_handler ( 508 void *data) 509 { 510 + if (acpi_ec_polling_mode) 511 + return acpi_ec_gpe_polling_handler(data); 512 + else 513 + return acpi_ec_gpe_burst_handler(data); 514 + } 515 + static u32 516 + acpi_ec_gpe_polling_handler ( 517 + void *data) 518 + { 519 acpi_status status = AE_OK; 520 + union acpi_ec *ec = (union acpi_ec *) data; 521 522 if (!ec) 523 return ACPI_INTERRUPT_NOT_HANDLED; 524 525 + acpi_disable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR); 526 + 527 + status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, 528 + acpi_ec_gpe_query, ec); 529 + 530 + if (status == AE_OK) 531 + return ACPI_INTERRUPT_HANDLED; 532 + else 533 + return ACPI_INTERRUPT_NOT_HANDLED; 534 + } 535 + static u32 536 + acpi_ec_gpe_burst_handler ( 537 + void *data) 538 + { 539 + acpi_status status = AE_OK; 540 + u32 value; 541 + union acpi_ec *ec = (union acpi_ec *) data; 542 + 543 + if (!ec) 544 + 
return ACPI_INTERRUPT_NOT_HANDLED; 545 + 546 + acpi_disable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR); 547 548 value = acpi_ec_read_status(ec); 549 550 if((value & ACPI_EC_FLAG_IBF) && 551 !(value & ACPI_EC_FLAG_BURST) && 552 + (atomic_read(&ec->burst.leaving_burst) == 0)) { 553 /* 554 * the embedded controller disables 555 * burst mode for any reason other 556 * than the burst disable command 557 * to process critical event. 558 */ 559 + atomic_set(&ec->burst.leaving_burst , 2); /* block current pending transaction 560 and retry */ 561 + wake_up(&ec->burst.wait); 562 }else { 563 + if ((ec->burst.expect_event == ACPI_EC_EVENT_OBF && 564 (value & ACPI_EC_FLAG_OBF)) || 565 + (ec->burst.expect_event == ACPI_EC_EVENT_IBE && 566 !(value & ACPI_EC_FLAG_IBF))) { 567 + ec->burst.expect_event = 0; 568 + wake_up(&ec->burst.wait); 569 return ACPI_INTERRUPT_HANDLED; 570 } 571 } 572 573 if (value & ACPI_EC_FLAG_SCI){ 574 + atomic_add(1, &ec->burst.pending_gpe) ; 575 status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, 576 acpi_ec_gpe_query, ec); 577 return status == AE_OK ? 578 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; 579 } 580 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR); 581 return status == AE_OK ? 
582 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; 583 } ··· 585 void *region_context) 586 { 587 int result = 0; 588 + union acpi_ec *ec = NULL; 589 u64 temp = *value; 590 acpi_integer f_v = 0; 591 int i = 0; ··· 600 return_VALUE(AE_BAD_PARAMETER); 601 } 602 603 + ec = (union acpi_ec *) handler_context; 604 605 next_byte: 606 switch (function) { ··· 661 static int 662 acpi_ec_read_info (struct seq_file *seq, void *offset) 663 { 664 + union acpi_ec *ec = (union acpi_ec *) seq->private; 665 666 ACPI_FUNCTION_TRACE("acpi_ec_read_info"); 667 ··· 669 goto end; 670 671 seq_printf(seq, "gpe bit: 0x%02x\n", 672 + (u32) ec->common.gpe_bit); 673 seq_printf(seq, "ports: 0x%02x, 0x%02x\n", 674 + (u32) ec->common.status_addr.address, (u32) ec->common.data_addr.address); 675 seq_printf(seq, "use global lock: %s\n", 676 + ec->common.global_lock?"yes":"no"); 677 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 678 679 end: 680 return_VALUE(0); ··· 697 acpi_ec_add_fs ( 698 struct acpi_device *device) 699 { 700 + struct proc_dir_entry *entry = NULL; 701 702 ACPI_FUNCTION_TRACE("acpi_ec_add_fs"); 703 ··· 744 Driver Interface 745 -------------------------------------------------------------------------- */ 746 747 + 748 static int 749 + acpi_ec_polling_add ( 750 struct acpi_device *device) 751 { 752 + int result = 0; 753 + acpi_status status = AE_OK; 754 + union acpi_ec *ec = NULL; 755 unsigned long uid; 756 757 ACPI_FUNCTION_TRACE("acpi_ec_add"); ··· 758 if (!device) 759 return_VALUE(-EINVAL); 760 761 + ec = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 762 if (!ec) 763 return_VALUE(-ENOMEM); 764 + memset(ec, 0, sizeof(union acpi_ec)); 765 766 + ec->common.handle = device->handle; 767 + ec->common.uid = -1; 768 + spin_lock_init(&ec->polling.lock); 769 strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); 770 strcpy(acpi_device_class(device), ACPI_EC_CLASS); 771 acpi_driver_data(device) = ec; 772 773 /* Use the global lock for all EC transactions? 
*/ 774 + acpi_evaluate_integer(ec->common.handle, "_GLK", NULL, &ec->common.global_lock); 775 776 /* If our UID matches the UID for the ECDT-enumerated EC, 777 we now have the *real* EC info, so kill the makeshift one.*/ 778 + acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid); 779 + if (ec_ecdt && ec_ecdt->common.uid == uid) { 780 acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, 781 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 782 + 783 + acpi_remove_gpe_handler(NULL, ec_ecdt->common.gpe_bit, &acpi_ec_gpe_handler); 784 785 kfree(ec_ecdt); 786 } 787 788 /* Get GPE bit assignment (EC events). */ 789 /* TODO: Add support for _GPE returning a package */ 790 + status = acpi_evaluate_integer(ec->common.handle, "_GPE", NULL, &ec->common.gpe_bit); 791 if (ACPI_FAILURE(status)) { 792 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 793 "Error obtaining GPE bit assignment\n")); ··· 804 805 printk(KERN_INFO PREFIX "%s [%s] (gpe %d)\n", 806 acpi_device_name(device), acpi_device_bid(device), 807 + (u32) ec->common.gpe_bit); 808 + 809 + if (!first_ec) 810 + first_ec = device; 811 + 812 + end: 813 + if (result) 814 + kfree(ec); 815 + 816 + return_VALUE(result); 817 + } 818 + static int 819 + acpi_ec_burst_add ( 820 + struct acpi_device *device) 821 + { 822 + int result = 0; 823 + acpi_status status = AE_OK; 824 + union acpi_ec *ec = NULL; 825 + unsigned long uid; 826 + 827 + ACPI_FUNCTION_TRACE("acpi_ec_add"); 828 + 829 + if (!device) 830 + return_VALUE(-EINVAL); 831 + 832 + ec = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 833 + if (!ec) 834 + return_VALUE(-ENOMEM); 835 + memset(ec, 0, sizeof(union acpi_ec)); 836 + 837 + ec->common.handle = device->handle; 838 + ec->common.uid = -1; 839 + atomic_set(&ec->burst.pending_gpe, 0); 840 + atomic_set(&ec->burst.leaving_burst , 1); 841 + init_MUTEX(&ec->burst.sem); 842 + init_waitqueue_head(&ec->burst.wait); 843 + strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); 844 + strcpy(acpi_device_class(device), ACPI_EC_CLASS); 845 + 
acpi_driver_data(device) = ec; 846 + 847 + /* Use the global lock for all EC transactions? */ 848 + acpi_evaluate_integer(ec->common.handle, "_GLK", NULL, &ec->common.global_lock); 849 + 850 + /* If our UID matches the UID for the ECDT-enumerated EC, 851 + we now have the *real* EC info, so kill the makeshift one.*/ 852 + acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid); 853 + if (ec_ecdt && ec_ecdt->common.uid == uid) { 854 + acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, 855 + ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 856 + 857 + acpi_remove_gpe_handler(NULL, ec_ecdt->common.gpe_bit, &acpi_ec_gpe_handler); 858 + 859 + kfree(ec_ecdt); 860 + } 861 + 862 + /* Get GPE bit assignment (EC events). */ 863 + /* TODO: Add support for _GPE returning a package */ 864 + status = acpi_evaluate_integer(ec->common.handle, "_GPE", NULL, &ec->common.gpe_bit); 865 + if (ACPI_FAILURE(status)) { 866 + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 867 + "Error obtaining GPE bit assignment\n")); 868 + result = -ENODEV; 869 + goto end; 870 + } 871 + 872 + result = acpi_ec_add_fs(device); 873 + if (result) 874 + goto end; 875 + 876 + printk(KERN_INFO PREFIX "%s [%s] (gpe %d)\n", 877 + acpi_device_name(device), acpi_device_bid(device), 878 + (u32) ec->common.gpe_bit); 879 880 if (!first_ec) 881 first_ec = device; ··· 822 struct acpi_device *device, 823 int type) 824 { 825 + union acpi_ec *ec = NULL; 826 827 ACPI_FUNCTION_TRACE("acpi_ec_remove"); 828 ··· 844 struct acpi_resource *resource, 845 void *context) 846 { 847 + union acpi_ec *ec = (union acpi_ec *) context; 848 struct acpi_generic_address *addr; 849 850 if (resource->id != ACPI_RSTYPE_IO) { ··· 856 * the second address region returned is the status/command 857 * port. 
858 */ 859 + if (ec->common.data_addr.register_bit_width == 0) { 860 + addr = &ec->common.data_addr; 861 + } else if (ec->common.command_addr.register_bit_width == 0) { 862 + addr = &ec->common.command_addr; 863 } else { 864 return AE_CTRL_TERMINATE; 865 } ··· 877 acpi_ec_start ( 878 struct acpi_device *device) 879 { 880 + acpi_status status = AE_OK; 881 + union acpi_ec *ec = NULL; 882 883 ACPI_FUNCTION_TRACE("acpi_ec_start"); 884 ··· 893 /* 894 * Get I/O port addresses. Convert to GAS format. 895 */ 896 + status = acpi_walk_resources(ec->common.handle, METHOD_NAME__CRS, 897 acpi_ec_io_ports, ec); 898 + if (ACPI_FAILURE(status) || ec->common.command_addr.register_bit_width == 0) { 899 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error getting I/O port addresses")); 900 return_VALUE(-ENODEV); 901 } 902 903 + ec->common.status_addr = ec->common.command_addr; 904 905 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "gpe=0x%02x, ports=0x%2x,0x%2x\n", 906 + (u32) ec->common.gpe_bit, (u32) ec->common.command_addr.address, 907 + (u32) ec->common.data_addr.address)); 908 + 909 910 /* 911 * Install GPE handler 912 */ 913 + status = acpi_install_gpe_handler(NULL, ec->common.gpe_bit, 914 ACPI_GPE_EDGE_TRIGGERED, &acpi_ec_gpe_handler, ec); 915 if (ACPI_FAILURE(status)) { 916 return_VALUE(-ENODEV); 917 } 918 + acpi_set_gpe_type (NULL, ec->common.gpe_bit, ACPI_GPE_TYPE_RUNTIME); 919 + acpi_enable_gpe (NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 920 921 + status = acpi_install_address_space_handler (ec->common.handle, 922 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, 923 &acpi_ec_space_setup, ec); 924 if (ACPI_FAILURE(status)) { 925 + acpi_remove_gpe_handler(NULL, ec->common.gpe_bit, &acpi_ec_gpe_handler); 926 return_VALUE(-ENODEV); 927 } 928 ··· 934 struct acpi_device *device, 935 int type) 936 { 937 + acpi_status status = AE_OK; 938 + union acpi_ec *ec = NULL; 939 940 ACPI_FUNCTION_TRACE("acpi_ec_stop"); 941 ··· 944 945 ec = acpi_driver_data(device); 946 947 + status = 
acpi_remove_address_space_handler(ec->common.handle, 948 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 949 if (ACPI_FAILURE(status)) 950 return_VALUE(-ENODEV); 951 952 + status = acpi_remove_gpe_handler(NULL, ec->common.gpe_bit, &acpi_ec_gpe_handler); 953 if (ACPI_FAILURE(status)) 954 return_VALUE(-ENODEV); 955 ··· 963 void *context, 964 void **retval) 965 { 966 + 967 + if (acpi_ec_polling_mode) 968 + return acpi_fake_ecdt_polling_callback(handle, 969 + Level, context, retval); 970 + else 971 + return acpi_fake_ecdt_burst_callback(handle, 972 + Level, context, retval); 973 + } 974 + 975 + static acpi_status __init 976 + acpi_fake_ecdt_polling_callback ( 977 + acpi_handle handle, 978 + u32 Level, 979 + void *context, 980 + void **retval) 981 + { 982 acpi_status status; 983 984 status = acpi_walk_resources(handle, METHOD_NAME__CRS, 985 acpi_ec_io_ports, ec_ecdt); 986 if (ACPI_FAILURE(status)) 987 return status; 988 + ec_ecdt->common.status_addr = ec_ecdt->common.command_addr; 989 990 + ec_ecdt->common.uid = -1; 991 + acpi_evaluate_integer(handle, "_UID", NULL, &ec_ecdt->common.uid); 992 993 + status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec_ecdt->common.gpe_bit); 994 if (ACPI_FAILURE(status)) 995 return status; 996 + spin_lock_init(&ec_ecdt->polling.lock); 997 + ec_ecdt->common.global_lock = TRUE; 998 + ec_ecdt->common.handle = handle; 999 1000 printk(KERN_INFO PREFIX "GPE=0x%02x, ports=0x%2x, 0x%2x\n", 1001 + (u32) ec_ecdt->common.gpe_bit, (u32) ec_ecdt->common.command_addr.address, 1002 + (u32) ec_ecdt->common.data_addr.address); 1003 + 1004 + return AE_CTRL_TERMINATE; 1005 + } 1006 + 1007 + static acpi_status __init 1008 + acpi_fake_ecdt_burst_callback ( 1009 + acpi_handle handle, 1010 + u32 Level, 1011 + void *context, 1012 + void **retval) 1013 + { 1014 + acpi_status status; 1015 + 1016 + init_MUTEX(&ec_ecdt->burst.sem); 1017 + init_waitqueue_head(&ec_ecdt->burst.wait); 1018 + status = acpi_walk_resources(handle, METHOD_NAME__CRS, 1019 + acpi_ec_io_ports, 
ec_ecdt); 1020 + if (ACPI_FAILURE(status)) 1021 + return status; 1022 + ec_ecdt->common.status_addr = ec_ecdt->common.command_addr; 1023 + 1024 + ec_ecdt->common.uid = -1; 1025 + acpi_evaluate_integer(handle, "_UID", NULL, &ec_ecdt->common.uid); 1026 + 1027 + status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec_ecdt->common.gpe_bit); 1028 + if (ACPI_FAILURE(status)) 1029 + return status; 1030 + ec_ecdt->common.global_lock = TRUE; 1031 + ec_ecdt->common.handle = handle; 1032 + 1033 + printk(KERN_INFO PREFIX "GPE=0x%02x, ports=0x%2x, 0x%2x\n", 1034 + (u32) ec_ecdt->common.gpe_bit, (u32) ec_ecdt->common.command_addr.address, 1035 + (u32) ec_ecdt->common.data_addr.address); 1036 1037 return AE_CTRL_TERMINATE; 1038 } ··· 1005 1006 printk(KERN_INFO PREFIX "Try to make an fake ECDT\n"); 1007 1008 + ec_ecdt = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 1009 if (!ec_ecdt) { 1010 ret = -ENOMEM; 1011 goto error; 1012 } 1013 + memset(ec_ecdt, 0, sizeof(union acpi_ec)); 1014 1015 status = acpi_get_devices (ACPI_EC_HID, 1016 acpi_fake_ecdt_callback, ··· 1031 static int __init 1032 acpi_ec_get_real_ecdt(void) 1033 { 1034 + if (acpi_ec_polling_mode) 1035 + return acpi_ec_polling_get_real_ecdt(); 1036 + else 1037 + return acpi_ec_burst_get_real_ecdt(); 1038 + } 1039 + 1040 + static int __init 1041 + acpi_ec_polling_get_real_ecdt(void) 1042 + { 1043 + acpi_status status; 1044 + struct acpi_table_ecdt *ecdt_ptr; 1045 + 1046 + status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING, 1047 + (struct acpi_table_header **) &ecdt_ptr); 1048 + if (ACPI_FAILURE(status)) 1049 + return -ENODEV; 1050 + 1051 + printk(KERN_INFO PREFIX "Found ECDT\n"); 1052 + 1053 + /* 1054 + * Generate a temporary ec context to use until the namespace is scanned 1055 + */ 1056 + ec_ecdt = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 1057 + if (!ec_ecdt) 1058 + return -ENOMEM; 1059 + memset(ec_ecdt, 0, sizeof(union acpi_ec)); 1060 + 1061 + ec_ecdt->common.command_addr = ecdt_ptr->ec_control; 1062 + 
ec_ecdt->common.status_addr = ecdt_ptr->ec_control; 1063 + ec_ecdt->common.data_addr = ecdt_ptr->ec_data; 1064 + ec_ecdt->common.gpe_bit = ecdt_ptr->gpe_bit; 1065 + spin_lock_init(&ec_ecdt->polling.lock); 1066 + /* use the GL just to be safe */ 1067 + ec_ecdt->common.global_lock = TRUE; 1068 + ec_ecdt->common.uid = ecdt_ptr->uid; 1069 + 1070 + status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->common.handle); 1071 + if (ACPI_FAILURE(status)) { 1072 + goto error; 1073 + } 1074 + 1075 + return 0; 1076 + error: 1077 + printk(KERN_ERR PREFIX "Could not use ECDT\n"); 1078 + kfree(ec_ecdt); 1079 + ec_ecdt = NULL; 1080 + 1081 + return -ENODEV; 1082 + } 1083 + 1084 + 1085 + static int __init 1086 + acpi_ec_burst_get_real_ecdt(void) 1087 + { 1088 acpi_status status; 1089 struct acpi_table_ecdt *ecdt_ptr; 1090 ··· 1044 /* 1045 * Generate a temporary ec context to use until the namespace is scanned 1046 */ 1047 + ec_ecdt = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 1048 if (!ec_ecdt) 1049 return -ENOMEM; 1050 + memset(ec_ecdt, 0, sizeof(union acpi_ec)); 1051 1052 + init_MUTEX(&ec_ecdt->burst.sem); 1053 + init_waitqueue_head(&ec_ecdt->burst.wait); 1054 + ec_ecdt->common.command_addr = ecdt_ptr->ec_control; 1055 + ec_ecdt->common.status_addr = ecdt_ptr->ec_control; 1056 + ec_ecdt->common.data_addr = ecdt_ptr->ec_data; 1057 + ec_ecdt->common.gpe_bit = ecdt_ptr->gpe_bit; 1058 /* use the GL just to be safe */ 1059 + ec_ecdt->common.global_lock = TRUE; 1060 + ec_ecdt->common.uid = ecdt_ptr->uid; 1061 1062 + status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->common.handle); 1063 if (ACPI_FAILURE(status)) { 1064 goto error; 1065 } ··· 1092 /* 1093 * Install GPE handler 1094 */ 1095 + status = acpi_install_gpe_handler(NULL, ec_ecdt->common.gpe_bit, 1096 ACPI_GPE_EDGE_TRIGGERED, &acpi_ec_gpe_handler, 1097 ec_ecdt); 1098 if (ACPI_FAILURE(status)) { 1099 goto error; 1100 } 1101 + acpi_set_gpe_type (NULL, ec_ecdt->common.gpe_bit, ACPI_GPE_TYPE_RUNTIME); 1102 + 
acpi_enable_gpe (NULL, ec_ecdt->common.gpe_bit, ACPI_NOT_ISR); 1103 1104 status = acpi_install_address_space_handler (ACPI_ROOT_OBJECT, 1105 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, 1106 &acpi_ec_space_setup, ec_ecdt); 1107 if (ACPI_FAILURE(status)) { 1108 + acpi_remove_gpe_handler(NULL, ec_ecdt->common.gpe_bit, 1109 &acpi_ec_gpe_handler); 1110 goto error; 1111 } ··· 1123 1124 static int __init acpi_ec_init (void) 1125 { 1126 + int result = 0; 1127 1128 ACPI_FUNCTION_TRACE("acpi_ec_init"); 1129 ··· 1167 return 0; 1168 } 1169 __setup("acpi_fake_ecdt", acpi_fake_ecdt_setup); 1170 + static int __init acpi_ec_set_polling_mode(char *str) 1171 + { 1172 + acpi_ec_polling_mode = EC_POLLING; 1173 + acpi_ec_driver.ops.add = acpi_ec_polling_add; 1174 + return 0; 1175 + } 1176 + __setup("ec_polling", acpi_ec_set_polling_mode);
+59 -26
drivers/acpi/pci_irq.c
··· 269 /* -------------------------------------------------------------------------- 270 PCI Interrupt Routing Support 271 -------------------------------------------------------------------------- */ 272 273 /* 274 * acpi_pci_irq_lookup 275 * success: return IRQ >= 0 ··· 326 int pin, 327 int *edge_level, 328 int *active_high_low, 329 - char **link) 330 { 331 struct acpi_prt_entry *entry = NULL; 332 int segment = pci_domain_nr(bus); 333 int bus_nr = bus->number; 334 - int irq; 335 336 ACPI_FUNCTION_TRACE("acpi_pci_irq_lookup"); 337 ··· 346 return_VALUE(-1); 347 } 348 349 - if (entry->link.handle) { 350 - irq = acpi_pci_link_get_irq(entry->link.handle, 351 - entry->link.index, edge_level, active_high_low, link); 352 - if (irq < 0) { 353 - ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid IRQ link routing entry\n")); 354 - return_VALUE(-1); 355 - } 356 - } else { 357 - irq = entry->link.index; 358 - *edge_level = ACPI_LEVEL_SENSITIVE; 359 - *active_high_low = ACPI_ACTIVE_LOW; 360 - } 361 - 362 - ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found IRQ %d\n", irq)); 363 - 364 - return_VALUE(irq); 365 } 366 367 /* ··· 361 int pin, 362 int *edge_level, 363 int *active_high_low, 364 - char **link) 365 { 366 struct pci_dev *bridge = dev; 367 int irq = -1; ··· 395 } 396 397 irq = acpi_pci_irq_lookup(bridge->bus, PCI_SLOT(bridge->devfn), 398 - pin, edge_level, active_high_low, link); 399 } 400 401 if (irq < 0) { ··· 447 * values override any BIOS-assigned IRQs set during boot. 
448 */ 449 irq = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin, 450 - &edge_level, &active_high_low, &link); 451 452 /* 453 * If no PRT entry was found, we'll try to derive an IRQ from the ··· 455 */ 456 if (irq < 0) 457 irq = acpi_pci_irq_derive(dev, pin, &edge_level, 458 - &active_high_low, &link); 459 460 /* 461 * No IRQ known to the ACPI subsystem - maybe the BIOS / ··· 494 EXPORT_SYMBOL(acpi_pci_irq_enable); 495 496 497 - #ifdef CONFIG_ACPI_DEALLOCATE_IRQ 498 void 499 acpi_pci_irq_disable ( 500 struct pci_dev *dev) ··· 523 * First we check the PCI IRQ routing table (PRT) for an IRQ. 524 */ 525 gsi = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin, 526 - &edge_level, &active_high_low, NULL); 527 /* 528 * If no PRT entry was found, we'll try to derive an IRQ from the 529 * device's parent bridge. 530 */ 531 if (gsi < 0) 532 gsi = acpi_pci_irq_derive(dev, pin, 533 - &edge_level, &active_high_low, NULL); 534 if (gsi < 0) 535 return_VOID; 536 ··· 546 547 return_VOID; 548 } 549 - #endif /* CONFIG_ACPI_DEALLOCATE_IRQ */
··· 269 /* -------------------------------------------------------------------------- 270 PCI Interrupt Routing Support 271 -------------------------------------------------------------------------- */ 272 + typedef int (*irq_lookup_func)(struct acpi_prt_entry *, int *, int *, char **); 273 274 + static int 275 + acpi_pci_allocate_irq(struct acpi_prt_entry *entry, 276 + int *edge_level, 277 + int *active_high_low, 278 + char **link) 279 + { 280 + int irq; 281 + 282 + ACPI_FUNCTION_TRACE("acpi_pci_allocate_irq"); 283 + 284 + if (entry->link.handle) { 285 + irq = acpi_pci_link_allocate_irq(entry->link.handle, 286 + entry->link.index, edge_level, active_high_low, link); 287 + if (irq < 0) { 288 + ACPI_DEBUG_PRINT((ACPI_DB_WARN, "Invalid IRQ link routing entry\n")); 289 + return_VALUE(-1); 290 + } 291 + } else { 292 + irq = entry->link.index; 293 + *edge_level = ACPI_LEVEL_SENSITIVE; 294 + *active_high_low = ACPI_ACTIVE_LOW; 295 + } 296 + 297 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found IRQ %d\n", irq)); 298 + return_VALUE(irq); 299 + } 300 + 301 + static int 302 + acpi_pci_free_irq(struct acpi_prt_entry *entry, 303 + int *edge_level, 304 + int *active_high_low, 305 + char **link) 306 + { 307 + int irq; 308 + 309 + ACPI_FUNCTION_TRACE("acpi_pci_free_irq"); 310 + if (entry->link.handle) { 311 + irq = acpi_pci_link_free_irq(entry->link.handle); 312 + } else { 313 + irq = entry->link.index; 314 + } 315 + return_VALUE(irq); 316 + } 317 /* 318 * acpi_pci_irq_lookup 319 * success: return IRQ >= 0 ··· 282 int pin, 283 int *edge_level, 284 int *active_high_low, 285 + char **link, 286 + irq_lookup_func func) 287 { 288 struct acpi_prt_entry *entry = NULL; 289 int segment = pci_domain_nr(bus); 290 int bus_nr = bus->number; 291 + int ret; 292 293 ACPI_FUNCTION_TRACE("acpi_pci_irq_lookup"); 294 ··· 301 return_VALUE(-1); 302 } 303 304 + ret = func(entry, edge_level, active_high_low, link); 305 + return_VALUE(ret); 306 } 307 308 /* ··· 330 int pin, 331 int *edge_level, 332 int 
*active_high_low, 333 + char **link, 334 + irq_lookup_func func) 335 { 336 struct pci_dev *bridge = dev; 337 int irq = -1; ··· 363 } 364 365 irq = acpi_pci_irq_lookup(bridge->bus, PCI_SLOT(bridge->devfn), 366 + pin, edge_level, active_high_low, link, func); 367 } 368 369 if (irq < 0) { ··· 415 * values override any BIOS-assigned IRQs set during boot. 416 */ 417 irq = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin, 418 + &edge_level, &active_high_low, &link, acpi_pci_allocate_irq); 419 420 /* 421 * If no PRT entry was found, we'll try to derive an IRQ from the ··· 423 */ 424 if (irq < 0) 425 irq = acpi_pci_irq_derive(dev, pin, &edge_level, 426 + &active_high_low, &link, acpi_pci_allocate_irq); 427 428 /* 429 * No IRQ known to the ACPI subsystem - maybe the BIOS / ··· 462 EXPORT_SYMBOL(acpi_pci_irq_enable); 463 464 465 + /* FIXME: implement x86/x86_64 version */ 466 + void __attribute__((weak)) acpi_unregister_gsi(u32 i) {} 467 + 468 void 469 acpi_pci_irq_disable ( 470 struct pci_dev *dev) ··· 489 * First we check the PCI IRQ routing table (PRT) for an IRQ. 490 */ 491 gsi = acpi_pci_irq_lookup(dev->bus, PCI_SLOT(dev->devfn), pin, 492 + &edge_level, &active_high_low, NULL, acpi_pci_free_irq); 493 /* 494 * If no PRT entry was found, we'll try to derive an IRQ from the 495 * device's parent bridge. 496 */ 497 if (gsi < 0) 498 gsi = acpi_pci_irq_derive(dev, pin, 499 + &edge_level, &active_high_low, NULL, acpi_pci_free_irq); 500 if (gsi < 0) 501 return_VOID; 502 ··· 512 513 return_VOID; 514 }
+87 -16
drivers/acpi/pci_link.c
··· 68 }, 69 }; 70 71 struct acpi_pci_link_irq { 72 u8 active; /* Current IRQ */ 73 u8 edge_level; /* All IRQs */ ··· 80 u8 possible_count; 81 u8 possible[ACPI_PCI_LINK_MAX_POSSIBLE]; 82 u8 initialized:1; 83 - u8 suspend_resume:1; 84 - u8 reserved:6; 85 }; 86 87 struct acpi_pci_link { ··· 88 struct acpi_device *device; 89 acpi_handle handle; 90 struct acpi_pci_link_irq irq; 91 }; 92 93 static struct { 94 int count; 95 struct list_head entries; 96 } acpi_link; 97 98 99 /* -------------------------------------------------------------------------- ··· 537 538 ACPI_FUNCTION_TRACE("acpi_pci_link_allocate"); 539 540 - if (link->irq.suspend_resume) { 541 - acpi_pci_link_set(link, link->irq.active); 542 - link->irq.suspend_resume = 0; 543 - } 544 - if (link->irq.initialized) 545 return_VALUE(0); 546 547 /* 548 * search for active IRQ in list of possible IRQs. ··· 601 } 602 603 /* 604 - * acpi_pci_link_get_irq 605 * success: return IRQ >= 0 606 * failure: return -1 607 */ 608 609 int 610 - acpi_pci_link_get_irq ( 611 acpi_handle handle, 612 int index, 613 int *edge_level, ··· 618 struct acpi_device *device = NULL; 619 struct acpi_pci_link *link = NULL; 620 621 - ACPI_FUNCTION_TRACE("acpi_pci_link_get_irq"); 622 623 result = acpi_bus_get_device(handle, &device); 624 if (result) { ··· 638 return_VALUE(-1); 639 } 640 641 - if (acpi_pci_link_allocate(link)) 642 return_VALUE(-1); 643 644 if (!link->irq.active) { 645 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link active IRQ is 0!\n")); 646 return_VALUE(-1); 647 } 648 649 if (edge_level) *edge_level = link->irq.edge_level; 650 if (active_high_low) *active_high_low = link->irq.active_high_low; 651 if (name) *name = acpi_device_bid(link->device); 652 return_VALUE(link->irq.active); 653 } 654 655 656 /* -------------------------------------------------------------------------- 657 Driver Interface 658 -------------------------------------------------------------------------- */ ··· 731 strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS); 
732 acpi_driver_data(device) = link; 733 734 result = acpi_pci_link_get_possible(link); 735 if (result) 736 goto end; ··· 767 end: 768 /* disable all links -- to be activated on use */ 769 acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL); 770 771 if (result) 772 kfree(link); ··· 782 { 783 struct list_head *node = NULL; 784 struct acpi_pci_link *link = NULL; 785 786 ACPI_FUNCTION_TRACE("irqrouter_suspend"); 787 788 list_for_each(node, &acpi_link.entries) { 789 link = list_entry(node, struct acpi_pci_link, node); 790 if (!link) { 791 - ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid link context\n")); 792 continue; 793 } 794 - if (link->irq.active && link->irq.initialized) 795 - link->irq.suspend_resume = 1; 796 } 797 - return_VALUE(0); 798 } 799 800 ··· 825 826 link = (struct acpi_pci_link *) acpi_driver_data(device); 827 828 - /* TBD: Acquire/release lock */ 829 list_del(&link->node); 830 831 kfree(link); 832 ··· 919 __setup("acpi_irq_balance", acpi_irq_balance_set); 920 921 922 static struct sysdev_class irqrouter_sysdev_class = { 923 set_kset_name("irqrouter"), 924 .suspend = irqrouter_suspend,
··· 68 }, 69 }; 70 71 + /* 72 + * If a link is initialized, we never change its active and initialized 73 + * later even if the link is disabled. Instead, we just repick the active irq 74 + */ 75 struct acpi_pci_link_irq { 76 u8 active; /* Current IRQ */ 77 u8 edge_level; /* All IRQs */ ··· 76 u8 possible_count; 77 u8 possible[ACPI_PCI_LINK_MAX_POSSIBLE]; 78 u8 initialized:1; 79 + u8 reserved:7; 80 }; 81 82 struct acpi_pci_link { ··· 85 struct acpi_device *device; 86 acpi_handle handle; 87 struct acpi_pci_link_irq irq; 88 + int refcnt; 89 }; 90 91 static struct { 92 int count; 93 struct list_head entries; 94 } acpi_link; 95 + DECLARE_MUTEX(acpi_link_lock); 96 97 98 /* -------------------------------------------------------------------------- ··· 532 533 ACPI_FUNCTION_TRACE("acpi_pci_link_allocate"); 534 535 + if (link->irq.initialized) { 536 + if (link->refcnt == 0) 537 + /* This means the link is disabled but initialized */ 538 + acpi_pci_link_set(link, link->irq.active); 539 return_VALUE(0); 540 + } 541 542 /* 543 * search for active IRQ in list of possible IRQs. 
··· 596 } 597 598 /* 599 + * acpi_pci_link_allocate_irq 600 * success: return IRQ >= 0 601 * failure: return -1 602 */ 603 604 int 605 + acpi_pci_link_allocate_irq ( 606 acpi_handle handle, 607 int index, 608 int *edge_level, ··· 613 struct acpi_device *device = NULL; 614 struct acpi_pci_link *link = NULL; 615 616 + ACPI_FUNCTION_TRACE("acpi_pci_link_allocate_irq"); 617 618 result = acpi_bus_get_device(handle, &device); 619 if (result) { ··· 633 return_VALUE(-1); 634 } 635 636 + down(&acpi_link_lock); 637 + if (acpi_pci_link_allocate(link)) { 638 + up(&acpi_link_lock); 639 return_VALUE(-1); 640 + } 641 642 if (!link->irq.active) { 643 + up(&acpi_link_lock); 644 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link active IRQ is 0!\n")); 645 return_VALUE(-1); 646 } 647 + link->refcnt ++; 648 + up(&acpi_link_lock); 649 650 if (edge_level) *edge_level = link->irq.edge_level; 651 if (active_high_low) *active_high_low = link->irq.active_high_low; 652 if (name) *name = acpi_device_bid(link->device); 653 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 654 + "Link %s is referenced\n", acpi_device_bid(link->device))); 655 return_VALUE(link->irq.active); 656 } 657 658 + /* 659 + * We don't change link's irq information here. 
After it is reenabled, we 660 + * continue to use the info 661 + */ 662 + int 663 + acpi_pci_link_free_irq(acpi_handle handle) 664 + { 665 + struct acpi_device *device = NULL; 666 + struct acpi_pci_link *link = NULL; 667 + acpi_status result; 668 669 + ACPI_FUNCTION_TRACE("acpi_pci_link_free_irq"); 670 + 671 + result = acpi_bus_get_device(handle, &device); 672 + if (result) { 673 + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid link device\n")); 674 + return_VALUE(-1); 675 + } 676 + 677 + link = (struct acpi_pci_link *) acpi_driver_data(device); 678 + if (!link) { 679 + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid link context\n")); 680 + return_VALUE(-1); 681 + } 682 + 683 + down(&acpi_link_lock); 684 + if (!link->irq.initialized) { 685 + up(&acpi_link_lock); 686 + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Link isn't initialized\n")); 687 + return_VALUE(-1); 688 + } 689 + 690 + link->refcnt --; 691 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, 692 + "Link %s is dereferenced\n", acpi_device_bid(link->device))); 693 + 694 + if (link->refcnt == 0) { 695 + acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL); 696 + } 697 + up(&acpi_link_lock); 698 + return_VALUE(link->irq.active); 699 + } 700 /* -------------------------------------------------------------------------- 701 Driver Interface 702 -------------------------------------------------------------------------- */ ··· 677 strcpy(acpi_device_class(device), ACPI_PCI_LINK_CLASS); 678 acpi_driver_data(device) = link; 679 680 + down(&acpi_link_lock); 681 result = acpi_pci_link_get_possible(link); 682 if (result) 683 goto end; ··· 712 end: 713 /* disable all links -- to be activated on use */ 714 acpi_ut_evaluate_object(link->handle, "_DIS", 0, NULL); 715 + up(&acpi_link_lock); 716 717 if (result) 718 kfree(link); ··· 726 { 727 struct list_head *node = NULL; 728 struct acpi_pci_link *link = NULL; 729 + int ret = 0; 730 731 ACPI_FUNCTION_TRACE("irqrouter_suspend"); 732 733 list_for_each(node, &acpi_link.entries) { 734 link = list_entry(node, 
struct acpi_pci_link, node); 735 if (!link) { 736 + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 737 + "Invalid link context\n")); 738 continue; 739 } 740 + if (link->irq.initialized && link->refcnt != 0 741 + /* We ignore legacy IDE device irq */ 742 + && link->irq.active != 14 && link->irq.active !=15) { 743 + printk(KERN_WARNING PREFIX 744 + "%d drivers with interrupt %d neglected to call" 745 + " pci_disable_device at .suspend\n", 746 + link->refcnt, 747 + link->irq.active); 748 + printk(KERN_WARNING PREFIX 749 + "Fix the driver, or rmmod before suspend\n"); 750 + link->refcnt = 0; 751 + ret = -EINVAL; 752 + } 753 } 754 + return_VALUE(ret); 755 } 756 757 ··· 756 757 link = (struct acpi_pci_link *) acpi_driver_data(device); 758 759 + down(&acpi_link_lock); 760 list_del(&link->node); 761 + up(&acpi_link_lock); 762 763 kfree(link); 764 ··· 849 __setup("acpi_irq_balance", acpi_irq_balance_set); 850 851 852 + /* FIXME: we will remove this interface after all drivers call pci_disable_device */ 853 static struct sysdev_class irqrouter_sysdev_class = { 854 set_kset_name("irqrouter"), 855 .suspend = irqrouter_suspend,
+17 -14
drivers/acpi/processor_idle.c
··· 81 * 82 * To skip this limit, boot/load with a large max_cstate limit. 83 */ 84 - static int no_c2c3(struct dmi_system_id *id) 85 { 86 if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 87 return 0; 88 89 - printk(KERN_NOTICE PREFIX "%s detected - C2,C3 disabled." 90 " Override with \"processor.max_cstate=%d\"\n", id->ident, 91 ACPI_PROCESSOR_MAX_POWER + 1); 92 93 - max_cstate = 1; 94 95 return 0; 96 } 97 98 99 - 100 - 101 static struct dmi_system_id __initdata processor_power_dmi_table[] = { 102 - { no_c2c3, "IBM ThinkPad R40e", { 103 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 104 - DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }}, 105 - { no_c2c3, "Medion 41700", { 106 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 107 - DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J") }}, 108 {}, 109 }; 110 ··· 552 ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1"); 553 554 for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) 555 - memset(pr->power.states, 0, sizeof(struct acpi_processor_cx)); 556 557 /* if info is obtained from pblk/fadt, type equals state */ 558 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; ··· 584 585 pr->power.count = 0; 586 for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) 587 - memset(pr->power.states, 0, sizeof(struct acpi_processor_cx)); 588 589 status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); 590 if (ACPI_FAILURE(status)) { ··· 768 } 769 770 if (pr->flags.bm_check) { 771 - printk("Disabling BM access before entering C3\n"); 772 /* bus mastering control is necessary */ 773 if (!pr->flags.bm_control) { 774 ACPI_DEBUG_PRINT((ACPI_DB_INFO, ··· 775 return_VOID; 776 } 777 } else { 778 - printk("Invalidating cache before entering C3\n"); 779 /* 780 * WBINVD should be set in fadt, for C3 state to be 781 * supported on when bm_check is not required. 
··· 845 result = acpi_processor_get_power_info_cst(pr); 846 if ((result) || (acpi_processor_power_verify(pr) < 2)) { 847 result = acpi_processor_get_power_info_fadt(pr); 848 - if (result) 849 result = acpi_processor_get_power_info_default_c1(pr); 850 } 851
··· 81 * 82 * To skip this limit, boot/load with a large max_cstate limit. 83 */ 84 + static int set_max_cstate(struct dmi_system_id *id) 85 { 86 if (max_cstate > ACPI_PROCESSOR_MAX_POWER) 87 return 0; 88 89 + printk(KERN_NOTICE PREFIX "%s detected - %s disabled." 90 " Override with \"processor.max_cstate=%d\"\n", id->ident, 91 + ((int)id->driver_data == 1)? "C2,C3":"C3", 92 ACPI_PROCESSOR_MAX_POWER + 1); 93 94 + max_cstate = (int)id->driver_data; 95 96 return 0; 97 } 98 99 100 static struct dmi_system_id __initdata processor_power_dmi_table[] = { 101 + { set_max_cstate, "IBM ThinkPad R40e", { 102 DMI_MATCH(DMI_BIOS_VENDOR,"IBM"), 103 + DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1}, 104 + { set_max_cstate, "Medion 41700", { 105 DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 106 + DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J") }, (void*)1}, 107 + { set_max_cstate, "Clevo 5600D", { 108 + DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"), 109 + DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307") }, 110 + (void*)2}, 111 {}, 112 }; 113 ··· 549 ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1"); 550 551 for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) 552 + memset(&(pr->power.states[i]), 0, 553 + sizeof(struct acpi_processor_cx)); 554 555 /* if info is obtained from pblk/fadt, type equals state */ 556 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1; ··· 580 581 pr->power.count = 0; 582 for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++) 583 + memset(&(pr->power.states[i]), 0, 584 + sizeof(struct acpi_processor_cx)); 585 586 status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer); 587 if (ACPI_FAILURE(status)) { ··· 763 } 764 765 if (pr->flags.bm_check) { 766 /* bus mastering control is necessary */ 767 if (!pr->flags.bm_control) { 768 ACPI_DEBUG_PRINT((ACPI_DB_INFO, ··· 771 return_VOID; 772 } 773 } else { 774 /* 775 * WBINVD should be set in fadt, for C3 state to be 776 * supported on when bm_check is not required. 
··· 842 result = acpi_processor_get_power_info_cst(pr); 843 if ((result) || (acpi_processor_power_verify(pr) < 2)) { 844 result = acpi_processor_get_power_info_fadt(pr); 845 + if ((result) || (acpi_processor_power_verify(pr) < 2)) 846 result = acpi_processor_get_power_info_default_c1(pr); 847 } 848
+63
drivers/net/sk98lin/skge.c
··· 5133 kfree(pAC); 5134 } 5135 5136 static struct pci_device_id skge_pci_tbl[] = { 5137 { PCI_VENDOR_ID_3COM, 0x1700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5138 { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, ··· 5219 .id_table = skge_pci_tbl, 5220 .probe = skge_probe_one, 5221 .remove = __devexit_p(skge_remove_one), 5222 }; 5223 5224 static int __init skge_init(void)
··· 5133 kfree(pAC); 5134 } 5135 5136 + #ifdef CONFIG_PM 5137 + static int skge_suspend(struct pci_dev *pdev, pm_message_t state) 5138 + { 5139 + struct net_device *dev = pci_get_drvdata(pdev); 5140 + DEV_NET *pNet = netdev_priv(dev); 5141 + SK_AC *pAC = pNet->pAC; 5142 + struct net_device *otherdev = pAC->dev[1]; 5143 + 5144 + if (pNet->Up) { 5145 + pAC->WasIfUp[0] = SK_TRUE; 5146 + DoPrintInterfaceChange = SK_FALSE; 5147 + SkDrvDeInitAdapter(pAC, 0); /* performs SkGeClose */ 5148 + } 5149 + if (otherdev != dev) { 5150 + pNet = netdev_priv(otherdev); 5151 + if (pNet->Up) { 5152 + pAC->WasIfUp[1] = SK_TRUE; 5153 + DoPrintInterfaceChange = SK_FALSE; 5154 + SkDrvDeInitAdapter(pAC, 1); /* performs SkGeClose */ 5155 + } 5156 + } 5157 + 5158 + pci_save_state(pdev); 5159 + pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); 5160 + if (pAC->AllocFlag & SK_ALLOC_IRQ) { 5161 + free_irq(dev->irq, dev); 5162 + } 5163 + pci_disable_device(pdev); 5164 + pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5165 + 5166 + return 0; 5167 + } 5168 + 5169 + static int skge_resume(struct pci_dev *pdev) 5170 + { 5171 + struct net_device *dev = pci_get_drvdata(pdev); 5172 + DEV_NET *pNet = netdev_priv(dev); 5173 + SK_AC *pAC = pNet->pAC; 5174 + 5175 + pci_set_power_state(pdev, PCI_D0); 5176 + pci_restore_state(pdev); 5177 + pci_enable_device(pdev); 5178 + pci_set_master(pdev); 5179 + if (pAC->GIni.GIMacsFound == 2) 5180 + request_irq(dev->irq, SkGeIsr, SA_SHIRQ, pAC->Name, dev); 5181 + else 5182 + request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ, pAC->Name, dev); 5183 + 5184 + if (pAC->WasIfUp[0] == SK_TRUE) { 5185 + DoPrintInterfaceChange = SK_FALSE; 5186 + SkDrvInitAdapter(pAC, 0); /* first device */ 5187 + } 5188 + if (pAC->dev[1] != dev && pAC->WasIfUp[1] == SK_TRUE) { 5189 + DoPrintInterfaceChange = SK_FALSE; 5190 + SkDrvInitAdapter(pAC, 1); /* second device */ 5191 + } 5192 + 5193 + return 0; 5194 + } 5195 + #endif 5196 + 5197 static struct pci_device_id skge_pci_tbl[] = { 
5198 { PCI_VENDOR_ID_3COM, 0x1700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, 5199 { PCI_VENDOR_ID_3COM, 0x80eb, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, ··· 5158 .id_table = skge_pci_tbl, 5159 .probe = skge_probe_one, 5160 .remove = __devexit_p(skge_remove_one), 5161 + .suspend = skge_suspend, 5162 + .resume = skge_resume, 5163 }; 5164 5165 static int __init skge_init(void)
+2 -1
include/acpi/acpi_drivers.h
··· 56 /* ACPI PCI Interrupt Link (pci_link.c) */ 57 58 int acpi_irq_penalty_init (void); 59 - int acpi_pci_link_get_irq (acpi_handle handle, int index, int *edge_level, 60 int *active_high_low, char **name); 61 62 /* ACPI PCI Interrupt Routing (pci_irq.c) */ 63
··· 56 /* ACPI PCI Interrupt Link (pci_link.c) */ 57 58 int acpi_irq_penalty_init (void); 59 + int acpi_pci_link_allocate_irq (acpi_handle handle, int index, int *edge_level, 60 int *active_high_low, char **name); 61 + int acpi_pci_link_free_irq(acpi_handle handle); 62 63 /* ACPI PCI Interrupt Routing (pci_irq.c) */ 64
-4
include/linux/acpi.h
··· 453 * If this matches the last registration, any IRQ resources for gsi 454 * are freed. 455 */ 456 - #ifdef CONFIG_ACPI_DEALLOCATE_IRQ 457 void acpi_unregister_gsi (u32 gsi); 458 - #endif 459 460 #ifdef CONFIG_ACPI_PCI 461 ··· 478 int acpi_pci_irq_enable (struct pci_dev *dev); 479 void acpi_penalize_isa_irq(int irq, int active); 480 481 - #ifdef CONFIG_ACPI_DEALLOCATE_IRQ 482 void acpi_pci_irq_disable (struct pci_dev *dev); 483 - #endif 484 485 struct acpi_pci_driver { 486 struct acpi_pci_driver *next;
··· 453 * If this matches the last registration, any IRQ resources for gsi 454 * are freed. 455 */ 456 void acpi_unregister_gsi (u32 gsi); 457 458 #ifdef CONFIG_ACPI_PCI 459 ··· 480 int acpi_pci_irq_enable (struct pci_dev *dev); 481 void acpi_penalize_isa_irq(int irq, int active); 482 483 void acpi_pci_irq_disable (struct pci_dev *dev); 484 485 struct acpi_pci_driver { 486 struct acpi_pci_driver *next;
+6 -1
sound/pci/intel8x0.c
··· 2376 snd_ac97_suspend(chip->ac97[i]); 2377 if (chip->device_type == DEVICE_INTEL_ICH4) 2378 chip->sdm_saved = igetbyte(chip, ICHREG(SDM)); 2379 pci_disable_device(chip->pci); 2380 return 0; 2381 } ··· 2390 2391 pci_enable_device(chip->pci); 2392 pci_set_master(chip->pci); 2393 - snd_intel8x0_chip_init(chip, 0); 2394 2395 /* re-initialize mixer stuff */ 2396 if (chip->device_type == DEVICE_INTEL_ICH4) {
··· 2376 snd_ac97_suspend(chip->ac97[i]); 2377 if (chip->device_type == DEVICE_INTEL_ICH4) 2378 chip->sdm_saved = igetbyte(chip, ICHREG(SDM)); 2379 + 2380 + if (chip->irq >= 0) 2381 + free_irq(chip->irq, (void *)chip); 2382 pci_disable_device(chip->pci); 2383 return 0; 2384 } ··· 2387 2388 pci_enable_device(chip->pci); 2389 pci_set_master(chip->pci); 2390 + request_irq(chip->irq, snd_intel8x0_interrupt, SA_INTERRUPT|SA_SHIRQ, card->shortname, (void *)chip); 2391 + synchronize_irq(chip->irq); 2392 + snd_intel8x0_chip_init(chip, 1); 2393 2394 /* re-initialize mixer stuff */ 2395 if (chip->device_type == DEVICE_INTEL_ICH4) {