[ACPI] Add "ec_polling" boot option

EC burst mode benefits many machines, some of them significantly.
However, our current implementation fails on some machines such as
Rafael's Asus L5D.

This patch restores the alternative EC polling code, which can be
enabled at boot time via the "ec_polling" kernel command-line option
(sketched below).
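
The selection mechanism, condensed from the diff below, is a global mode
flag set by an early __setup() handler, which also repoints the driver's
.add op at the polling variant; the EC transaction entry points then
dispatch on that flag:

  static int acpi_ec_polling_mode;  /* EC_POLLING once "ec_polling" is given */

  static int __init acpi_ec_set_polling_mode(char *str)
  {
          acpi_ec_polling_mode = EC_POLLING;
          acpi_ec_driver.ops.add = acpi_ec_polling_add;
          return 0;
  }
  __setup("ec_polling", acpi_ec_set_polling_mode);

  static int acpi_ec_read(union acpi_ec *ec, u8 address, u32 *data)
  {
          if (acpi_ec_polling_mode)
                  return acpi_ec_polling_read(ec, address, data);
          else
                  return acpi_ec_burst_read(ec, address, data);
  }

For example, appending "ec_polling" to the kernel command line (e.g.
"vmlinuz root=/dev/sda1 ec_polling"; the exact boot-loader syntax varies)
selects the polling path, while omitting it keeps the default burst-mode
implementation.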

http://bugzilla.kernel.org/show_bug.cgi?id=4665

Signed-off-by: Luming Yu <luming.yu@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>

Authored by Luming Yu and committed by Len Brown (45bea155 335f16be)

+721 -170
drivers/acpi/ec.c
··· 59 #define ACPI_EC_DELAY 50 /* Wait 50ms max. during EC ops */ 60 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 61 62 #define ACPI_EC_COMMAND_READ 0x80 63 #define ACPI_EC_COMMAND_WRITE 0x81 64 #define ACPI_EC_BURST_ENABLE 0x82 65 #define ACPI_EC_BURST_DISABLE 0x83 66 #define ACPI_EC_COMMAND_QUERY 0x84 67 68 - static int acpi_ec_add (struct acpi_device *device); 69 static int acpi_ec_remove (struct acpi_device *device, int type); 70 static int acpi_ec_start (struct acpi_device *device); 71 static int acpi_ec_stop (struct acpi_device *device, int type); 72 73 static struct acpi_driver acpi_ec_driver = { 74 .name = ACPI_EC_DRIVER_NAME, 75 .class = ACPI_EC_CLASS, 76 .ids = ACPI_EC_HID, 77 .ops = { 78 - .add = acpi_ec_add, 79 .remove = acpi_ec_remove, 80 .start = acpi_ec_start, 81 .stop = acpi_ec_stop, 82 }, 83 }; 84 85 - struct acpi_ec { 86 - acpi_handle handle; 87 - unsigned long uid; 88 - unsigned long gpe_bit; 89 - struct acpi_generic_address status_addr; 90 - struct acpi_generic_address command_addr; 91 - struct acpi_generic_address data_addr; 92 - unsigned long global_lock; 93 - unsigned int expect_event; 94 - atomic_t leaving_burst; /* 0 : No, 1 : Yes, 2: abort*/ 95 - atomic_t pending_gpe; 96 - struct semaphore sem; 97 - wait_queue_head_t wait; 98 }; 99 100 /* If we find an EC via the ECDT, we need to keep a ptr to its context */ 101 - static struct acpi_ec *ec_ecdt; 102 103 /* External interfaces use first EC only, so remember */ 104 static struct acpi_device *first_ec; 105 106 /* -------------------------------------------------------------------------- 107 Transaction Management 108 -------------------------------------------------------------------------- */ 109 110 - static inline u32 acpi_ec_read_status(struct acpi_ec *ec) 111 { 112 u32 status = 0; 113 114 - acpi_hw_low_level_read(8, &status, &ec->status_addr); 115 return status; 116 } 117 118 - static int acpi_ec_wait(struct acpi_ec *ec, unsigned int event) 119 { 120 int result = 0; 121 122 ACPI_FUNCTION_TRACE("acpi_ec_wait"); 123 124 - ec->expect_event = event; 125 smp_mb(); 126 127 - result = wait_event_interruptible_timeout(ec->wait, 128 - !ec->expect_event, 129 msecs_to_jiffies(ACPI_EC_DELAY)); 130 131 - ec->expect_event = 0; 132 smp_mb(); 133 134 if (result < 0){ ··· 269 270 static int 271 acpi_ec_enter_burst_mode ( 272 - struct acpi_ec *ec) 273 { 274 u32 tmp = 0; 275 int status = 0; ··· 279 status = acpi_ec_read_status(ec); 280 if (status != -EINVAL && 281 !(status & ACPI_EC_FLAG_BURST)){ 282 - acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, &ec->command_addr); 283 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 284 if (status){ 285 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 286 return_VALUE(-EINVAL); 287 } 288 - acpi_hw_low_level_read(8, &tmp, &ec->data_addr); 289 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 290 if(tmp != 0x90 ) {/* Burst ACK byte*/ 291 return_VALUE(-EINVAL); 292 } 293 } 294 295 - atomic_set(&ec->leaving_burst , 0); 296 return_VALUE(0); 297 } 298 299 static int 300 acpi_ec_leave_burst_mode ( 301 - struct acpi_ec *ec) 302 { 303 int status =0; 304 305 ACPI_FUNCTION_TRACE("acpi_ec_leave_burst_mode"); 306 307 - atomic_set(&ec->leaving_burst , 1); 308 status = acpi_ec_read_status(ec); 309 if (status != -EINVAL && 310 (status & ACPI_EC_FLAG_BURST)){ 311 - acpi_hw_low_level_write(8, ACPI_EC_BURST_DISABLE, &ec->command_addr); 312 status = acpi_ec_wait(ec, ACPI_EC_FLAG_IBF); 313 if (status){ 314 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 315 
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,"------->wait fail\n")); 316 return_VALUE(-EINVAL); 317 } 318 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 319 status = acpi_ec_read_status(ec); 320 } 321 ··· 324 325 static int 326 acpi_ec_read ( 327 - struct acpi_ec *ec, 328 u8 address, 329 u32 *data) 330 { ··· 463 retry: 464 *data = 0; 465 466 - if (ec->global_lock) { 467 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 468 if (ACPI_FAILURE(status)) 469 return_VALUE(-ENODEV); 470 } 471 472 WARN_ON(in_interrupt()); 473 - down(&ec->sem); 474 475 if(acpi_ec_enter_burst_mode(ec)) 476 goto end; 477 478 - acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ, &ec->command_addr); 479 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 480 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 481 if (status) { 482 goto end; 483 } 484 485 - acpi_hw_low_level_write(8, address, &ec->data_addr); 486 status= acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 487 if (status){ 488 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 489 goto end; 490 } 491 492 - acpi_hw_low_level_read(8, data, &ec->data_addr); 493 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 494 495 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Read [%02x] from address [%02x]\n", 496 *data, address)); 497 498 end: 499 acpi_ec_leave_burst_mode(ec); 500 - up(&ec->sem); 501 502 - if (ec->global_lock) 503 acpi_release_global_lock(glk); 504 505 - if(atomic_read(&ec->leaving_burst) == 2){ 506 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 507 - while(atomic_read(&ec->pending_gpe)){ 508 msleep(1); 509 } 510 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 511 goto retry; 512 } 513 ··· 516 517 518 static int 519 - acpi_ec_write ( 520 - struct acpi_ec *ec, 521 u8 address, 522 u8 data) 523 { ··· 530 if (!ec) 531 return_VALUE(-EINVAL); 532 retry: 533 - if (ec->global_lock) { 534 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 535 if (ACPI_FAILURE(status)) 536 return_VALUE(-ENODEV); 537 } 538 539 WARN_ON(in_interrupt()); 540 - down(&ec->sem); 541 542 if(acpi_ec_enter_burst_mode(ec)) 543 goto end; ··· 545 status = acpi_ec_read_status(ec); 546 if (status != -EINVAL && 547 !(status & ACPI_EC_FLAG_BURST)){ 548 - acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, &ec->command_addr); 549 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 550 if (status) 551 goto end; 552 - acpi_hw_low_level_read(8, &tmp, &ec->data_addr); 553 if(tmp != 0x90 ) /* Burst ACK byte*/ 554 goto end; 555 } 556 /*Now we are in burst mode*/ 557 558 - acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE, &ec->command_addr); 559 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 560 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 561 if (status){ 562 goto end; 563 } 564 565 - acpi_hw_low_level_write(8, address, &ec->data_addr); 566 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 567 if (status){ 568 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 569 goto end; 570 } 571 572 - acpi_hw_low_level_write(8, data, &ec->data_addr); 573 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 574 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 575 if (status) 576 goto end; 577 ··· 580 581 end: 582 acpi_ec_leave_burst_mode(ec); 583 - up(&ec->sem); 584 585 - if (ec->global_lock) 586 acpi_release_global_lock(glk); 587 588 - if(atomic_read(&ec->leaving_burst) == 2){ 589 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 590 - while(atomic_read(&ec->pending_gpe)){ 591 msleep(1); 592 } 593 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 594 goto retry; 595 } 596 ··· 603 int 604 ec_read(u8 addr, u8 *val) 605 { 
606 - struct acpi_ec *ec; 607 int err; 608 u32 temp_data; 609 ··· 626 int 627 ec_write(u8 addr, u8 val) 628 { 629 - struct acpi_ec *ec; 630 int err; 631 632 if (!first_ec) ··· 640 } 641 EXPORT_SYMBOL(ec_write); 642 643 - 644 static int 645 acpi_ec_query ( 646 - struct acpi_ec *ec, 647 u32 *data) 648 { 649 int status = 0; ··· 711 return_VALUE(-EINVAL); 712 *data = 0; 713 714 - if (ec->global_lock) { 715 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 716 if (ACPI_FAILURE(status)) 717 return_VALUE(-ENODEV); 718 } 719 720 - down(&ec->sem); 721 if(acpi_ec_enter_burst_mode(ec)) 722 goto end; 723 /* ··· 725 * Note that successful completion of the query causes the ACPI_EC_SCI 726 * bit to be cleared (and thus clearing the interrupt source). 727 */ 728 - acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY, &ec->command_addr); 729 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 730 if (status){ 731 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 732 goto end; 733 } 734 735 - acpi_hw_low_level_read(8, data, &ec->data_addr); 736 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 737 if (!*data) 738 status = -ENODATA; 739 740 end: 741 acpi_ec_leave_burst_mode(ec); 742 - up(&ec->sem); 743 744 - if (ec->global_lock) 745 acpi_release_global_lock(glk); 746 747 - if(atomic_read(&ec->leaving_burst) == 2){ 748 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 749 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 750 status = -ENODATA; 751 } 752 return_VALUE(status); ··· 757 Event Management 758 -------------------------------------------------------------------------- */ 759 760 - struct acpi_ec_query_data { 761 acpi_handle handle; 762 u8 data; 763 }; ··· 766 acpi_ec_gpe_query ( 767 void *ec_cxt) 768 { 769 - struct acpi_ec *ec = (struct acpi_ec *) ec_cxt; 770 u32 value; 771 int result = -ENODATA; 772 static char object_name[5] = {'_','Q','0','0','\0'}; ··· 838 839 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s\n", object_name)); 840 841 - acpi_evaluate_object(ec->handle, object_name, NULL, NULL); 842 end: 843 - atomic_dec(&ec->pending_gpe); 844 return; 845 } 846 ··· 848 acpi_ec_gpe_handler ( 849 void *data) 850 { 851 acpi_status status = AE_OK; 852 - u32 value; 853 - struct acpi_ec *ec = (struct acpi_ec *) data; 854 855 if (!ec) 856 return ACPI_INTERRUPT_NOT_HANDLED; 857 858 - acpi_disable_gpe(NULL, ec->gpe_bit, ACPI_ISR); 859 860 value = acpi_ec_read_status(ec); 861 862 if((value & ACPI_EC_FLAG_IBF) && 863 !(value & ACPI_EC_FLAG_BURST) && 864 - (atomic_read(&ec->leaving_burst) == 0)) { 865 /* 866 * the embedded controller disables 867 * burst mode for any reason other 868 * than the burst disable command 869 * to process critical event. 870 */ 871 - atomic_set(&ec->leaving_burst , 2); /* block current pending transaction 872 and retry */ 873 - wake_up(&ec->wait); 874 }else { 875 - if ((ec->expect_event == ACPI_EC_EVENT_OBF && 876 (value & ACPI_EC_FLAG_OBF)) || 877 - (ec->expect_event == ACPI_EC_EVENT_IBE && 878 !(value & ACPI_EC_FLAG_IBF))) { 879 - ec->expect_event = 0; 880 - wake_up(&ec->wait); 881 return ACPI_INTERRUPT_HANDLED; 882 } 883 } 884 885 if (value & ACPI_EC_FLAG_SCI){ 886 - atomic_add(1, &ec->pending_gpe) ; 887 status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, 888 acpi_ec_gpe_query, ec); 889 return status == AE_OK ? 890 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; 891 } 892 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_ISR); 893 return status == AE_OK ? 
894 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; 895 } ··· 955 void *region_context) 956 { 957 int result = 0; 958 - struct acpi_ec *ec = NULL; 959 u64 temp = *value; 960 acpi_integer f_v = 0; 961 int i = 0; ··· 970 return_VALUE(AE_BAD_PARAMETER); 971 } 972 973 - ec = (struct acpi_ec *) handler_context; 974 975 next_byte: 976 switch (function) { ··· 1031 static int 1032 acpi_ec_read_info (struct seq_file *seq, void *offset) 1033 { 1034 - struct acpi_ec *ec = (struct acpi_ec *) seq->private; 1035 1036 ACPI_FUNCTION_TRACE("acpi_ec_read_info"); 1037 ··· 1039 goto end; 1040 1041 seq_printf(seq, "gpe bit: 0x%02x\n", 1042 - (u32) ec->gpe_bit); 1043 seq_printf(seq, "ports: 0x%02x, 0x%02x\n", 1044 - (u32) ec->status_addr.address, (u32) ec->data_addr.address); 1045 seq_printf(seq, "use global lock: %s\n", 1046 - ec->global_lock?"yes":"no"); 1047 - acpi_enable_gpe(NULL, ec->gpe_bit, ACPI_NOT_ISR); 1048 1049 end: 1050 return_VALUE(0); ··· 1067 acpi_ec_add_fs ( 1068 struct acpi_device *device) 1069 { 1070 - struct proc_dir_entry *entry; 1071 1072 ACPI_FUNCTION_TRACE("acpi_ec_add_fs"); 1073 ··· 1114 Driver Interface 1115 -------------------------------------------------------------------------- */ 1116 1117 static int 1118 - acpi_ec_add ( 1119 struct acpi_device *device) 1120 { 1121 - int result; 1122 - acpi_status status; 1123 - struct acpi_ec *ec; 1124 unsigned long uid; 1125 1126 ACPI_FUNCTION_TRACE("acpi_ec_add"); ··· 1129 if (!device) 1130 return_VALUE(-EINVAL); 1131 1132 - ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 1133 if (!ec) 1134 return_VALUE(-ENOMEM); 1135 - memset(ec, 0, sizeof(struct acpi_ec)); 1136 1137 - ec->handle = device->handle; 1138 - ec->uid = -1; 1139 - atomic_set(&ec->pending_gpe, 0); 1140 - atomic_set(&ec->leaving_burst , 1); 1141 - init_MUTEX(&ec->sem); 1142 - init_waitqueue_head(&ec->wait); 1143 strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); 1144 strcpy(acpi_device_class(device), ACPI_EC_CLASS); 1145 acpi_driver_data(device) = ec; 1146 1147 /* Use the global lock for all EC transactions? */ 1148 - acpi_evaluate_integer(ec->handle, "_GLK", NULL, &ec->global_lock); 1149 1150 /* If our UID matches the UID for the ECDT-enumerated EC, 1151 we now have the *real* EC info, so kill the makeshift one.*/ 1152 - acpi_evaluate_integer(ec->handle, "_UID", NULL, &uid); 1153 - if (ec_ecdt && ec_ecdt->uid == uid) { 1154 acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, 1155 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 1156 - 1157 - acpi_remove_gpe_handler(NULL, ec_ecdt->gpe_bit, &acpi_ec_gpe_handler); 1158 1159 kfree(ec_ecdt); 1160 } 1161 1162 /* Get GPE bit assignment (EC events). */ 1163 /* TODO: Add support for _GPE returning a package */ 1164 - status = acpi_evaluate_integer(ec->handle, "_GPE", NULL, &ec->gpe_bit); 1165 if (ACPI_FAILURE(status)) { 1166 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 1167 "Error obtaining GPE bit assignment\n")); ··· 1172 1173 printk(KERN_INFO PREFIX "%s [%s] (gpe %d)\n", 1174 acpi_device_name(device), acpi_device_bid(device), 1175 - (u32) ec->gpe_bit); 1176 1177 if (!first_ec) 1178 first_ec = device; ··· 1261 struct acpi_device *device, 1262 int type) 1263 { 1264 - struct acpi_ec *ec; 1265 1266 ACPI_FUNCTION_TRACE("acpi_ec_remove"); 1267 ··· 1283 struct acpi_resource *resource, 1284 void *context) 1285 { 1286 - struct acpi_ec *ec = (struct acpi_ec *) context; 1287 struct acpi_generic_address *addr; 1288 1289 if (resource->id != ACPI_RSTYPE_IO) { ··· 1295 * the second address region returned is the status/command 1296 * port. 
1297 */ 1298 - if (ec->data_addr.register_bit_width == 0) { 1299 - addr = &ec->data_addr; 1300 - } else if (ec->command_addr.register_bit_width == 0) { 1301 - addr = &ec->command_addr; 1302 } else { 1303 return AE_CTRL_TERMINATE; 1304 } ··· 1316 acpi_ec_start ( 1317 struct acpi_device *device) 1318 { 1319 - acpi_status status; 1320 - struct acpi_ec *ec; 1321 1322 ACPI_FUNCTION_TRACE("acpi_ec_start"); 1323 ··· 1332 /* 1333 * Get I/O port addresses. Convert to GAS format. 1334 */ 1335 - status = acpi_walk_resources(ec->handle, METHOD_NAME__CRS, 1336 acpi_ec_io_ports, ec); 1337 - if (ACPI_FAILURE(status) || ec->command_addr.register_bit_width == 0) { 1338 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error getting I/O port addresses")); 1339 return_VALUE(-ENODEV); 1340 } 1341 1342 - ec->status_addr = ec->command_addr; 1343 1344 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "gpe=0x%02x, ports=0x%2x,0x%2x\n", 1345 - (u32) ec->gpe_bit, (u32) ec->command_addr.address, 1346 - (u32) ec->data_addr.address)); 1347 1348 /* 1349 * Install GPE handler 1350 */ 1351 - status = acpi_install_gpe_handler(NULL, ec->gpe_bit, 1352 ACPI_GPE_EDGE_TRIGGERED, &acpi_ec_gpe_handler, ec); 1353 if (ACPI_FAILURE(status)) { 1354 return_VALUE(-ENODEV); 1355 } 1356 - acpi_set_gpe_type (NULL, ec->gpe_bit, ACPI_GPE_TYPE_RUNTIME); 1357 - acpi_enable_gpe (NULL, ec->gpe_bit, ACPI_NOT_ISR); 1358 1359 - status = acpi_install_address_space_handler (ec->handle, 1360 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, 1361 &acpi_ec_space_setup, ec); 1362 if (ACPI_FAILURE(status)) { 1363 - acpi_remove_gpe_handler(NULL, ec->gpe_bit, &acpi_ec_gpe_handler); 1364 return_VALUE(-ENODEV); 1365 } 1366 ··· 1374 struct acpi_device *device, 1375 int type) 1376 { 1377 - acpi_status status; 1378 - struct acpi_ec *ec; 1379 1380 ACPI_FUNCTION_TRACE("acpi_ec_stop"); 1381 ··· 1384 1385 ec = acpi_driver_data(device); 1386 1387 - status = acpi_remove_address_space_handler(ec->handle, 1388 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 1389 if (ACPI_FAILURE(status)) 1390 return_VALUE(-ENODEV); 1391 1392 - status = acpi_remove_gpe_handler(NULL, ec->gpe_bit, &acpi_ec_gpe_handler); 1393 if (ACPI_FAILURE(status)) 1394 return_VALUE(-ENODEV); 1395 ··· 1403 void *context, 1404 void **retval) 1405 { 1406 acpi_status status; 1407 1408 status = acpi_walk_resources(handle, METHOD_NAME__CRS, 1409 acpi_ec_io_ports, ec_ecdt); 1410 if (ACPI_FAILURE(status)) 1411 return status; 1412 - ec_ecdt->status_addr = ec_ecdt->command_addr; 1413 1414 - ec_ecdt->uid = -1; 1415 - acpi_evaluate_integer(handle, "_UID", NULL, &ec_ecdt->uid); 1416 1417 - status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec_ecdt->gpe_bit); 1418 if (ACPI_FAILURE(status)) 1419 return status; 1420 - ec_ecdt->global_lock = TRUE; 1421 - ec_ecdt->handle = handle; 1422 1423 printk(KERN_INFO PREFIX "GPE=0x%02x, ports=0x%2x, 0x%2x\n", 1424 - (u32) ec_ecdt->gpe_bit, (u32) ec_ecdt->command_addr.address, 1425 - (u32) ec_ecdt->data_addr.address); 1426 1427 return AE_CTRL_TERMINATE; 1428 } ··· 1495 1496 printk(KERN_INFO PREFIX "Try to make an fake ECDT\n"); 1497 1498 - ec_ecdt = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 1499 if (!ec_ecdt) { 1500 ret = -ENOMEM; 1501 goto error; 1502 } 1503 - memset(ec_ecdt, 0, sizeof(struct acpi_ec)); 1504 1505 status = acpi_get_devices (ACPI_EC_HID, 1506 acpi_fake_ecdt_callback, ··· 1521 static int __init 1522 acpi_ec_get_real_ecdt(void) 1523 { 1524 acpi_status status; 1525 struct acpi_table_ecdt *ecdt_ptr; 1526 ··· 1588 /* 1589 * Generate a temporary ec context to use until the namespace is scanned 1590 */ 1591 - 
ec_ecdt = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 1592 if (!ec_ecdt) 1593 return -ENOMEM; 1594 - memset(ec_ecdt, 0, sizeof(struct acpi_ec)); 1595 1596 - init_MUTEX(&ec_ecdt->sem); 1597 - init_waitqueue_head(&ec_ecdt->wait); 1598 - ec_ecdt->command_addr = ecdt_ptr->ec_control; 1599 - ec_ecdt->status_addr = ecdt_ptr->ec_control; 1600 - ec_ecdt->data_addr = ecdt_ptr->ec_data; 1601 - ec_ecdt->gpe_bit = ecdt_ptr->gpe_bit; 1602 /* use the GL just to be safe */ 1603 - ec_ecdt->global_lock = TRUE; 1604 - ec_ecdt->uid = ecdt_ptr->uid; 1605 1606 - status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->handle); 1607 if (ACPI_FAILURE(status)) { 1608 goto error; 1609 } ··· 1636 /* 1637 * Install GPE handler 1638 */ 1639 - status = acpi_install_gpe_handler(NULL, ec_ecdt->gpe_bit, 1640 ACPI_GPE_EDGE_TRIGGERED, &acpi_ec_gpe_handler, 1641 ec_ecdt); 1642 if (ACPI_FAILURE(status)) { 1643 goto error; 1644 } 1645 - acpi_set_gpe_type (NULL, ec_ecdt->gpe_bit, ACPI_GPE_TYPE_RUNTIME); 1646 - acpi_enable_gpe (NULL, ec_ecdt->gpe_bit, ACPI_NOT_ISR); 1647 1648 status = acpi_install_address_space_handler (ACPI_ROOT_OBJECT, 1649 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, 1650 &acpi_ec_space_setup, ec_ecdt); 1651 if (ACPI_FAILURE(status)) { 1652 - acpi_remove_gpe_handler(NULL, ec_ecdt->gpe_bit, 1653 &acpi_ec_gpe_handler); 1654 goto error; 1655 } ··· 1667 1668 static int __init acpi_ec_init (void) 1669 { 1670 - int result; 1671 1672 ACPI_FUNCTION_TRACE("acpi_ec_init"); 1673 ··· 1711 return 0; 1712 } 1713 __setup("acpi_fake_ecdt", acpi_fake_ecdt_setup);
··· 59 #define ACPI_EC_DELAY 50 /* Wait 50ms max. during EC ops */ 60 #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ 61 62 + #define ACPI_EC_UDELAY 100 /* Poll @ 100us increments */ 63 + #define ACPI_EC_UDELAY_COUNT 1000 /* Wait 10ms max. during EC ops */ 64 + 65 #define ACPI_EC_COMMAND_READ 0x80 66 #define ACPI_EC_COMMAND_WRITE 0x81 67 #define ACPI_EC_BURST_ENABLE 0x82 68 #define ACPI_EC_BURST_DISABLE 0x83 69 #define ACPI_EC_COMMAND_QUERY 0x84 70 71 + #define EC_POLLING 0xFF 72 + #define EC_BURST 0x00 73 + 74 + 75 static int acpi_ec_remove (struct acpi_device *device, int type); 76 static int acpi_ec_start (struct acpi_device *device); 77 static int acpi_ec_stop (struct acpi_device *device, int type); 78 + static int acpi_ec_burst_add ( struct acpi_device *device); 79 80 static struct acpi_driver acpi_ec_driver = { 81 .name = ACPI_EC_DRIVER_NAME, 82 .class = ACPI_EC_CLASS, 83 .ids = ACPI_EC_HID, 84 .ops = { 85 + .add = acpi_ec_burst_add, 86 .remove = acpi_ec_remove, 87 .start = acpi_ec_start, 88 .stop = acpi_ec_stop, 89 }, 90 }; 91 + union acpi_ec { 92 + struct { 93 + u32 mode; 94 + acpi_handle handle; 95 + unsigned long uid; 96 + unsigned long gpe_bit; 97 + struct acpi_generic_address status_addr; 98 + struct acpi_generic_address command_addr; 99 + struct acpi_generic_address data_addr; 100 + unsigned long global_lock; 101 + } common; 102 103 + struct { 104 + u32 mode; 105 + acpi_handle handle; 106 + unsigned long uid; 107 + unsigned long gpe_bit; 108 + struct acpi_generic_address status_addr; 109 + struct acpi_generic_address command_addr; 110 + struct acpi_generic_address data_addr; 111 + unsigned long global_lock; 112 + unsigned int expect_event; 113 + atomic_t leaving_burst; /* 0 : No, 1 : Yes, 2: abort*/ 114 + atomic_t pending_gpe; 115 + struct semaphore sem; 116 + wait_queue_head_t wait; 117 + }burst; 118 + 119 + struct { 120 + u32 mode; 121 + acpi_handle handle; 122 + unsigned long uid; 123 + unsigned long gpe_bit; 124 + struct acpi_generic_address status_addr; 125 + struct acpi_generic_address command_addr; 126 + struct acpi_generic_address data_addr; 127 + unsigned long global_lock; 128 + spinlock_t lock; 129 + }polling; 130 }; 131 132 + static int acpi_ec_polling_wait ( union acpi_ec *ec, u8 event); 133 + static int acpi_ec_burst_wait(union acpi_ec *ec, unsigned int event); 134 + static int acpi_ec_polling_read ( union acpi_ec *ec, u8 address, u32 *data); 135 + static int acpi_ec_burst_read( union acpi_ec *ec, u8 address, u32 *data); 136 + static int acpi_ec_polling_write ( union acpi_ec *ec, u8 address, u8 data); 137 + static int acpi_ec_burst_write ( union acpi_ec *ec, u8 address, u8 data); 138 + static int acpi_ec_polling_query ( union acpi_ec *ec, u32 *data); 139 + static int acpi_ec_burst_query ( union acpi_ec *ec, u32 *data); 140 + static void acpi_ec_gpe_polling_query ( void *ec_cxt); 141 + static void acpi_ec_gpe_burst_query ( void *ec_cxt); 142 + static u32 acpi_ec_gpe_polling_handler ( void *data); 143 + static u32 acpi_ec_gpe_burst_handler ( void *data); 144 + static acpi_status __init 145 + acpi_fake_ecdt_polling_callback ( 146 + acpi_handle handle, 147 + u32 Level, 148 + void *context, 149 + void **retval); 150 + 151 + static acpi_status __init 152 + acpi_fake_ecdt_burst_callback ( 153 + acpi_handle handle, 154 + u32 Level, 155 + void *context, 156 + void **retval); 157 + 158 + static int __init 159 + acpi_ec_polling_get_real_ecdt(void); 160 + static int __init 161 + acpi_ec_burst_get_real_ecdt(void); 162 /* If we find an EC via the ECDT, we 
need to keep a ptr to its context */ 163 + static union acpi_ec *ec_ecdt; 164 165 /* External interfaces use first EC only, so remember */ 166 static struct acpi_device *first_ec; 167 + static int acpi_ec_polling_mode; 168 169 /* -------------------------------------------------------------------------- 170 Transaction Management 171 -------------------------------------------------------------------------- */ 172 173 + static inline u32 acpi_ec_read_status(union acpi_ec *ec) 174 { 175 u32 status = 0; 176 177 + acpi_hw_low_level_read(8, &status, &ec->common.status_addr); 178 return status; 179 } 180 181 + static int 182 + acpi_ec_wait ( 183 + union acpi_ec *ec, 184 + u8 event) 185 + { 186 + if (acpi_ec_polling_mode) 187 + return acpi_ec_polling_wait (ec, event); 188 + else 189 + return acpi_ec_burst_wait (ec, event); 190 + } 191 + 192 + static int 193 + acpi_ec_polling_wait ( 194 + union acpi_ec *ec, 195 + u8 event) 196 + { 197 + u32 acpi_ec_status = 0; 198 + u32 i = ACPI_EC_UDELAY_COUNT; 199 + 200 + if (!ec) 201 + return -EINVAL; 202 + 203 + /* Poll the EC status register waiting for the event to occur. */ 204 + switch (event) { 205 + case ACPI_EC_EVENT_OBF: 206 + do { 207 + acpi_hw_low_level_read(8, &acpi_ec_status, &ec->common.status_addr); 208 + if (acpi_ec_status & ACPI_EC_FLAG_OBF) 209 + return 0; 210 + udelay(ACPI_EC_UDELAY); 211 + } while (--i>0); 212 + break; 213 + case ACPI_EC_EVENT_IBE: 214 + do { 215 + acpi_hw_low_level_read(8, &acpi_ec_status, &ec->common.status_addr); 216 + if (!(acpi_ec_status & ACPI_EC_FLAG_IBF)) 217 + return 0; 218 + udelay(ACPI_EC_UDELAY); 219 + } while (--i>0); 220 + break; 221 + default: 222 + return -EINVAL; 223 + } 224 + 225 + return -ETIME; 226 + } 227 + static int acpi_ec_burst_wait(union acpi_ec *ec, unsigned int event) 228 { 229 int result = 0; 230 231 ACPI_FUNCTION_TRACE("acpi_ec_wait"); 232 233 + ec->burst.expect_event = event; 234 smp_mb(); 235 236 + result = wait_event_interruptible_timeout(ec->burst.wait, 237 + !ec->burst.expect_event, 238 msecs_to_jiffies(ACPI_EC_DELAY)); 239 240 + ec->burst.expect_event = 0; 241 smp_mb(); 242 243 if (result < 0){ ··· 160 161 static int 162 acpi_ec_enter_burst_mode ( 163 + union acpi_ec *ec) 164 { 165 u32 tmp = 0; 166 int status = 0; ··· 170 status = acpi_ec_read_status(ec); 171 if (status != -EINVAL && 172 !(status & ACPI_EC_FLAG_BURST)){ 173 + acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, &ec->common.command_addr); 174 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 175 if (status){ 176 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 177 return_VALUE(-EINVAL); 178 } 179 + acpi_hw_low_level_read(8, &tmp, &ec->common.data_addr); 180 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 181 if(tmp != 0x90 ) {/* Burst ACK byte*/ 182 return_VALUE(-EINVAL); 183 } 184 } 185 186 + atomic_set(&ec->burst.leaving_burst , 0); 187 return_VALUE(0); 188 } 189 190 static int 191 acpi_ec_leave_burst_mode ( 192 + union acpi_ec *ec) 193 { 194 int status =0; 195 196 ACPI_FUNCTION_TRACE("acpi_ec_leave_burst_mode"); 197 198 + atomic_set(&ec->burst.leaving_burst , 1); 199 status = acpi_ec_read_status(ec); 200 if (status != -EINVAL && 201 (status & ACPI_EC_FLAG_BURST)){ 202 + acpi_hw_low_level_write(8, ACPI_EC_BURST_DISABLE, &ec->common.command_addr); 203 status = acpi_ec_wait(ec, ACPI_EC_FLAG_IBF); 204 if (status){ 205 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 206 ACPI_DEBUG_PRINT((ACPI_DB_ERROR,"------->wait fail\n")); 207 return_VALUE(-EINVAL); 208 } 209 + acpi_enable_gpe(NULL, 
ec->common.gpe_bit, ACPI_NOT_ISR); 210 status = acpi_ec_read_status(ec); 211 } 212 ··· 215 216 static int 217 acpi_ec_read ( 218 + union acpi_ec *ec, 219 + u8 address, 220 + u32 *data) 221 + { 222 + if (acpi_ec_polling_mode) 223 + return acpi_ec_polling_read(ec, address, data); 224 + else 225 + return acpi_ec_burst_read(ec, address, data); 226 + } 227 + static int 228 + acpi_ec_write ( 229 + union acpi_ec *ec, 230 + u8 address, 231 + u8 data) 232 + { 233 + if (acpi_ec_polling_mode) 234 + return acpi_ec_polling_write(ec, address, data); 235 + else 236 + return acpi_ec_burst_write(ec, address, data); 237 + } 238 + static int 239 + acpi_ec_polling_read ( 240 + union acpi_ec *ec, 241 + u8 address, 242 + u32 *data) 243 + { 244 + acpi_status status = AE_OK; 245 + int result = 0; 246 + unsigned long flags = 0; 247 + u32 glk = 0; 248 + 249 + ACPI_FUNCTION_TRACE("acpi_ec_read"); 250 + 251 + if (!ec || !data) 252 + return_VALUE(-EINVAL); 253 + 254 + *data = 0; 255 + 256 + if (ec->common.global_lock) { 257 + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 258 + if (ACPI_FAILURE(status)) 259 + return_VALUE(-ENODEV); 260 + } 261 + 262 + spin_lock_irqsave(&ec->polling.lock, flags); 263 + 264 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ, &ec->common.command_addr); 265 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 266 + if (result) 267 + goto end; 268 + 269 + acpi_hw_low_level_write(8, address, &ec->common.data_addr); 270 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 271 + if (result) 272 + goto end; 273 + 274 + acpi_hw_low_level_read(8, data, &ec->common.data_addr); 275 + 276 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Read [%02x] from address [%02x]\n", 277 + *data, address)); 278 + 279 + end: 280 + spin_unlock_irqrestore(&ec->polling.lock, flags); 281 + 282 + if (ec->common.global_lock) 283 + acpi_release_global_lock(glk); 284 + 285 + return_VALUE(result); 286 + } 287 + 288 + 289 + static int 290 + acpi_ec_polling_write ( 291 + union acpi_ec *ec, 292 + u8 address, 293 + u8 data) 294 + { 295 + int result = 0; 296 + acpi_status status = AE_OK; 297 + unsigned long flags = 0; 298 + u32 glk = 0; 299 + 300 + ACPI_FUNCTION_TRACE("acpi_ec_write"); 301 + 302 + if (!ec) 303 + return_VALUE(-EINVAL); 304 + 305 + if (ec->common.global_lock) { 306 + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 307 + if (ACPI_FAILURE(status)) 308 + return_VALUE(-ENODEV); 309 + } 310 + 311 + spin_lock_irqsave(&ec->polling.lock, flags); 312 + 313 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE, &ec->common.command_addr); 314 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 315 + if (result) 316 + goto end; 317 + 318 + acpi_hw_low_level_write(8, address, &ec->common.data_addr); 319 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 320 + if (result) 321 + goto end; 322 + 323 + acpi_hw_low_level_write(8, data, &ec->common.data_addr); 324 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 325 + if (result) 326 + goto end; 327 + 328 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Wrote [%02x] to address [%02x]\n", 329 + data, address)); 330 + 331 + end: 332 + spin_unlock_irqrestore(&ec->polling.lock, flags); 333 + 334 + if (ec->common.global_lock) 335 + acpi_release_global_lock(glk); 336 + 337 + return_VALUE(result); 338 + } 339 + 340 + static int 341 + acpi_ec_burst_read ( 342 + union acpi_ec *ec, 343 u8 address, 344 u32 *data) 345 { ··· 230 retry: 231 *data = 0; 232 233 + if (ec->common.global_lock) { 234 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 235 if (ACPI_FAILURE(status)) 236 return_VALUE(-ENODEV); 237 
} 238 239 WARN_ON(in_interrupt()); 240 + down(&ec->burst.sem); 241 242 if(acpi_ec_enter_burst_mode(ec)) 243 goto end; 244 245 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_READ, &ec->common.command_addr); 246 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 247 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 248 if (status) { 249 goto end; 250 } 251 252 + acpi_hw_low_level_write(8, address, &ec->common.data_addr); 253 status= acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 254 if (status){ 255 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 256 goto end; 257 } 258 259 + acpi_hw_low_level_read(8, data, &ec->common.data_addr); 260 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 261 262 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Read [%02x] from address [%02x]\n", 263 *data, address)); 264 265 end: 266 acpi_ec_leave_burst_mode(ec); 267 + up(&ec->burst.sem); 268 269 + if (ec->common.global_lock) 270 acpi_release_global_lock(glk); 271 272 + if(atomic_read(&ec->burst.leaving_burst) == 2){ 273 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 274 + while(atomic_read(&ec->burst.pending_gpe)){ 275 msleep(1); 276 } 277 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 278 goto retry; 279 } 280 ··· 283 284 285 static int 286 + acpi_ec_burst_write ( 287 + union acpi_ec *ec, 288 u8 address, 289 u8 data) 290 { ··· 297 if (!ec) 298 return_VALUE(-EINVAL); 299 retry: 300 + if (ec->common.global_lock) { 301 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 302 if (ACPI_FAILURE(status)) 303 return_VALUE(-ENODEV); 304 } 305 306 WARN_ON(in_interrupt()); 307 + down(&ec->burst.sem); 308 309 if(acpi_ec_enter_burst_mode(ec)) 310 goto end; ··· 312 status = acpi_ec_read_status(ec); 313 if (status != -EINVAL && 314 !(status & ACPI_EC_FLAG_BURST)){ 315 + acpi_hw_low_level_write(8, ACPI_EC_BURST_ENABLE, &ec->common.command_addr); 316 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 317 if (status) 318 goto end; 319 + acpi_hw_low_level_read(8, &tmp, &ec->common.data_addr); 320 if(tmp != 0x90 ) /* Burst ACK byte*/ 321 goto end; 322 } 323 /*Now we are in burst mode*/ 324 325 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_WRITE, &ec->common.command_addr); 326 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 327 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 328 if (status){ 329 goto end; 330 } 331 332 + acpi_hw_low_level_write(8, address, &ec->common.data_addr); 333 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 334 if (status){ 335 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 336 goto end; 337 } 338 339 + acpi_hw_low_level_write(8, data, &ec->common.data_addr); 340 status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBE); 341 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 342 if (status) 343 goto end; 344 ··· 347 348 end: 349 acpi_ec_leave_burst_mode(ec); 350 + up(&ec->burst.sem); 351 352 + if (ec->common.global_lock) 353 acpi_release_global_lock(glk); 354 355 + if(atomic_read(&ec->burst.leaving_burst) == 2){ 356 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 357 + while(atomic_read(&ec->burst.pending_gpe)){ 358 msleep(1); 359 } 360 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 361 goto retry; 362 } 363 ··· 370 int 371 ec_read(u8 addr, u8 *val) 372 { 373 + union acpi_ec *ec; 374 int err; 375 u32 temp_data; 376 ··· 393 int 394 ec_write(u8 addr, u8 val) 395 { 396 + union acpi_ec *ec; 397 int err; 398 399 if (!first_ec) ··· 407 } 408 EXPORT_SYMBOL(ec_write); 409 410 static int 411 acpi_ec_query ( 412 + union acpi_ec *ec, 413 + u32 *data) 414 + { 415 + 
if (acpi_ec_polling_mode) 416 + return acpi_ec_polling_query(ec, data); 417 + else 418 + return acpi_ec_burst_query(ec, data); 419 + } 420 + static int 421 + acpi_ec_polling_query ( 422 + union acpi_ec *ec, 423 + u32 *data) 424 + { 425 + int result = 0; 426 + acpi_status status = AE_OK; 427 + unsigned long flags = 0; 428 + u32 glk = 0; 429 + 430 + ACPI_FUNCTION_TRACE("acpi_ec_query"); 431 + 432 + if (!ec || !data) 433 + return_VALUE(-EINVAL); 434 + 435 + *data = 0; 436 + 437 + if (ec->common.global_lock) { 438 + status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 439 + if (ACPI_FAILURE(status)) 440 + return_VALUE(-ENODEV); 441 + } 442 + 443 + /* 444 + * Query the EC to find out which _Qxx method we need to evaluate. 445 + * Note that successful completion of the query causes the ACPI_EC_SCI 446 + * bit to be cleared (and thus clearing the interrupt source). 447 + */ 448 + spin_lock_irqsave(&ec->polling.lock, flags); 449 + 450 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY, &ec->common.command_addr); 451 + result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 452 + if (result) 453 + goto end; 454 + 455 + acpi_hw_low_level_read(8, data, &ec->common.data_addr); 456 + if (!*data) 457 + result = -ENODATA; 458 + 459 + end: 460 + spin_unlock_irqrestore(&ec->polling.lock, flags); 461 + 462 + if (ec->common.global_lock) 463 + acpi_release_global_lock(glk); 464 + 465 + return_VALUE(result); 466 + } 467 + static int 468 + acpi_ec_burst_query ( 469 + union acpi_ec *ec, 470 u32 *data) 471 { 472 int status = 0; ··· 422 return_VALUE(-EINVAL); 423 *data = 0; 424 425 + if (ec->common.global_lock) { 426 status = acpi_acquire_global_lock(ACPI_EC_UDELAY_GLK, &glk); 427 if (ACPI_FAILURE(status)) 428 return_VALUE(-ENODEV); 429 } 430 431 + down(&ec->burst.sem); 432 if(acpi_ec_enter_burst_mode(ec)) 433 goto end; 434 /* ··· 436 * Note that successful completion of the query causes the ACPI_EC_SCI 437 * bit to be cleared (and thus clearing the interrupt source). 
438 */ 439 + acpi_hw_low_level_write(8, ACPI_EC_COMMAND_QUERY, &ec->common.command_addr); 440 status = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF); 441 if (status){ 442 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 443 goto end; 444 } 445 446 + acpi_hw_low_level_read(8, data, &ec->common.data_addr); 447 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 448 if (!*data) 449 status = -ENODATA; 450 451 end: 452 acpi_ec_leave_burst_mode(ec); 453 + up(&ec->burst.sem); 454 455 + if (ec->common.global_lock) 456 acpi_release_global_lock(glk); 457 458 + if(atomic_read(&ec->burst.leaving_burst) == 2){ 459 ACPI_DEBUG_PRINT((ACPI_DB_INFO,"aborted, retry ...\n")); 460 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 461 status = -ENODATA; 462 } 463 return_VALUE(status); ··· 468 Event Management 469 -------------------------------------------------------------------------- */ 470 471 + union acpi_ec_query_data { 472 acpi_handle handle; 473 u8 data; 474 }; ··· 477 acpi_ec_gpe_query ( 478 void *ec_cxt) 479 { 480 + if (acpi_ec_polling_mode) 481 + acpi_ec_gpe_polling_query(ec_cxt); 482 + else 483 + acpi_ec_gpe_burst_query(ec_cxt); 484 + } 485 + 486 + static void 487 + acpi_ec_gpe_polling_query ( 488 + void *ec_cxt) 489 + { 490 + union acpi_ec *ec = (union acpi_ec *) ec_cxt; 491 + u32 value = 0; 492 + unsigned long flags = 0; 493 + static char object_name[5] = {'_','Q','0','0','\0'}; 494 + const char hex[] = {'0','1','2','3','4','5','6','7', 495 + '8','9','A','B','C','D','E','F'}; 496 + 497 + ACPI_FUNCTION_TRACE("acpi_ec_gpe_query"); 498 + 499 + if (!ec_cxt) 500 + goto end; 501 + 502 + spin_lock_irqsave(&ec->polling.lock, flags); 503 + acpi_hw_low_level_read(8, &value, &ec->common.command_addr); 504 + spin_unlock_irqrestore(&ec->polling.lock, flags); 505 + 506 + /* TBD: Implement asynch events! 507 + * NOTE: All we care about are EC-SCI's. Other EC events are 508 + * handled via polling (yuck!). This is because some systems 509 + * treat EC-SCIs as level (versus EDGE!) triggered, preventing 510 + * a purely interrupt-driven approach (grumble, grumble). 
511 + */ 512 + if (!(value & ACPI_EC_FLAG_SCI)) 513 + goto end; 514 + 515 + if (acpi_ec_query(ec, &value)) 516 + goto end; 517 + 518 + object_name[2] = hex[((value >> 4) & 0x0F)]; 519 + object_name[3] = hex[(value & 0x0F)]; 520 + 521 + ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s\n", object_name)); 522 + 523 + acpi_evaluate_object(ec->common.handle, object_name, NULL, NULL); 524 + 525 + end: 526 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 527 + } 528 + static void 529 + acpi_ec_gpe_burst_query ( 530 + void *ec_cxt) 531 + { 532 + union acpi_ec *ec = (union acpi_ec *) ec_cxt; 533 u32 value; 534 int result = -ENODATA; 535 static char object_name[5] = {'_','Q','0','0','\0'}; ··· 497 498 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Evaluating %s\n", object_name)); 499 500 + acpi_evaluate_object(ec->common.handle, object_name, NULL, NULL); 501 end: 502 + atomic_dec(&ec->burst.pending_gpe); 503 return; 504 } 505 ··· 507 acpi_ec_gpe_handler ( 508 void *data) 509 { 510 + if (acpi_ec_polling_mode) 511 + return acpi_ec_gpe_polling_handler(data); 512 + else 513 + return acpi_ec_gpe_burst_handler(data); 514 + } 515 + static u32 516 + acpi_ec_gpe_polling_handler ( 517 + void *data) 518 + { 519 acpi_status status = AE_OK; 520 + union acpi_ec *ec = (union acpi_ec *) data; 521 522 if (!ec) 523 return ACPI_INTERRUPT_NOT_HANDLED; 524 525 + acpi_disable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR); 526 + 527 + status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, 528 + acpi_ec_gpe_query, ec); 529 + 530 + if (status == AE_OK) 531 + return ACPI_INTERRUPT_HANDLED; 532 + else 533 + return ACPI_INTERRUPT_NOT_HANDLED; 534 + } 535 + static u32 536 + acpi_ec_gpe_burst_handler ( 537 + void *data) 538 + { 539 + acpi_status status = AE_OK; 540 + u32 value; 541 + union acpi_ec *ec = (union acpi_ec *) data; 542 + 543 + if (!ec) 544 + return ACPI_INTERRUPT_NOT_HANDLED; 545 + 546 + acpi_disable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR); 547 548 value = acpi_ec_read_status(ec); 549 550 if((value & ACPI_EC_FLAG_IBF) && 551 !(value & ACPI_EC_FLAG_BURST) && 552 + (atomic_read(&ec->burst.leaving_burst) == 0)) { 553 /* 554 * the embedded controller disables 555 * burst mode for any reason other 556 * than the burst disable command 557 * to process critical event. 558 */ 559 + atomic_set(&ec->burst.leaving_burst , 2); /* block current pending transaction 560 and retry */ 561 + wake_up(&ec->burst.wait); 562 }else { 563 + if ((ec->burst.expect_event == ACPI_EC_EVENT_OBF && 564 (value & ACPI_EC_FLAG_OBF)) || 565 + (ec->burst.expect_event == ACPI_EC_EVENT_IBE && 566 !(value & ACPI_EC_FLAG_IBF))) { 567 + ec->burst.expect_event = 0; 568 + wake_up(&ec->burst.wait); 569 return ACPI_INTERRUPT_HANDLED; 570 } 571 } 572 573 if (value & ACPI_EC_FLAG_SCI){ 574 + atomic_add(1, &ec->burst.pending_gpe) ; 575 status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE, 576 acpi_ec_gpe_query, ec); 577 return status == AE_OK ? 578 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; 579 } 580 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR); 581 return status == AE_OK ? 
582 ACPI_INTERRUPT_HANDLED : ACPI_INTERRUPT_NOT_HANDLED; 583 } ··· 585 void *region_context) 586 { 587 int result = 0; 588 + union acpi_ec *ec = NULL; 589 u64 temp = *value; 590 acpi_integer f_v = 0; 591 int i = 0; ··· 600 return_VALUE(AE_BAD_PARAMETER); 601 } 602 603 + ec = (union acpi_ec *) handler_context; 604 605 next_byte: 606 switch (function) { ··· 661 static int 662 acpi_ec_read_info (struct seq_file *seq, void *offset) 663 { 664 + union acpi_ec *ec = (union acpi_ec *) seq->private; 665 666 ACPI_FUNCTION_TRACE("acpi_ec_read_info"); 667 ··· 669 goto end; 670 671 seq_printf(seq, "gpe bit: 0x%02x\n", 672 + (u32) ec->common.gpe_bit); 673 seq_printf(seq, "ports: 0x%02x, 0x%02x\n", 674 + (u32) ec->common.status_addr.address, (u32) ec->common.data_addr.address); 675 seq_printf(seq, "use global lock: %s\n", 676 + ec->common.global_lock?"yes":"no"); 677 + acpi_enable_gpe(NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 678 679 end: 680 return_VALUE(0); ··· 697 acpi_ec_add_fs ( 698 struct acpi_device *device) 699 { 700 + struct proc_dir_entry *entry = NULL; 701 702 ACPI_FUNCTION_TRACE("acpi_ec_add_fs"); 703 ··· 744 Driver Interface 745 -------------------------------------------------------------------------- */ 746 747 + 748 static int 749 + acpi_ec_polling_add ( 750 struct acpi_device *device) 751 { 752 + int result = 0; 753 + acpi_status status = AE_OK; 754 + union acpi_ec *ec = NULL; 755 unsigned long uid; 756 757 ACPI_FUNCTION_TRACE("acpi_ec_add"); ··· 758 if (!device) 759 return_VALUE(-EINVAL); 760 761 + ec = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 762 if (!ec) 763 return_VALUE(-ENOMEM); 764 + memset(ec, 0, sizeof(union acpi_ec)); 765 766 + ec->common.handle = device->handle; 767 + ec->common.uid = -1; 768 + spin_lock_init(&ec->polling.lock); 769 strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); 770 strcpy(acpi_device_class(device), ACPI_EC_CLASS); 771 acpi_driver_data(device) = ec; 772 773 /* Use the global lock for all EC transactions? */ 774 + acpi_evaluate_integer(ec->common.handle, "_GLK", NULL, &ec->common.global_lock); 775 776 /* If our UID matches the UID for the ECDT-enumerated EC, 777 we now have the *real* EC info, so kill the makeshift one.*/ 778 + acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid); 779 + if (ec_ecdt && ec_ecdt->common.uid == uid) { 780 acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, 781 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 782 + 783 + acpi_remove_gpe_handler(NULL, ec_ecdt->common.gpe_bit, &acpi_ec_gpe_handler); 784 785 kfree(ec_ecdt); 786 } 787 788 /* Get GPE bit assignment (EC events). 
*/ 789 /* TODO: Add support for _GPE returning a package */ 790 + status = acpi_evaluate_integer(ec->common.handle, "_GPE", NULL, &ec->common.gpe_bit); 791 if (ACPI_FAILURE(status)) { 792 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 793 "Error obtaining GPE bit assignment\n")); ··· 804 805 printk(KERN_INFO PREFIX "%s [%s] (gpe %d)\n", 806 acpi_device_name(device), acpi_device_bid(device), 807 + (u32) ec->common.gpe_bit); 808 + 809 + if (!first_ec) 810 + first_ec = device; 811 + 812 + end: 813 + if (result) 814 + kfree(ec); 815 + 816 + return_VALUE(result); 817 + } 818 + static int 819 + acpi_ec_burst_add ( 820 + struct acpi_device *device) 821 + { 822 + int result = 0; 823 + acpi_status status = AE_OK; 824 + union acpi_ec *ec = NULL; 825 + unsigned long uid; 826 + 827 + ACPI_FUNCTION_TRACE("acpi_ec_add"); 828 + 829 + if (!device) 830 + return_VALUE(-EINVAL); 831 + 832 + ec = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 833 + if (!ec) 834 + return_VALUE(-ENOMEM); 835 + memset(ec, 0, sizeof(union acpi_ec)); 836 + 837 + ec->common.handle = device->handle; 838 + ec->common.uid = -1; 839 + atomic_set(&ec->burst.pending_gpe, 0); 840 + atomic_set(&ec->burst.leaving_burst , 1); 841 + init_MUTEX(&ec->burst.sem); 842 + init_waitqueue_head(&ec->burst.wait); 843 + strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME); 844 + strcpy(acpi_device_class(device), ACPI_EC_CLASS); 845 + acpi_driver_data(device) = ec; 846 + 847 + /* Use the global lock for all EC transactions? */ 848 + acpi_evaluate_integer(ec->common.handle, "_GLK", NULL, &ec->common.global_lock); 849 + 850 + /* If our UID matches the UID for the ECDT-enumerated EC, 851 + we now have the *real* EC info, so kill the makeshift one.*/ 852 + acpi_evaluate_integer(ec->common.handle, "_UID", NULL, &uid); 853 + if (ec_ecdt && ec_ecdt->common.uid == uid) { 854 + acpi_remove_address_space_handler(ACPI_ROOT_OBJECT, 855 + ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 856 + 857 + acpi_remove_gpe_handler(NULL, ec_ecdt->common.gpe_bit, &acpi_ec_gpe_handler); 858 + 859 + kfree(ec_ecdt); 860 + } 861 + 862 + /* Get GPE bit assignment (EC events). */ 863 + /* TODO: Add support for _GPE returning a package */ 864 + status = acpi_evaluate_integer(ec->common.handle, "_GPE", NULL, &ec->common.gpe_bit); 865 + if (ACPI_FAILURE(status)) { 866 + ACPI_DEBUG_PRINT((ACPI_DB_ERROR, 867 + "Error obtaining GPE bit assignment\n")); 868 + result = -ENODEV; 869 + goto end; 870 + } 871 + 872 + result = acpi_ec_add_fs(device); 873 + if (result) 874 + goto end; 875 + 876 + printk(KERN_INFO PREFIX "%s [%s] (gpe %d)\n", 877 + acpi_device_name(device), acpi_device_bid(device), 878 + (u32) ec->common.gpe_bit); 879 880 if (!first_ec) 881 first_ec = device; ··· 822 struct acpi_device *device, 823 int type) 824 { 825 + union acpi_ec *ec = NULL; 826 827 ACPI_FUNCTION_TRACE("acpi_ec_remove"); 828 ··· 844 struct acpi_resource *resource, 845 void *context) 846 { 847 + union acpi_ec *ec = (union acpi_ec *) context; 848 struct acpi_generic_address *addr; 849 850 if (resource->id != ACPI_RSTYPE_IO) { ··· 856 * the second address region returned is the status/command 857 * port. 
858 */ 859 + if (ec->common.data_addr.register_bit_width == 0) { 860 + addr = &ec->common.data_addr; 861 + } else if (ec->common.command_addr.register_bit_width == 0) { 862 + addr = &ec->common.command_addr; 863 } else { 864 return AE_CTRL_TERMINATE; 865 } ··· 877 acpi_ec_start ( 878 struct acpi_device *device) 879 { 880 + acpi_status status = AE_OK; 881 + union acpi_ec *ec = NULL; 882 883 ACPI_FUNCTION_TRACE("acpi_ec_start"); 884 ··· 893 /* 894 * Get I/O port addresses. Convert to GAS format. 895 */ 896 + status = acpi_walk_resources(ec->common.handle, METHOD_NAME__CRS, 897 acpi_ec_io_ports, ec); 898 + if (ACPI_FAILURE(status) || ec->common.command_addr.register_bit_width == 0) { 899 ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error getting I/O port addresses")); 900 return_VALUE(-ENODEV); 901 } 902 903 + ec->common.status_addr = ec->common.command_addr; 904 905 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "gpe=0x%02x, ports=0x%2x,0x%2x\n", 906 + (u32) ec->common.gpe_bit, (u32) ec->common.command_addr.address, 907 + (u32) ec->common.data_addr.address)); 908 + 909 910 /* 911 * Install GPE handler 912 */ 913 + status = acpi_install_gpe_handler(NULL, ec->common.gpe_bit, 914 ACPI_GPE_EDGE_TRIGGERED, &acpi_ec_gpe_handler, ec); 915 if (ACPI_FAILURE(status)) { 916 return_VALUE(-ENODEV); 917 } 918 + acpi_set_gpe_type (NULL, ec->common.gpe_bit, ACPI_GPE_TYPE_RUNTIME); 919 + acpi_enable_gpe (NULL, ec->common.gpe_bit, ACPI_NOT_ISR); 920 921 + status = acpi_install_address_space_handler (ec->common.handle, 922 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, 923 &acpi_ec_space_setup, ec); 924 if (ACPI_FAILURE(status)) { 925 + acpi_remove_gpe_handler(NULL, ec->common.gpe_bit, &acpi_ec_gpe_handler); 926 return_VALUE(-ENODEV); 927 } 928 ··· 934 struct acpi_device *device, 935 int type) 936 { 937 + acpi_status status = AE_OK; 938 + union acpi_ec *ec = NULL; 939 940 ACPI_FUNCTION_TRACE("acpi_ec_stop"); 941 ··· 944 945 ec = acpi_driver_data(device); 946 947 + status = acpi_remove_address_space_handler(ec->common.handle, 948 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler); 949 if (ACPI_FAILURE(status)) 950 return_VALUE(-ENODEV); 951 952 + status = acpi_remove_gpe_handler(NULL, ec->common.gpe_bit, &acpi_ec_gpe_handler); 953 if (ACPI_FAILURE(status)) 954 return_VALUE(-ENODEV); 955 ··· 963 void *context, 964 void **retval) 965 { 966 + 967 + if (acpi_ec_polling_mode) 968 + return acpi_fake_ecdt_polling_callback(handle, 969 + Level, context, retval); 970 + else 971 + return acpi_fake_ecdt_burst_callback(handle, 972 + Level, context, retval); 973 + } 974 + 975 + static acpi_status __init 976 + acpi_fake_ecdt_polling_callback ( 977 + acpi_handle handle, 978 + u32 Level, 979 + void *context, 980 + void **retval) 981 + { 982 acpi_status status; 983 984 status = acpi_walk_resources(handle, METHOD_NAME__CRS, 985 acpi_ec_io_ports, ec_ecdt); 986 if (ACPI_FAILURE(status)) 987 return status; 988 + ec_ecdt->common.status_addr = ec_ecdt->common.command_addr; 989 990 + ec_ecdt->common.uid = -1; 991 + acpi_evaluate_integer(handle, "_UID", NULL, &ec_ecdt->common.uid); 992 993 + status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec_ecdt->common.gpe_bit); 994 if (ACPI_FAILURE(status)) 995 return status; 996 + spin_lock_init(&ec_ecdt->polling.lock); 997 + ec_ecdt->common.global_lock = TRUE; 998 + ec_ecdt->common.handle = handle; 999 1000 printk(KERN_INFO PREFIX "GPE=0x%02x, ports=0x%2x, 0x%2x\n", 1001 + (u32) ec_ecdt->common.gpe_bit, (u32) ec_ecdt->common.command_addr.address, 1002 + (u32) ec_ecdt->common.data_addr.address); 1003 + 1004 + return 
AE_CTRL_TERMINATE; 1005 + } 1006 + 1007 + static acpi_status __init 1008 + acpi_fake_ecdt_burst_callback ( 1009 + acpi_handle handle, 1010 + u32 Level, 1011 + void *context, 1012 + void **retval) 1013 + { 1014 + acpi_status status; 1015 + 1016 + init_MUTEX(&ec_ecdt->burst.sem); 1017 + init_waitqueue_head(&ec_ecdt->burst.wait); 1018 + status = acpi_walk_resources(handle, METHOD_NAME__CRS, 1019 + acpi_ec_io_ports, ec_ecdt); 1020 + if (ACPI_FAILURE(status)) 1021 + return status; 1022 + ec_ecdt->common.status_addr = ec_ecdt->common.command_addr; 1023 + 1024 + ec_ecdt->common.uid = -1; 1025 + acpi_evaluate_integer(handle, "_UID", NULL, &ec_ecdt->common.uid); 1026 + 1027 + status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec_ecdt->common.gpe_bit); 1028 + if (ACPI_FAILURE(status)) 1029 + return status; 1030 + ec_ecdt->common.global_lock = TRUE; 1031 + ec_ecdt->common.handle = handle; 1032 + 1033 + printk(KERN_INFO PREFIX "GPE=0x%02x, ports=0x%2x, 0x%2x\n", 1034 + (u32) ec_ecdt->common.gpe_bit, (u32) ec_ecdt->common.command_addr.address, 1035 + (u32) ec_ecdt->common.data_addr.address); 1036 1037 return AE_CTRL_TERMINATE; 1038 } ··· 1005 1006 printk(KERN_INFO PREFIX "Try to make an fake ECDT\n"); 1007 1008 + ec_ecdt = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 1009 if (!ec_ecdt) { 1010 ret = -ENOMEM; 1011 goto error; 1012 } 1013 + memset(ec_ecdt, 0, sizeof(union acpi_ec)); 1014 1015 status = acpi_get_devices (ACPI_EC_HID, 1016 acpi_fake_ecdt_callback, ··· 1031 static int __init 1032 acpi_ec_get_real_ecdt(void) 1033 { 1034 + if (acpi_ec_polling_mode) 1035 + return acpi_ec_polling_get_real_ecdt(); 1036 + else 1037 + return acpi_ec_burst_get_real_ecdt(); 1038 + } 1039 + 1040 + static int __init 1041 + acpi_ec_polling_get_real_ecdt(void) 1042 + { 1043 + acpi_status status; 1044 + struct acpi_table_ecdt *ecdt_ptr; 1045 + 1046 + status = acpi_get_firmware_table("ECDT", 1, ACPI_LOGICAL_ADDRESSING, 1047 + (struct acpi_table_header **) &ecdt_ptr); 1048 + if (ACPI_FAILURE(status)) 1049 + return -ENODEV; 1050 + 1051 + printk(KERN_INFO PREFIX "Found ECDT\n"); 1052 + 1053 + /* 1054 + * Generate a temporary ec context to use until the namespace is scanned 1055 + */ 1056 + ec_ecdt = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 1057 + if (!ec_ecdt) 1058 + return -ENOMEM; 1059 + memset(ec_ecdt, 0, sizeof(union acpi_ec)); 1060 + 1061 + ec_ecdt->common.command_addr = ecdt_ptr->ec_control; 1062 + ec_ecdt->common.status_addr = ecdt_ptr->ec_control; 1063 + ec_ecdt->common.data_addr = ecdt_ptr->ec_data; 1064 + ec_ecdt->common.gpe_bit = ecdt_ptr->gpe_bit; 1065 + spin_lock_init(&ec_ecdt->polling.lock); 1066 + /* use the GL just to be safe */ 1067 + ec_ecdt->common.global_lock = TRUE; 1068 + ec_ecdt->common.uid = ecdt_ptr->uid; 1069 + 1070 + status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->common.handle); 1071 + if (ACPI_FAILURE(status)) { 1072 + goto error; 1073 + } 1074 + 1075 + return 0; 1076 + error: 1077 + printk(KERN_ERR PREFIX "Could not use ECDT\n"); 1078 + kfree(ec_ecdt); 1079 + ec_ecdt = NULL; 1080 + 1081 + return -ENODEV; 1082 + } 1083 + 1084 + 1085 + static int __init 1086 + acpi_ec_burst_get_real_ecdt(void) 1087 + { 1088 acpi_status status; 1089 struct acpi_table_ecdt *ecdt_ptr; 1090 ··· 1044 /* 1045 * Generate a temporary ec context to use until the namespace is scanned 1046 */ 1047 + ec_ecdt = kmalloc(sizeof(union acpi_ec), GFP_KERNEL); 1048 if (!ec_ecdt) 1049 return -ENOMEM; 1050 + memset(ec_ecdt, 0, sizeof(union acpi_ec)); 1051 1052 + init_MUTEX(&ec_ecdt->burst.sem); 1053 + 
init_waitqueue_head(&ec_ecdt->burst.wait); 1054 + ec_ecdt->common.command_addr = ecdt_ptr->ec_control; 1055 + ec_ecdt->common.status_addr = ecdt_ptr->ec_control; 1056 + ec_ecdt->common.data_addr = ecdt_ptr->ec_data; 1057 + ec_ecdt->common.gpe_bit = ecdt_ptr->gpe_bit; 1058 /* use the GL just to be safe */ 1059 + ec_ecdt->common.global_lock = TRUE; 1060 + ec_ecdt->common.uid = ecdt_ptr->uid; 1061 1062 + status = acpi_get_handle(NULL, ecdt_ptr->ec_id, &ec_ecdt->common.handle); 1063 if (ACPI_FAILURE(status)) { 1064 goto error; 1065 } ··· 1092 /* 1093 * Install GPE handler 1094 */ 1095 + status = acpi_install_gpe_handler(NULL, ec_ecdt->common.gpe_bit, 1096 ACPI_GPE_EDGE_TRIGGERED, &acpi_ec_gpe_handler, 1097 ec_ecdt); 1098 if (ACPI_FAILURE(status)) { 1099 goto error; 1100 } 1101 + acpi_set_gpe_type (NULL, ec_ecdt->common.gpe_bit, ACPI_GPE_TYPE_RUNTIME); 1102 + acpi_enable_gpe (NULL, ec_ecdt->common.gpe_bit, ACPI_NOT_ISR); 1103 1104 status = acpi_install_address_space_handler (ACPI_ROOT_OBJECT, 1105 ACPI_ADR_SPACE_EC, &acpi_ec_space_handler, 1106 &acpi_ec_space_setup, ec_ecdt); 1107 if (ACPI_FAILURE(status)) { 1108 + acpi_remove_gpe_handler(NULL, ec_ecdt->common.gpe_bit, 1109 &acpi_ec_gpe_handler); 1110 goto error; 1111 } ··· 1123 1124 static int __init acpi_ec_init (void) 1125 { 1126 + int result = 0; 1127 1128 ACPI_FUNCTION_TRACE("acpi_ec_init"); 1129 ··· 1167 return 0; 1168 } 1169 __setup("acpi_fake_ecdt", acpi_fake_ecdt_setup); 1170 + static int __init acpi_ec_set_polling_mode(char *str) 1171 + { 1172 + acpi_ec_polling_mode = EC_POLLING; 1173 + acpi_ec_driver.ops.add = acpi_ec_polling_add; 1174 + return 0; 1175 + } 1176 + __setup("ec_polling", acpi_ec_set_polling_mode);