[ACPI] Add "ec_polling" boot option

EC burst mode benefits many machines, some of
them significantly. However, our current
implementation fails on some machines such
as Rafael's Asus L5D.

This patch restores the alternative EC polling code, which can be
enabled at boot time with the "ec_polling" kernel command-line option.

http://bugzilla.kernel.org/show_bug.cgi?id=4665

Signed-off-by: Luming Yu <luming.yu@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
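
The structural idea, condensed for readability (every identifier below is
taken from the patch itself; this sketch only restates the dispatch pattern
the diff applies to the read, write, query, wait, GPE-handler and ECDT setup
paths, it is not an additional change):

    static int acpi_ec_polling_mode;  /* set to EC_POLLING by the "ec_polling" boot option */

    static int acpi_ec_read(union acpi_ec *ec, u8 address, u32 *data)
    {
            if (acpi_ec_polling_mode)
                    return acpi_ec_polling_read(ec, address, data); /* spinlock + udelay() polling */
            else
                    return acpi_ec_burst_read(ec, address, data);   /* GPE/wait-queue driven burst path */
    }

The old struct acpi_ec becomes union acpi_ec: the burst and polling views
repeat the members of the common view in the same order (mode, handle, uid,
gpe_bit, the port addresses, global_lock), so the rest of the driver keeps
passing a single pointer around no matter which mode was selected at boot.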

Authored by Luming Yu and committed by Len Brown (45bea155, 335f16be)

+721 -170
drivers/acpi/ec.c
···
  #define ACPI_EC_DELAY            50      /* Wait 50ms max. during EC ops */
  #define ACPI_EC_UDELAY_GLK       1000    /* Wait 1ms max. to get global lock */

+ #define ACPI_EC_UDELAY           100     /* Poll @ 100us increments */
+ #define ACPI_EC_UDELAY_COUNT     1000    /* Wait 10ms max. during EC ops */
+
  #define ACPI_EC_COMMAND_READ     0x80
  #define ACPI_EC_COMMAND_WRITE    0x81
  #define ACPI_EC_BURST_ENABLE     0x82
  #define ACPI_EC_BURST_DISABLE    0x83
  #define ACPI_EC_COMMAND_QUERY    0x84

- static int acpi_ec_add (struct acpi_device *device);
+ #define EC_POLLING               0xFF
+ #define EC_BURST                 0x00
+
  static int acpi_ec_remove (struct acpi_device *device, int type);
  static int acpi_ec_start (struct acpi_device *device);
  static int acpi_ec_stop (struct acpi_device *device, int type);
+ static int acpi_ec_burst_add (struct acpi_device *device);

  static struct acpi_driver acpi_ec_driver = {
          .name   = ACPI_EC_DRIVER_NAME,
          .class  = ACPI_EC_CLASS,
          .ids    = ACPI_EC_HID,
          .ops    = {
-                 .add    = acpi_ec_add,
+                 .add    = acpi_ec_burst_add,
                  .remove = acpi_ec_remove,
                  .start  = acpi_ec_start,
                  .stop   = acpi_ec_stop,
          },
  };

+ union acpi_ec {
+         struct {
+                 u32                             mode;
+                 acpi_handle                     handle;
+                 unsigned long                   uid;
+                 unsigned long                   gpe_bit;
+                 struct acpi_generic_address     status_addr;
+                 struct acpi_generic_address     command_addr;
+                 struct acpi_generic_address     data_addr;
+                 unsigned long                   global_lock;
+         } common;
+
- struct acpi_ec {
-         acpi_handle                     handle;
-         unsigned long                   uid;
-         unsigned long                   gpe_bit;
-         struct acpi_generic_address     status_addr;
-         struct acpi_generic_address     command_addr;
-         struct acpi_generic_address     data_addr;
-         unsigned long                   global_lock;
-         unsigned int                    expect_event;
-         atomic_t                        leaving_burst;  /* 0 : No, 1 : Yes, 2: abort */
-         atomic_t                        pending_gpe;
-         struct semaphore                sem;
-         wait_queue_head_t               wait;
+         struct {
+                 u32                             mode;
+                 acpi_handle                     handle;
+                 unsigned long                   uid;
+                 unsigned long                   gpe_bit;
+                 struct acpi_generic_address     status_addr;
+                 struct acpi_generic_address     command_addr;
+                 struct acpi_generic_address     data_addr;
+                 unsigned long                   global_lock;
+                 unsigned int                    expect_event;
+                 atomic_t                        leaving_burst;  /* 0 : No, 1 : Yes, 2: abort */
+                 atomic_t                        pending_gpe;
+                 struct semaphore                sem;
+                 wait_queue_head_t               wait;
+         } burst;
+
+         struct {
+                 u32                             mode;
+                 acpi_handle                     handle;
+                 unsigned long                   uid;
+                 unsigned long                   gpe_bit;
+                 struct acpi_generic_address     status_addr;
+                 struct acpi_generic_address     command_addr;
+                 struct acpi_generic_address     data_addr;
+                 unsigned long                   global_lock;
+                 spinlock_t                      lock;
+         } polling;
  };

+ static int acpi_ec_polling_wait (union acpi_ec *ec, u8 event);
+ static int acpi_ec_burst_wait (union acpi_ec *ec, unsigned int event);
+ static int acpi_ec_polling_read (union acpi_ec *ec, u8 address, u32 *data);
+ static int acpi_ec_burst_read (union acpi_ec *ec, u8 address, u32 *data);
+ static int acpi_ec_polling_write (union acpi_ec *ec, u8 address, u8 data);
+ static int acpi_ec_burst_write (union acpi_ec *ec, u8 address, u8 data);
+ static int acpi_ec_polling_query (union acpi_ec *ec, u32 *data);
+ static int acpi_ec_burst_query (union acpi_ec *ec, u32 *data);
+ static void acpi_ec_gpe_polling_query (void *ec_cxt);
+ static void acpi_ec_gpe_burst_query (void *ec_cxt);
+ static u32 acpi_ec_gpe_polling_handler (void *data);
+ static u32 acpi_ec_gpe_burst_handler (void *data);
+ static acpi_status __init acpi_fake_ecdt_polling_callback (acpi_handle handle, u32 Level, void *context, void **retval);
+ static acpi_status __init acpi_fake_ecdt_burst_callback (acpi_handle handle, u32 Level, void *context, void **retval);
+ static int __init acpi_ec_polling_get_real_ecdt(void);
+ static int __init acpi_ec_burst_get_real_ecdt(void);

  /* If we find an EC via the ECDT, we need to keep a ptr to its context */
- static struct acpi_ec *ec_ecdt;
+ static union acpi_ec *ec_ecdt;

  /* External interfaces use first EC only, so remember */
  static struct acpi_device *first_ec;
+ static int acpi_ec_polling_mode;

  /* --------------------------------------------------------------------------
                               Transaction Management
     -------------------------------------------------------------------------- */

- static inline u32 acpi_ec_read_status(struct acpi_ec *ec)
+ static inline u32 acpi_ec_read_status(union acpi_ec *ec)
  {
          u32 status = 0;

-         acpi_hw_low_level_read(8, &status, &ec->status_addr);
+         acpi_hw_low_level_read(8, &status, &ec->common.status_addr);
          return status;
  }

- static int acpi_ec_wait(struct acpi_ec *ec, unsigned int event)
+ static int
+ acpi_ec_wait (
+         union acpi_ec   *ec,
+         u8              event)
+ {
+         if (acpi_ec_polling_mode)
+                 return acpi_ec_polling_wait (ec, event);
+         else
+                 return acpi_ec_burst_wait (ec, event);
+ }
+
+ static int
+ acpi_ec_polling_wait (
+         union acpi_ec   *ec,
+         u8              event)
+ {
+         u32 acpi_ec_status = 0;
+         u32 i = ACPI_EC_UDELAY_COUNT;
+
+         if (!ec)
+                 return -EINVAL;
+
+         /* Poll the EC status register waiting for the event to occur. */
+         switch (event) {
+         case ACPI_EC_EVENT_OBF:
+                 do {
+                         acpi_hw_low_level_read(8, &acpi_ec_status, &ec->common.status_addr);
+                         if (acpi_ec_status & ACPI_EC_FLAG_OBF)
+                                 return 0;
+                         udelay(ACPI_EC_UDELAY);
+                 } while (--i > 0);
+                 break;
+         case ACPI_EC_EVENT_IBE:
+                 do {
+                         acpi_hw_low_level_read(8, &acpi_ec_status, &ec->common.status_addr);
+                         if (!(acpi_ec_status & ACPI_EC_FLAG_IBF))
+                                 return 0;
+                         udelay(ACPI_EC_UDELAY);
+                 } while (--i > 0);
+                 break;
+         default:
+                 return -EINVAL;
+         }
+
+         return -ETIME;
+ }
+ static int acpi_ec_burst_wait(union acpi_ec *ec, unsigned int event)
  {
          int result = 0;

          ACPI_FUNCTION_TRACE("acpi_ec_wait");

-         ec->expect_event = event;
+         ec->burst.expect_event = event;
          smp_mb();

-         result = wait_event_interruptible_timeout(ec->wait,
-                 !ec->expect_event,
+         result = wait_event_interruptible_timeout(ec->burst.wait,
+                 !ec->burst.expect_event,
                  msecs_to_jiffies(ACPI_EC_DELAY));

-         ec->expect_event = 0;
+         ec->burst.expect_event = 0;
          smp_mb();
···
  static int
  acpi_ec_read (
-         struct acpi_ec  *ec,
+         union acpi_ec   *ec,
+         u8              address,
+         u32             *data)
+ {
+         if (acpi_ec_polling_mode)
+                 return acpi_ec_polling_read(ec, address, data);
+         else
+                 return acpi_ec_burst_read(ec, address, data);
+ }
+ static int
+ acpi_ec_write (
+         union acpi_ec   *ec,
+         u8              address,
+         u8              data)
+ {
+         if (acpi_ec_polling_mode)
+                 return acpi_ec_polling_write(ec, address, data);
+         else
+                 return acpi_ec_burst_write(ec, address, data);
+ }
···
  static int
  acpi_ec_query (
-         struct acpi_ec  *ec,
+         union acpi_ec   *ec,
+         u32             *data)
+ {
+         if (acpi_ec_polling_mode)
+                 return acpi_ec_polling_query(ec, data);
+         else
+                 return acpi_ec_burst_query(ec, data);
+ }
···
  acpi_ec_gpe_query (
          void            *ec_cxt)
  {
-         struct acpi_ec  *ec = (struct acpi_ec *) ec_cxt;
+         if (acpi_ec_polling_mode)
+                 acpi_ec_gpe_polling_query(ec_cxt);
+         else
+                 acpi_ec_gpe_burst_query(ec_cxt);
+ }
···
  acpi_ec_gpe_handler (
          void            *data)
  {
+         if (acpi_ec_polling_mode)
+                 return acpi_ec_gpe_polling_handler(data);
+         else
+                 return acpi_ec_gpe_burst_handler(data);
+ }
+ static u32
+ acpi_ec_gpe_polling_handler (
+         void            *data)
+ {
          acpi_status     status = AE_OK;
-         u32             value;
-         struct acpi_ec  *ec = (struct acpi_ec *) data;
+         union acpi_ec   *ec = (union acpi_ec *) data;

          if (!ec)
                  return ACPI_INTERRUPT_NOT_HANDLED;

-         acpi_disable_gpe(NULL, ec->gpe_bit, ACPI_ISR);
+         acpi_disable_gpe(NULL, ec->common.gpe_bit, ACPI_ISR);
+
+         status = acpi_os_queue_for_execution(OSD_PRIORITY_GPE,
+                 acpi_ec_gpe_query, ec);
+
+         if (status == AE_OK)
+                 return ACPI_INTERRUPT_HANDLED;
+         else
+                 return ACPI_INTERRUPT_NOT_HANDLED;
+ }
···
  static int
- acpi_ec_add (
+ acpi_ec_polling_add (
          struct acpi_device      *device)
  {
···
-         ec->handle = device->handle;
-         ec->uid = -1;
-         atomic_set(&ec->pending_gpe, 0);
-         atomic_set(&ec->leaving_burst , 1);
-         init_MUTEX(&ec->sem);
-         init_waitqueue_head(&ec->wait);
+         ec->common.handle = device->handle;
+         ec->common.uid = -1;
+         spin_lock_init(&ec->polling.lock);
···
  static int __init
  acpi_ec_get_real_ecdt(void)
  {
+         if (acpi_ec_polling_mode)
+                 return acpi_ec_polling_get_real_ecdt();
+         else
+                 return acpi_ec_burst_get_real_ecdt();
+ }
···
  __setup("acpi_fake_ecdt", acpi_fake_ecdt_setup);
+ static int __init acpi_ec_set_polling_mode(char *str)
+ {
+         acpi_ec_polling_mode = EC_POLLING;
+         acpi_ec_driver.ops.add = acpi_ec_polling_add;
+         return 0;
+ }
+ __setup("ec_polling", acpi_ec_set_polling_mode);