ACPI: ec: fix race in status register access

Delay the read of the EC status register until
after the event it reports has actually occurred --
otherwise it is possible to read, and act on, stale
status left over from the previous event.

Do this with a perpetually incrementing "event_count" to detect
when a new event occurs and it is safe to read status.

There is no workaround for polling mode -- it is inherently
exposed to reading and acting on stale status, since it
doesn't have an interrupt to tell it the event completed.
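
For illustration only (this sketch is not part of the patch): a userspace
C11 analogue of the event_count scheme. Everything here -- fake_ec,
ec_event_irq(), ec_check_status(), ec_wait_for_event() -- is a hypothetical
stand-in; C11 atomics and pthreads play the roles of atomic_t, the GPE
handler, and the ec->wait wait queue.

/*
 * Illustrative userspace analogue -- NOT kernel code.
 * Build with: gcc -pthread -std=c11 sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct fake_ec {                 /* hypothetical stand-in for struct acpi_ec */
	atomic_uint event_count; /* perpetually incrementing, starts at 1 */
	atomic_uint status;      /* stands in for the EC status register */
	pthread_mutex_t lock;
	pthread_cond_t wait;     /* stands in for ec->wait */
};

/* Stand-in for the GPE handler: status is only fresh once this has run. */
static void ec_event_irq(struct fake_ec *ec, unsigned new_status)
{
	atomic_store(&ec->status, new_status);
	atomic_fetch_add(&ec->event_count, 1); /* mirrors atomic_inc() */
	pthread_mutex_lock(&ec->lock);
	pthread_cond_broadcast(&ec->wait);     /* mirrors wake_up() */
	pthread_mutex_unlock(&ec->lock);
}

/*
 * Mirrors the patched acpi_ec_check_status(): until the counter moves past
 * the snapshot taken before the command was issued, report "not ready" even
 * if the wanted status bit is set -- that bit belongs to the previous event.
 */
static int ec_check_status(struct fake_ec *ec, unsigned mask,
			   unsigned old_count)
{
	if (old_count == atomic_load(&ec->event_count))
		return 0;	/* no new event yet: status would be stale */
	return (atomic_load(&ec->status) & mask) != 0;
}

/* Mirrors the interrupt-mode branch of acpi_ec_wait() (timeout omitted). */
static void ec_wait_for_event(struct fake_ec *ec, unsigned mask,
			      unsigned old_count)
{
	pthread_mutex_lock(&ec->lock);
	while (!ec_check_status(ec, mask, old_count))
		pthread_cond_wait(&ec->wait, &ec->lock);
	pthread_mutex_unlock(&ec->lock);
}

static void *irq_thread(void *arg)
{
	usleep(10000);            /* let the waiter block first */
	ec_event_irq(arg, 0x01);  /* fresh event: sets the OBF-like bit */
	return NULL;
}

int main(void)
{
	struct fake_ec ec;
	pthread_t t;
	unsigned snap;

	atomic_init(&ec.event_count, 1); /* the patch initialises the count to 1 */
	atomic_init(&ec.status, 0x01);   /* stale bit left over from a prior event */
	pthread_mutex_init(&ec.lock, NULL);
	pthread_cond_init(&ec.wait, NULL);

	/* Snapshot the counter before "issuing the command", as the patch does. */
	snap = atomic_load(&ec.event_count);

	pthread_create(&t, NULL, irq_thread, &ec);
	ec_wait_for_event(&ec, 0x01, snap); /* the stale 0x01 cannot satisfy this */
	pthread_join(t, NULL);
	puts("status consumed only after a fresh event");
	return 0;
}

Note the counter starts at 1, so an old_count of 0 can never match it and
the check degrades to a plain status read -- which is exactly how the
patch's polling path and post-timeout recheck invoke
acpi_ec_check_status(ec, event, 0).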

http://bugzilla.kernel.org/show_bug.cgi?id=8110

Signed-off-by: Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>


+23 -17
drivers/acpi/ec.c
···
 	unsigned long global_lock;
 	struct mutex lock;
 	atomic_t query_pending;
+	atomic_t event_count;
 	atomic_t leaving_burst;	/* 0 : No, 1 : Yes, 2: abort */
 	wait_queue_head_t wait;
 } *ec_ecdt;
···
 	outb(data, ec->data_addr);
 }
 
-static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event)
+static inline int acpi_ec_check_status(struct acpi_ec *ec, enum ec_event event,
+				       unsigned old_count)
 {
 	u8 status = acpi_ec_read_status(ec);
-
+	if (old_count == atomic_read(&ec->event_count))
+		return 0;
 	if (event == ACPI_EC_EVENT_OBF_1) {
 		if (status & ACPI_EC_FLAG_OBF)
 			return 1;
···
 	return 0;
 }
 
-static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event)
+static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, unsigned count)
 {
 	if (acpi_ec_mode == EC_POLL) {
 		unsigned long delay = jiffies + msecs_to_jiffies(ACPI_EC_DELAY);
 		while (time_before(jiffies, delay)) {
-			if (acpi_ec_check_status(ec, event))
+			if (acpi_ec_check_status(ec, event, 0))
 				return 0;
 		}
 	} else {
 		if (wait_event_timeout(ec->wait,
-				       acpi_ec_check_status(ec, event),
+				       acpi_ec_check_status(ec, event, count),
 				       msecs_to_jiffies(ACPI_EC_DELAY)) ||
-		    acpi_ec_check_status(ec, event)) {
+		    acpi_ec_check_status(ec, event, 0)) {
 			return 0;
 		} else {
 			printk(KERN_ERR PREFIX "acpi_ec_wait timeout,"
···
 			  u8 * rdata, unsigned rdata_len)
 {
 	int result = 0;
-
+	unsigned count = atomic_read(&ec->event_count);
 	acpi_ec_write_cmd(ec, command);
 
 	for (; wdata_len > 0; --wdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "write_cmd timeout, command = %d\n", command);
 			goto end;
 		}
+		count = atomic_read(&ec->event_count);
 		acpi_ec_write_data(ec, *(wdata++));
 	}
 
 	if (!rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, count);
 		if (result) {
 			printk(KERN_ERR PREFIX
 			       "finish-write timeout, command = %d\n", command);
···
 	}
 
 	for (; rdata_len > 0; --rdata_len) {
-		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1);
+		result = acpi_ec_wait(ec, ACPI_EC_EVENT_OBF_1, count);
 		if (result) {
 			printk(KERN_ERR PREFIX "read timeout, command = %d\n",
 			       command);
 			goto end;
 		}
-
+		count = atomic_read(&ec->event_count);
 		*(rdata++) = acpi_ec_read_data(ec);
 	}
 end:
···
 	/* Make sure GPE is enabled before doing transaction */
 	acpi_enable_gpe(NULL, ec->gpe, ACPI_NOT_ISR);
 
-	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0);
+	status = acpi_ec_wait(ec, ACPI_EC_EVENT_IBF_0, 0);
 	if (status) {
 		printk(KERN_DEBUG PREFIX
 		       "input buffer is not empty, aborting transaction\n");
···
 EXPORT_SYMBOL(ec_write);
 
 int ec_transaction(u8 command,
-                   const u8 * wdata, unsigned wdata_len,
-                   u8 * rdata, unsigned rdata_len)
+		   const u8 * wdata, unsigned wdata_len,
+		   u8 * rdata, unsigned rdata_len)
 {
 	struct acpi_ec *ec;
···
 	acpi_status status = AE_OK;
 	u8 value;
 	struct acpi_ec *ec = (struct acpi_ec *)data;
-
+	atomic_inc(&ec->event_count);
 	if (acpi_ec_mode == EC_INTR) {
 		wake_up(&ec->wait);
 	}
···
 	ec->uid = -1;
 	mutex_init(&ec->lock);
 	atomic_set(&ec->query_pending, 0);
+	atomic_set(&ec->event_count, 1);
 	if (acpi_ec_mode == EC_INTR) {
 		atomic_set(&ec->leaving_burst, 1);
 		init_waitqueue_head(&ec->wait);
···
 	acpi_status status;
 
 	mutex_init(&ec_ecdt->lock);
+	atomic_set(&ec_ecdt->event_count, 1);
 	if (acpi_ec_mode == EC_INTR) {
 		init_waitqueue_head(&ec_ecdt->wait);
 	}
···
 		return -ENOMEM;
 
 	mutex_init(&ec_ecdt->lock);
+	atomic_set(&ec_ecdt->event_count, 1);
 	if (acpi_ec_mode == EC_INTR) {
 		init_waitqueue_head(&ec_ecdt->wait);
 	}
···
 		acpi_ec_mode = EC_POLL;
 	}
 	acpi_ec_driver.ops.add = acpi_ec_add;
-	printk(KERN_NOTICE PREFIX "%s mode.\n",
-	       intr ? "interrupt" : "polling");
+	printk(KERN_NOTICE PREFIX "%s mode.\n", intr ? "interrupt" : "polling");
 
 	return 1;
 }