// SPDX-License-Identifier: GPL-2.0
/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "nvec.h"

#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		BIT(10)
#define I2C_CNFG_NEW_MASTER_SFM		BIT(11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWSL		BIT(2)
#define I2C_SL_NACK		BIT(1)
#define I2C_SL_RESP		BIT(0)
#define I2C_SL_IRQ		BIT(3)
#define END_TRANS		BIT(4)
#define RCVD			BIT(2)
#define RNW			BIT(1)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c

/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};

enum nvec_sleep_subcmds {
	GLOBAL_EVENTS,
	AP_PWR_DOWN,
	AP_SUSPEND,
};

#define CNF_EVENT_REPORTING	0x01
#define GET_FIRMWARE_VERSION	0x15
#define LID_SWITCH		BIT(1)
#define PWR_BUTTON		BIT(15)

static struct nvec_chip *nvec_power_handle;

static const struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
	},
	{
		.name = "nvec-mouse",
	},
	{
		.name = "nvec-power",
		.id = 0,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		.name = "nvec-paz00",
	},
};

/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 * @events: Unused
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);

/**
 * nvec_unregister_notifier - Unregister a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to unregister
 *
 * Unregisters a notifier with @nvec. The notifier will be removed from the
 * atomic notifier chain.
 */
int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_unregister_notifier);

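/*
 * Usage sketch (illustrative only, not part of this driver): a sub-driver
 * typically registers a callback on the chain above from its own probe
 * routine. The names example_notifier() and example_nb are hypothetical;
 * the check against NVEC_SYS is just an example. The chain is called with
 * the type byte of the received message as event_type and the raw message
 * buffer as data.
 *
 *	static int example_notifier(struct notifier_block *nb,
 *				    unsigned long event_type, void *data)
 *	{
 *		unsigned char *msg = data;	// msg[0] is the type byte
 *
 *		if (event_type != NVEC_SYS)
 *			return NOTIFY_DONE;
 *
 *		return NOTIFY_STOP;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_notifier,
 *	};
 *
 *	At probe time:	nvec_register_notifier(nvec, &example_nb, 0);
 *	On removal:	nvec_unregister_notifier(nvec, &example_nb);
 */
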
/*
 * nvec_status_notifier - The final notifier
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
				unsigned long event_type, void *data)
{
	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
					      nvec_status_notifier);
	unsigned char *msg = data;

	if (event_type != NVEC_CNTL)
		return NOTIFY_DONE;

	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
		       msg, msg[1] + 2, true);

	return NOTIFY_OK;
}

/**
 * nvec_msg_alloc:
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
				       enum nvec_msg_category category)
{
	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

	for (; i < NVEC_POOL_SIZE; i++) {
		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
			return &nvec->msg_pool[i];
		}
	}

	dev_err(nvec->dev, "Could not allocate %s buffer\n",
		(category == NVEC_MSG_TX) ? "TX" : "RX");

	return NULL;
}

/**
 * nvec_msg_free:
 * @nvec: A &struct nvec_chip
 * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message
 */
void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);

/**
 * nvec_msg_is_event - Return %true if @msg is an event
 * @msg: A message
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
	return msg->data[0] >> 7;
}

/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
	bool is_event = nvec_msg_is_event(msg);
	int event_length = (msg->data[0] & 0x60) >> 5;

	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
	if (!is_event || event_length == NVEC_VAR_SIZE)
		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
	else if (event_length == NVEC_2BYTES)
		return 2;
	else if (event_length == NVEC_3BYTES)
		return 3;
	return 0;
}

/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpio_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpiod_get_value(nvec->gpiod), value);
	gpiod_set_value(nvec->gpiod, value);
}

/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
		     short size)
{
	struct nvec_msg *msg;
	unsigned long flags;

	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

	if (!msg)
		return -ENOMEM;

	msg->data[0] = size;
	memcpy(msg->data + 1, data, size);
	msg->size = size + 1;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	list_add_tail(&msg->node, &nvec->tx_data);
	spin_unlock_irqrestore(&nvec->tx_lock, flags);

	schedule_work(&nvec->tx_work);

	return 0;
}
EXPORT_SYMBOL(nvec_write_async);

/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 * @msg: The response message received
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: 0 on success, a negative error code on failure.
 * The response message is returned in @msg; it shall be freed
 * with nvec_msg_free() once no longer used.
 */
int nvec_write_sync(struct nvec_chip *nvec,
		    const unsigned char *data, short size,
		    struct nvec_msg **msg)
{
	mutex_lock(&nvec->sync_write_mutex);

	if (msg)
		*msg = NULL;

	nvec->sync_write_pending = (data[1] << 8) + data[0];

	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
		return -ENOMEM;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
		nvec->sync_write_pending);
	if (!(wait_for_completion_timeout(&nvec->sync_write,
					  msecs_to_jiffies(2000)))) {
		dev_warn(nvec->dev,
			 "Timeout waiting for sync write to complete\n");
		mutex_unlock(&nvec->sync_write_mutex);
		return -ETIMEDOUT;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

	if (msg)
		*msg = nvec->last_sync_msg;
	else
		nvec_msg_free(nvec, nvec->last_sync_msg);

	mutex_unlock(&nvec->sync_write_mutex);

	return 0;
}
EXPORT_SYMBOL(nvec_write_sync);

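/*
 * Usage sketch (illustrative only): issuing a synchronous request and
 * releasing the response. The request bytes are the firmware version query
 * also sent from tegra_nvec_probe() below; example_get_version() is a
 * hypothetical caller and error handling is kept to a minimum.
 *
 *	static void example_get_version(struct nvec_chip *nvec)
 *	{
 *		const unsigned char req[] = { NVEC_CNTL, GET_FIRMWARE_VERSION };
 *		struct nvec_msg *resp;
 *
 *		if (nvec_write_sync(nvec, req, sizeof(req), &resp))
 *			return;		// -ENOMEM or -ETIMEDOUT
 *
 *		// resp->data[] now holds the EC's answer
 *		nvec_msg_free(nvec, resp);
 *	}
 */
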
/**
 * nvec_toggle_global_events - enables or disables global event reporting
 * @nvec: nvec handle
 * @state: true for enable, false for disable
 *
 * This switches on/off global event reports by the embedded controller.
 */
static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
{
	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };

	nvec_write_async(nvec, global_events, 3);
}

/**
 * nvec_event_mask - fill the command string with event bitfield
 * @ev: points to event command string
 * @mask: bit to insert into the event mask
 *
 * Configure event command expects a 32 bit bitfield which describes
 * which events to enable. The bitfield has the following structure
 * (from highest byte to lowest):
 *	system state bits 7-0
 *	system state bits 15-8
 *	oem system state bits 7-0
 *	oem system state bits 15-8
 */
static void nvec_event_mask(char *ev, u32 mask)
{
	ev[3] = mask >> 16 & 0xff;
	ev[4] = mask >> 24 & 0xff;
	ev[5] = mask >> 0 & 0xff;
	ev[6] = mask >> 8 & 0xff;
}

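/*
 * Worked example (illustrative): with ev[] pointing at a 7 byte configure
 * event command, nvec_event_mask(ev, LID_SWITCH) (LID_SWITCH == BIT(1))
 * produces the mask bytes ev[3..6] = 0x00 0x00 0x02 0x00, while
 * nvec_event_mask(ev, PWR_BUTTON) (PWR_BUTTON == BIT(15)) produces
 * ev[3..6] = 0x00 0x00 0x00 0x80, matching the byte order documented above.
 */
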
/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(&nvec->ec_transfer,
								msecs_to_jiffies(5000));

		if (err == 0) {
			dev_warn(nvec->dev, "Timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}

/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier.
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
		dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
		return -EINVAL;
	}

	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
		print_hex_dump(KERN_WARNING, "ec system event ",
			       DUMP_PREFIX_NONE, 16, 1, msg->data,
			       msg->data[1] + 2, true);

	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
				   msg->data);

	return 0;
}

/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		if (nvec->sync_write_pending ==
		    (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "Sync write completed!\n");
			nvec->sync_write_pending = 0;
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}

/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
	/* We got an END_TRANS, let's skip this, maybe there's an event */
	if (nvec->tx->pos != nvec->tx->size) {
		dev_err(nvec->dev, "Premature END_TRANS, resending\n");
		nvec->tx->pos = 0;
		nvec_gpio_set_value(nvec, 0);
	} else {
		nvec->state = 0;
	}
}

/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on an RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
			(uint)nvec_msg_size(nvec->rx),
			(uint)nvec->rx->pos);

		nvec_msg_free(nvec, nvec->rx);
		nvec->state = 0;

		/* Battery quirk - Often incomplete, and likes to crash */
		if (nvec->rx->data[0] == NVEC_BAT)
			complete(&nvec->ec_transfer);

		return;
	}

	spin_lock(&nvec->rx_lock);

	/*
	 * Add the received data to the work list and move the ring buffer
	 * pointer to the next entry.
	 */
	list_add_tail(&nvec->rx->node, &nvec->rx_data);

	spin_unlock(&nvec->rx_lock);

	nvec->state = 0;

	if (!nvec_msg_is_event(nvec->rx))
		complete(&nvec->ec_transfer);

	schedule_work(&nvec->rx_work);
}

/**
 * nvec_invalid_flags - Send an error message about invalid flags and jump
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether we shall jump to state 0.
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}

/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}

/**
 * tegra_i2c_writel - safely write to an I2C client controller register
 * @val: value to be written
 * @reg: register to write to
 *
 * A write to an I2C controller register needs to be read back to make sure
 * that the value has arrived.
 */
static void tegra_i2c_writel(u32 val, void *reg)
{
	writel_relaxed(val, reg);

	/* read back register to make sure that register writes completed */
	readl_relaxed(reg);
}

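/*
 * Overview of the slave state machine driven by nvec_interrupt() below
 * (descriptive summary only; the driver keeps the state as a bare integer
 * in nvec->state):
 *
 *	0 - idle, expect a transfer start (I2C_SL_IRQ | RCVD)
 *	1 - our address was received, expect the command byte
 *	2 - first byte after the command; decide between an EC read
 *	    (go to state 3) and an EC write (go to state 4)
 *	3 - the EC performs a block read, we transmit from nvec->tx
 *	4 - the EC writes data, we collect it into nvec->rx
 */
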
/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "Unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something; read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		if (status & RCVD)
			tegra_i2c_writel(0, nvec->base + I2C_SL_RCVD);
	}

	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0: /* Verify that it's a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1: /* Command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(!nvec->rx)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2: /* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3: /* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev,
				"tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint)(nvec->tx ? nvec->tx->pos : 0),
				(uint)(nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4: /* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx ? nvec->rx->pos : 0,
				NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
				"received address 0x%02x, expected 0x%02x\n",
				received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		tegra_i2c_writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	return IRQ_HANDLED;
}

static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_prepare_enable(nvec->i2c_clk);

	reset_control_assert(nvec->rst);
	udelay(2);
	reset_control_deassert(nvec->rst);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	      (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	tegra_i2c_writel(val, nvec->base + I2C_CNFG);

	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	tegra_i2c_writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
	tegra_i2c_writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	tegra_i2c_writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
	tegra_i2c_writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);
}

#ifdef CONFIG_PM_SLEEP
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	tegra_i2c_writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif

static void nvec_power_off(void)
{
	char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };

	nvec_toggle_global_events(nvec_power_handle, false);
	nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}

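/*
 * Example device tree node (an illustrative sketch, not authoritative; all
 * numeric values are placeholders). The names follow what tegra_nvec_probe()
 * requests: a "slave-addr" property, a "div-clk" clock, an "i2c" reset and a
 * "request" GPIO, plus the usual reg and interrupts resources.
 *
 *	nvec@7000c500 {
 *		compatible = "nvidia,nvec";
 *		reg = <0x7000c500 0x100>;
 *		interrupts = <0 92 0x04>;
 *		clocks = <&clock_controller 67>;
 *		clock-names = "div-clk";
 *		resets = <&reset_controller 67>;
 *		reset-names = "i2c";
 *		request-gpios = <&gpio 170 0>;
 *		slave-addr = <138>;
 *	};
 */
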
static int tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct device *dev = &pdev->dev;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	void __iomem *base;
	char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
	     unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
	     enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };

	if (!dev->of_node) {
		dev_err(dev, "must be instantiated using device tree\n");
		return -ENODEV;
	}

	nvec = devm_kzalloc(dev, sizeof(struct nvec_chip), GFP_KERNEL);
	if (!nvec)
		return -ENOMEM;

	platform_set_drvdata(pdev, nvec);
	nvec->dev = dev;

	if (of_property_read_u32(dev->of_node, "slave-addr", &nvec->i2c_addr)) {
		dev_err(dev, "no i2c address specified");
		return -ENODEV;
	}

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	nvec->irq = platform_get_irq(pdev, 0);
	if (nvec->irq < 0)
		return -ENODEV;

	i2c_clk = devm_clk_get(dev, "div-clk");
	if (IS_ERR(i2c_clk)) {
		dev_err(dev, "failed to get controller clock\n");
		return -ENODEV;
	}

	nvec->rst = devm_reset_control_get_exclusive(dev, "i2c");
	if (IS_ERR(nvec->rst)) {
		dev_err(dev, "failed to get controller reset\n");
		return PTR_ERR(nvec->rst);
	}

	nvec->base = base;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);

	nvec->gpiod = devm_gpiod_get(dev, "request", GPIOD_OUT_HIGH);
	if (IS_ERR(nvec->gpiod)) {
		dev_err(dev, "couldn't request gpio\n");
		return PTR_ERR(nvec->gpiod);
	}

	err = devm_request_irq(dev, nvec->irq, nvec_interrupt, IRQF_NO_AUTOEN,
			       "nvec", nvec);
	if (err) {
		dev_err(dev, "couldn't request irq\n");
		return -ENODEV;
	}

	tegra_init_i2c_slave(nvec);

	/* enable event reporting */
	nvec_toggle_global_events(nvec, true);

	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);

	if (!err) {
		dev_warn(dev,
			 "ec firmware version %02x.%02x.%02x / %02x\n",
			 msg->data[4], msg->data[5],
			 msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	ret = mfd_add_devices(dev, 0, nvec_devices,
			      ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
	if (ret)
		dev_err(dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, unmute_speakers, 4);

	/* enable lid switch event */
	nvec_event_mask(enable_event, LID_SWITCH);
	nvec_write_async(nvec, enable_event, 7);

	/* enable power button event */
	nvec_event_mask(enable_event, PWR_BUTTON);
	nvec_write_async(nvec, enable_event, 7);

	return 0;
}

static void tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	nvec_toggle_global_events(nvec, false);
	mfd_remove_devices(nvec->dev);
	nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
	cancel_work_sync(&nvec->rx_work);
	cancel_work_sync(&nvec->tx_work);
	/* FIXME: needs check whether nvec is responsible for power off */
	pm_power_off = NULL;
}

#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
	int err;
	struct nvec_chip *nvec = dev_get_drvdata(dev);
	struct nvec_msg *msg;
	char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };

	dev_dbg(nvec->dev, "suspending\n");

	/* keep these sync or you'll break suspend */
	nvec_toggle_global_events(nvec, false);

	err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
	if (!err)
		nvec_msg_free(nvec, msg);

	nvec_disable_i2c_slave(nvec);

	return 0;
}

static int nvec_resume(struct device *dev)
{
	struct nvec_chip *nvec = dev_get_drvdata(dev);

	dev_dbg(nvec->dev, "resuming\n");
	tegra_init_i2c_slave(nvec);
	nvec_toggle_global_events(nvec, true);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);

/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);

static struct platform_driver nvec_device_driver = {
	.probe = tegra_nvec_probe,
	.remove = tegra_nvec_remove,
	.driver = {
		.name = "nvec",
		.pm = &nvec_pm_ops,
		.of_match_table = nvidia_nvec_of_match,
	}
};

module_platform_driver(nvec_device_driver);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");