/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

/* #define DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/clk/tegra.h>

#include "nvec.h"

#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		(1<<10)
#define I2C_CNFG_NEW_MASTER_SFM		(1<<11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWSL		(1<<2)
#define I2C_SL_NACK		(1<<1)
#define I2C_SL_RESP		(1<<0)
#define I2C_SL_IRQ		(1<<3)
#define END_TRANS		(1<<4)
#define RCVD			(1<<2)
#define RNW			(1<<1)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c

/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};

enum nvec_sleep_subcmds {
	GLOBAL_EVENTS,
	AP_PWR_DOWN,
	AP_SUSPEND,
};

#define CNF_EVENT_REPORTING 0x01
#define GET_FIRMWARE_VERSION 0x15
#define LID_SWITCH BIT(1)
#define PWR_BUTTON BIT(15)

static struct nvec_chip *nvec_power_handle;

static struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
		.id = 1,
	},
	{
		.name = "nvec-mouse",
		.id = 1,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		.name = "nvec-power",
		.id = 2,
	},
	{
		.name = "nvec-paz00",
		.id = 1,
	},
};

/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 * @events: Unused
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
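/*
 * Typical sub-driver usage (illustrative sketch only; the callback name and
 * the event type tested are hypothetical, not part of this file):
 *
 *	static int my_notifier(struct notifier_block *nb,
 *			       unsigned long event_type, void *data)
 *	{
 *		unsigned char *msg = data;
 *
 *		if (event_type != NVEC_CNTL)
 *			return NOTIFY_DONE;
 *
 *		(handle the payload here)
 *		return NOTIFY_STOP;
 *	}
 *
 *	static struct notifier_block nb = { .notifier_call = my_notifier };
 *
 *	nvec_register_notifier(nvec, &nb, 0);
 */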

/**
 * nvec_unregister_notifier - Unregister a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to unregister
 *
 * Unregisters a notifier with @nvec. The notifier will be removed from the
 * atomic notifier chain.
 */
int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_unregister_notifier);

/**
 * nvec_status_notifier - The final notifier
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
				unsigned long event_type, void *data)
{
	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
						nvec_status_notifier);
	unsigned char *msg = (unsigned char *)data;

	if (event_type != NVEC_CNTL)
		return NOTIFY_DONE;

	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
		       msg, msg[1] + 2, true);

	return NOTIFY_OK;
}

/**
 * nvec_msg_alloc:
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
				       enum nvec_msg_category category)
{
	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

	for (; i < NVEC_POOL_SIZE; i++) {
		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
			return &nvec->msg_pool[i];
		}
	}

	dev_err(nvec->dev, "could not allocate %s buffer\n",
		(category == NVEC_MSG_TX) ? "TX" : "RX");

	return NULL;
}

/**
 * nvec_msg_free:
 * @nvec: A &struct nvec_chip
 * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message
 */
void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);
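/*
 * Allocation/free pairing in a nutshell (a sketch only; nvec_write_async()
 * below does exactly this for outgoing messages, so normal callers never
 * touch the pool directly):
 *
 *	struct nvec_msg *m = nvec_msg_alloc(nvec, NVEC_MSG_TX);
 *
 *	if (m) {
 *		m->data[0] = 2;			(byte count that follows)
 *		m->data[1] = NVEC_CNTL;
 *		m->data[2] = GET_FIRMWARE_VERSION;
 *		m->size = 3;
 *		...
 *		nvec_msg_free(nvec, m);
 *	}
 */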

/**
 * nvec_msg_is_event - Return %true if @msg is an event
 * @msg: A message
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
	return msg->data[0] >> 7;
}

/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
	bool is_event = nvec_msg_is_event(msg);
	int event_length = (msg->data[0] & 0x60) >> 5;

	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
	if (!is_event || event_length == NVEC_VAR_SIZE)
		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
	else if (event_length == NVEC_2BYTES)
		return 2;
	else if (event_length == NVEC_3BYTES)
		return 3;
	else
		return 0;
}
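/*
 * Decoding sketch (restating the checks above): bit 7 of byte 0 marks an
 * event, bits 6:5 select the event size class (two bytes, three bytes or
 * variable). Non-events and variable-size events carry their payload
 * length in byte 1, so the total size is payload + count byte + command
 * byte, i.e. data[1] + 2.
 */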

/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpio_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpio_get_value(nvec->gpio), value);
	gpio_set_value(nvec->gpio, value);
}

/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
		     short size)
{
	struct nvec_msg *msg;
	unsigned long flags;

	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

	if (msg == NULL)
		return -ENOMEM;

	msg->data[0] = size;
	memcpy(msg->data + 1, data, size);
	msg->size = size + 1;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	list_add_tail(&msg->node, &nvec->tx_data);
	spin_unlock_irqrestore(&nvec->tx_lock, flags);

	schedule_work(&nvec->tx_work);

	return 0;
}
EXPORT_SYMBOL(nvec_write_async);
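/*
 * Fire-and-forget example, the same pattern nvec_toggle_global_events()
 * below uses; the first byte is the request type, the rest the payload:
 *
 *	unsigned char cmd[] = { NVEC_SLEEP, GLOBAL_EVENTS, true };
 *
 *	nvec_write_async(nvec, cmd, sizeof(cmd));
 */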

/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: A pointer to the response message on success,
 * %NULL on failure. Free with nvec_msg_free() once no longer
 * used.
 */
struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
				 const unsigned char *data, short size)
{
	struct nvec_msg *msg;

	mutex_lock(&nvec->sync_write_mutex);

	nvec->sync_write_pending = (data[1] << 8) + data[0];

	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
		nvec->sync_write_pending);
	if (!(wait_for_completion_timeout(&nvec->sync_write,
					  msecs_to_jiffies(2000)))) {
		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

	msg = nvec->last_sync_msg;

	mutex_unlock(&nvec->sync_write_mutex);

	return msg;
}
EXPORT_SYMBOL(nvec_write_sync);
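/*
 * Request/response example, mirroring the firmware version query in
 * tegra_nvec_probe() below:
 *
 *	char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION };
 *	struct nvec_msg *msg;
 *
 *	msg = nvec_write_sync(nvec, get_firmware_version, 2);
 *	if (msg) {
 *		(inspect msg->data here)
 *		nvec_msg_free(nvec, msg);
 *	}
 */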

/**
 * nvec_toggle_global_events - enables or disables global event reporting
 * @nvec: nvec handle
 * @state: true for enable, false for disable
 *
 * This switches on/off global event reports by the embedded controller.
 */
static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
{
	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };

	nvec_write_async(nvec, global_events, 3);
}

/**
 * nvec_event_mask - fill the command string with event bitfield
 * @ev: points to event command string
 * @mask: bit to insert into the event mask
 *
 * Configure event command expects a 32 bit bitfield which describes
 * which events to enable. The bitfield has the following structure
 * (from highest byte to lowest):
 *	system state bits 7-0
 *	system state bits 15-8
 *	oem system state bits 7-0
 *	oem system state bits 15-8
 */
static void nvec_event_mask(char *ev, u32 mask)
{
	ev[3] = mask >> 16 & 0xff;
	ev[4] = mask >> 24 & 0xff;
	ev[5] = mask >> 0 & 0xff;
	ev[6] = mask >> 8 & 0xff;
}
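/*
 * Worked example with the 7-byte enable_event command used in
 * tegra_nvec_probe() below: nvec_event_mask(enable_event, LID_SWITCH)
 * (LID_SWITCH == BIT(1)) sets ev[5] = 0x02 and clears ev[3], ev[4] and
 * ev[6]; a subsequent nvec_event_mask(enable_event, PWR_BUTTON)
 * (PWR_BUTTON == BIT(15)) overwrites all four bytes again and sets
 * ev[6] = 0x80 instead.
 */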

/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(
				&nvec->ec_transfer, msecs_to_jiffies(5000));

		if (err == 0) {
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}
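/*
 * Note on the handshake (summarising the code above and the interrupt
 * handler below): pulling the request GPIO low asks the EC to issue a
 * master read; nvec_interrupt() then feeds it the queued message, and
 * ec_transfer is completed once a non-event response has been received,
 * at which point the request is removed from the list and freed.
 */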

/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier().
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
		dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
		return -EINVAL;
	}

	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
		print_hex_dump(KERN_WARNING, "ec system event ",
			       DUMP_PREFIX_NONE, 16, 1, msg->data,
			       msg->data[1] + 2, true);

	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
				   msg->data);

	return 0;
}

/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		if (nvec->sync_write_pending ==
		    (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}

/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
	/* We got an END_TRANS, let's skip this, maybe there's an event */
	if (nvec->tx->pos != nvec->tx->size) {
		dev_err(nvec->dev, "premature END_TRANS, resending\n");
		nvec->tx->pos = 0;
		nvec_gpio_set_value(nvec, 0);
	} else {
		nvec->state = 0;
	}
}

/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on an RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
			(uint)nvec_msg_size(nvec->rx),
			(uint)nvec->rx->pos);

		/*
		 * Battery quirk - Often incomplete, and likes to crash.
		 * Check the command byte before freeing the message to
		 * avoid a use after free.
		 */
		if (nvec->rx->data[0] == NVEC_BAT)
			complete(&nvec->ec_transfer);

		nvec_msg_free(nvec, nvec->rx);
		nvec->state = 0;

		return;
	}

	spin_lock(&nvec->rx_lock);

	/* add the received data to the work list
	   and move the ring buffer pointer to the next entry */
	list_add_tail(&nvec->rx->node, &nvec->rx_data);

	spin_unlock(&nvec->rx_lock);

	nvec->state = 0;

	if (!nvec_msg_is_event(nvec->rx))
		complete(&nvec->ec_transfer);

	schedule_work(&nvec->rx_work);
}

/**
 * nvec_invalid_flags - Send an error message about invalid flags and jump
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether we shall jump to state 0.
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}

/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}

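/*
 * State machine overview (derived from the switch statement in
 * nvec_interrupt() below):
 *
 *	0 - idle, waiting for a new transfer start (I2C_SL_IRQ | RCVD)
 *	1 - our slave address matched; the next byte is the command byte
 *	2 - first byte after the command: either the EC turns the bus
 *	    around for a read (we start transmitting from nvec->tx) or it
 *	    keeps writing (we keep filling nvec->rx)
 *	3 - EC block read in progress, we transmit data bytes
 *	4 - EC write in progress, we receive data bytes
 *
 * END_TRANS in states 3 and 4 finishes the transfer via
 * nvec_tx_completed()/nvec_rx_completed().
 */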
/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something, read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0:		/* Verify that it's a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(nvec->rx == NULL)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			BUG_ON(nvec->tx->size < 1);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			BUG_ON(nvec->rx == NULL);
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint)(nvec->tx ? nvec->tx->pos : 0),
				(uint)(nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx ? nvec->rx->pos : 0,
				NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
				"received address 0x%02x, expected 0x%02x\n",
				received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	/*
	 * TODO: A correct fix needs to be found for this.
	 *
	 * We experience fewer incomplete messages with this delay than
	 * without it, but we don't know why. Help is appreciated.
	 */
	udelay(100);

	return IRQ_HANDLED;
}

static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_prepare_enable(nvec->i2c_clk);

	tegra_periph_reset_assert(nvec->i2c_clk);
	udelay(2);
	tegra_periph_reset_deassert(nvec->i2c_clk);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);
}

#ifdef CONFIG_PM_SLEEP
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif

static void nvec_power_off(void)
{
	char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };

	nvec_toggle_global_events(nvec_power_handle, false);
	nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}

/*
 * Parse common device tree data
 */
static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec)
{
	nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);

	if (nvec->gpio < 0) {
		dev_err(nvec->dev, "no gpio specified");
		return -ENODEV;
	}

	if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
				 &nvec->i2c_addr)) {
		dev_err(nvec->dev, "no i2c address specified");
		return -ENODEV;
	}

	return 0;
}
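/*
 * Illustrative device tree fragment matched by this driver (the node name
 * and property values below are made up; only the compatible string and
 * the property names "request-gpios" and "slave-addr" come from this file):
 *
 *	nvec@7000c500 {
 *		compatible = "nvidia,nvec";
 *		...
 *		slave-addr = <138>;
 *		request-gpios = <&gpio 170 0>;
 *	};
 */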

static int tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	struct resource *res;
	void __iomem *base;
	char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
	     unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
	     enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };

	if (!pdev->dev.of_node) {
		dev_err(&pdev->dev, "must be instantiated using device tree\n");
		return -ENODEV;
	}

	nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
	if (nvec == NULL) {
		dev_err(&pdev->dev, "failed to reserve memory\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, nvec);
	nvec->dev = &pdev->dev;

	err = nvec_i2c_parse_dt_pdata(nvec);
	if (err < 0)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
	if (IS_ERR(i2c_clk)) {
		dev_err(nvec->dev, "failed to get controller clock\n");
		return -ENODEV;
	}

	nvec->base = base;
	nvec->irq = res->start;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);

	err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
				    "nvec gpio");
	if (err < 0) {
		dev_err(nvec->dev, "couldn't request gpio\n");
		return -ENODEV;
	}

	err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
			       "nvec", nvec);
	if (err) {
		dev_err(nvec->dev, "couldn't request irq\n");
		return -ENODEV;
	}
	disable_irq(nvec->irq);

	tegra_init_i2c_slave(nvec);

	/* enable event reporting */
	nvec_toggle_global_events(nvec, true);

	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	msg = nvec_write_sync(nvec, get_firmware_version, 2);

	if (msg) {
		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
			 msg->data[4], msg->data[5], msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
			      ARRAY_SIZE(nvec_devices), base, 0, NULL);
	if (ret)
		dev_err(nvec->dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, unmute_speakers, 4);

	/* enable lid switch event */
	nvec_event_mask(enable_event, LID_SWITCH);
	nvec_write_async(nvec, enable_event, 7);

	/* enable power button event */
	nvec_event_mask(enable_event, PWR_BUTTON);
	nvec_write_async(nvec, enable_event, 7);

	return 0;
}

static int tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	nvec_toggle_global_events(nvec, false);
	mfd_remove_devices(nvec->dev);
	nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
	cancel_work_sync(&nvec->rx_work);
	cancel_work_sync(&nvec->tx_work);
	/* FIXME: needs to check whether nvec is responsible for power off */
	pm_power_off = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);
	struct nvec_msg *msg;
	char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };

	dev_dbg(nvec->dev, "suspending\n");

	/* keep these sync or you'll break suspend */
	nvec_toggle_global_events(nvec, false);

	msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend));
	if (msg)
		nvec_msg_free(nvec, msg);

	nvec_disable_i2c_slave(nvec);

	return 0;
}

static int nvec_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	dev_dbg(nvec->dev, "resuming\n");
	tegra_init_i2c_slave(nvec);
	nvec_toggle_global_events(nvec, true);

	return 0;
}
#endif

static const SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);

/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);

static struct platform_driver nvec_device_driver = {
	.probe   = tegra_nvec_probe,
	.remove  = tegra_nvec_remove,
	.driver  = {
		.name = "nvec",
		.owner = THIS_MODULE,
		.pm = &nvec_pm_ops,
		.of_match_table = nvidia_nvec_of_match,
	}
};

module_platform_driver(nvec_device_driver);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");