Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * NVEC: NVIDIA compliant embedded controller interface
3 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
5 *
6 * Authors: Pierre-Hugues Husson <phhusson@free.fr>
7 * Ilya Petrov <ilya.muromec@gmail.com>
8 * Marc Dietrich <marvin24@gmx.de>
9 * Julian Andres Klode <jak@jak-linux.org>
10 *
11 * This file is subject to the terms and conditions of the GNU General Public
12 * License. See the file "COPYING" in the main directory of this archive
13 * for more details.
14 *
15 */
16
17/* #define DEBUG */
18
19#include <linux/kernel.h>
20#include <linux/module.h>
21#include <linux/atomic.h>
22#include <linux/clk.h>
23#include <linux/completion.h>
24#include <linux/delay.h>
25#include <linux/err.h>
26#include <linux/gpio.h>
27#include <linux/interrupt.h>
28#include <linux/io.h>
29#include <linux/irq.h>
30#include <linux/of.h>
31#include <linux/of_gpio.h>
32#include <linux/list.h>
33#include <linux/mfd/core.h>
34#include <linux/mutex.h>
35#include <linux/notifier.h>
36#include <linux/platform_device.h>
37#include <linux/slab.h>
38#include <linux/spinlock.h>
39#include <linux/workqueue.h>
40#include <linux/clk/tegra.h>
41
42#include "nvec.h"
43
/* Tegra I2C controller registers used in slave mode (offsets from nvec->base) */
#define I2C_CNFG 0x00
#define I2C_CNFG_PACKET_MODE_EN (1<<10)
#define I2C_CNFG_NEW_MASTER_SFM (1<<11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12

#define I2C_SL_CNFG 0x20
#define I2C_SL_NEWSL (1<<2)	/* written on init; presumably selects the new slave block — TODO confirm vs TRM */
#define I2C_SL_NACK (1<<1)	/* set on suspend to reject incoming transfers */
#define I2C_SL_RESP (1<<0)

/* bits of I2C_SL_STATUS, decoded in nvec_interrupt() */
#define I2C_SL_IRQ (1<<3)	/* a slave event is pending */
#define END_TRANS (1<<4)	/* the current transfer has ended */
#define RCVD (1<<2)		/* our slave address byte was received */
#define RNW (1<<1)		/* master reads — we must supply data */

#define I2C_SL_RCVD 0x24	/* RX/TX data register in slave mode */
#define I2C_SL_STATUS 0x28
#define I2C_SL_ADDR1 0x2c
#define I2C_SL_ADDR2 0x30
#define I2C_SL_DELAY_COUNT 0x3c
63
64/**
65 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
66 * @NVEC_MSG_RX: The message is an incoming message (from EC)
67 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
68 */
enum nvec_msg_category {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};

/* Subcommands of the NVEC_SLEEP request (second message byte) */
enum nvec_sleep_subcmds {
	GLOBAL_EVENTS,	/* toggle global event reporting, see nvec_toggle_global_events() */
	AP_PWR_DOWN,	/* power the AP down, used by nvec_power_off() */
	AP_SUSPEND,	/* suspend the AP, used by nvec_suspend() */
};

#define CNF_EVENT_REPORTING 0x01	/* NVEC_SYS subcommand: configure event reporting */
#define GET_FIRMWARE_VERSION 0x15	/* NVEC_CNTL subcommand, used in probe */
#define LID_SWITCH BIT(1)		/* event mask bit for the lid switch */
#define PWR_BUTTON BIT(15)		/* event mask bit for the power button */

/* chip instance used by the pm_power_off hook; set in tegra_nvec_probe() */
static struct nvec_chip *nvec_power_handle;
86
/* MFD subdevices registered on top of this bus in tegra_nvec_probe() */
static struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
		.id = 1,
	},
	{
		.name = "nvec-mouse",
		.id = 1,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		.name = "nvec-power",
		.id = 2,
	},
	{
		.name = "nvec-paz00",
		.id = 1,
	},
};
109
110/**
111 * nvec_register_notifier - Register a notifier with nvec
112 * @nvec: A &struct nvec_chip
113 * @nb: The notifier block to register
114 *
115 * Registers a notifier with @nvec. The notifier will be added to an atomic
116 * notifier chain that is called for all received messages except those that
117 * correspond to a request initiated by nvec_write_sync().
118 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	/* @events is currently ignored; every notifier sees all messages */
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
125
126/**
127 * nvec_unregister_notifier - Unregister a notifier with nvec
128 * @nvec: A &struct nvec_chip
129 * @nb: The notifier block to unregister
130 *
131 * Unregisters a notifier with @nvec. The notifier will be removed from the
132 * atomic notifier chain.
133 */
int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
{
	/* thin wrapper; contract is described in the kernel-doc above */
	return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
139
140/**
141 * nvec_status_notifier - The final notifier
142 *
143 * Prints a message about control events not handled in the notifier
144 * chain.
145 */
146static int nvec_status_notifier(struct notifier_block *nb,
147 unsigned long event_type, void *data)
148{
149 struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
150 nvec_status_notifier);
151 unsigned char *msg = (unsigned char *)data;
152
153 if (event_type != NVEC_CNTL)
154 return NOTIFY_DONE;
155
156 dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
157 print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
158 msg, msg[1] + 2, true);
159
160 return NOTIFY_OK;
161}
162
163/**
164 * nvec_msg_alloc:
165 * @nvec: A &struct nvec_chip
166 * @category: Pool category, see &enum nvec_msg_category
167 *
168 * Allocate a single &struct nvec_msg object from the message pool of
169 * @nvec. The result shall be passed to nvec_msg_free() if no longer
170 * used.
171 *
172 * Outgoing messages are placed in the upper 75% of the pool, keeping the
173 * lower 25% available for RX buffers only. The reason is to prevent a
174 * situation where all buffers are full and a message is thus endlessly
175 * retried because the response could never be processed.
176 */
177static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
178 enum nvec_msg_category category)
179{
180 int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;
181
182 for (; i < NVEC_POOL_SIZE; i++) {
183 if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
184 dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
185 return &nvec->msg_pool[i];
186 }
187 }
188
189 dev_err(nvec->dev, "could not allocate %s buffer\n",
190 (category == NVEC_MSG_TX) ? "TX" : "RX");
191
192 return NULL;
193}
194
195/**
196 * nvec_msg_free:
197 * @nvec: A &struct nvec_chip
198 * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
199 *
200 * Free the given message
201 */
202void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
203{
204 if (msg != &nvec->tx_scratch)
205 dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
206 atomic_set(&msg->used, 0);
207}
208EXPORT_SYMBOL_GPL(nvec_msg_free);
209
210/**
211 * nvec_msg_is_event - Return %true if @msg is an event
212 * @msg: A message
213 */
214static bool nvec_msg_is_event(struct nvec_msg *msg)
215{
216 return msg->data[0] >> 7;
217}
218
219/**
220 * nvec_msg_size - Get the size of a message
221 * @msg: The message to get the size for
222 *
223 * This only works for received messages, not for outgoing messages.
224 */
225static size_t nvec_msg_size(struct nvec_msg *msg)
226{
227 bool is_event = nvec_msg_is_event(msg);
228 int event_length = (msg->data[0] & 0x60) >> 5;
229
230 /* for variable size, payload size in byte 1 + count (1) + cmd (1) */
231 if (!is_event || event_length == NVEC_VAR_SIZE)
232 return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
233 else if (event_length == NVEC_2BYTES)
234 return 2;
235 else if (event_length == NVEC_3BYTES)
236 return 3;
237 else
238 return 0;
239}
240
241/**
242 * nvec_gpio_set_value - Set the GPIO value
243 * @nvec: A &struct nvec_chip
244 * @value: The value to write (0 or 1)
245 *
246 * Like gpio_set_value(), but generating debugging information
247 */
248static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
249{
250 dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
251 gpio_get_value(nvec->gpio), value);
252 gpio_set_value(nvec->gpio, value);
253}
254
255/**
256 * nvec_write_async - Asynchronously write a message to NVEC
257 * @nvec: An nvec_chip instance
258 * @data: The message data, starting with the request type
259 * @size: The size of @data
260 *
261 * Queue a single message to be transferred to the embedded controller
262 * and return immediately.
263 *
264 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
266 */
267int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
268 short size)
269{
270 struct nvec_msg *msg;
271 unsigned long flags;
272
273 msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
274
275 if (msg == NULL)
276 return -ENOMEM;
277
278 msg->data[0] = size;
279 memcpy(msg->data + 1, data, size);
280 msg->size = size + 1;
281
282 spin_lock_irqsave(&nvec->tx_lock, flags);
283 list_add_tail(&msg->node, &nvec->tx_data);
284 spin_unlock_irqrestore(&nvec->tx_lock, flags);
285
286 schedule_work(&nvec->tx_work);
287
288 return 0;
289}
290EXPORT_SYMBOL(nvec_write_async);
291
292/**
293 * nvec_write_sync - Write a message to nvec and read the response
294 * @nvec: An &struct nvec_chip
295 * @data: The data to write
296 * @size: The size of @data
297 *
298 * This is similar to nvec_write_async(), but waits for the
299 * request to be answered before returning. This function
300 * uses a mutex and can thus not be called from e.g.
301 * interrupt handlers.
302 *
303 * Returns: A pointer to the response message on success,
304 * %NULL on failure. Free with nvec_msg_free() once no longer
305 * used.
306 */
307struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
308 const unsigned char *data, short size)
309{
310 struct nvec_msg *msg;
311
312 mutex_lock(&nvec->sync_write_mutex);
313
314 nvec->sync_write_pending = (data[1] << 8) + data[0];
315
316 if (nvec_write_async(nvec, data, size) < 0) {
317 mutex_unlock(&nvec->sync_write_mutex);
318 return NULL;
319 }
320
321 dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
322 nvec->sync_write_pending);
323 if (!(wait_for_completion_timeout(&nvec->sync_write,
324 msecs_to_jiffies(2000)))) {
325 dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
326 mutex_unlock(&nvec->sync_write_mutex);
327 return NULL;
328 }
329
330 dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
331
332 msg = nvec->last_sync_msg;
333
334 mutex_unlock(&nvec->sync_write_mutex);
335
336 return msg;
337}
338EXPORT_SYMBOL(nvec_write_sync);
339
340/**
341 * nvec_toggle_global_events - enables or disables global event reporting
342 * @nvec: nvec handle
343 * @state: true for enable, false for disable
344 *
345 * This switches on/off global event reports by the embedded controller.
346 */
347static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
348{
349 unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };
350
351 nvec_write_async(nvec, global_events, 3);
352}
353
354/**
355 * nvec_event_mask - fill the command string with event bitfield
 * @ev: points to event command string
 * @mask: bit to insert into the event mask
358 *
359 * Configure event command expects a 32 bit bitfield which describes
360 * which events to enable. The bitfield has the following structure
361 * (from highest byte to lowest):
362 * system state bits 7-0
363 * system state bits 15-8
364 * oem system state bits 7-0
365 * oem system state bits 15-8
366 */
367static void nvec_event_mask(char *ev, u32 mask)
368{
369 ev[3] = mask >> 16 & 0xff;
370 ev[4] = mask >> 24 & 0xff;
371 ev[5] = mask >> 0 & 0xff;
372 ev[6] = mask >> 8 & 0xff;
373}
374
375/**
376 * nvec_request_master - Process outgoing messages
377 * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip)
378 *
379 * Processes all outgoing requests by sending the request and awaiting the
380 * response, then continuing with the next request. Once a request has a
381 * matching response, it will be freed and removed from the list.
382 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		/* drop the lock while sleeping; msg stays on the list */
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		/* pull the request gpio low to ask the EC to read from us */
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(
			&nvec->ec_transfer, msecs_to_jiffies(5000));

		if (err == 0) {
			/* EC never collected the message: release the request
			 * line and rewind the message for a retry */
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		/* err > 0: transfer completed — retire the message. On
		 * timeout (0) or interruption (< 0) it remains queued. */
		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}
413
414/**
415 * parse_msg - Print some information and call the notifiers on an RX message
416 * @nvec: A &struct nvec_chip
417 * @msg: A message received by @nvec
418 *
 * Parse some pieces of the message and then call the chain of notifiers
420 * registered via nvec_register_notifier.
421 */
422static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
423{
424 if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
425 dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
426 return -EINVAL;
427 }
428
429 if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
430 print_hex_dump(KERN_WARNING, "ec system event ",
431 DUMP_PREFIX_NONE, 16, 1, msg->data,
432 msg->data[1] + 2, true);
433
434 atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
435 msg->data);
436
437 return 0;
438}
439
440/**
441 * nvec_dispatch - Process messages received from the EC
442 * @work: A &struct work_struct (the tx_worker member of &struct nvec_chip)
443 *
444 * Process messages previously received from the EC and put into the RX
445 * queue of the &struct nvec_chip instance associated with @work.
446 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		/* entry is off the list; drop the lock before calling out */
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		/* does this answer a pending nvec_write_sync() request?
		 * (id recorded there is matched against response bytes 2/0) */
		if (nvec->sync_write_pending ==
		      (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			/* ownership of msg passes to the sync caller */
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}
473
474/**
475 * nvec_tx_completed - Complete the current transfer
476 * @nvec: A &struct nvec_chip
477 *
478 * This is called when we have received an END_TRANS on a TX transfer.
479 */
480static void nvec_tx_completed(struct nvec_chip *nvec)
481{
482 /* We got an END_TRANS, let's skip this, maybe there's an event */
483 if (nvec->tx->pos != nvec->tx->size) {
484 dev_err(nvec->dev, "premature END_TRANS, resending\n");
485 nvec->tx->pos = 0;
486 nvec_gpio_set_value(nvec, 0);
487 } else {
488 nvec->state = 0;
489 }
490}
491
492/**
493 * nvec_rx_completed - Complete the current transfer
494 * @nvec: A &struct nvec_chip
495 *
496 * This is called when we have received an END_TRANS on a RX transfer.
497 */
498static void nvec_rx_completed(struct nvec_chip *nvec)
499{
500 if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
501 dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
502 (uint) nvec_msg_size(nvec->rx),
503 (uint) nvec->rx->pos);
504
505 nvec_msg_free(nvec, nvec->rx);
506 nvec->state = 0;
507
508 /* Battery quirk - Often incomplete, and likes to crash */
509 if (nvec->rx->data[0] == NVEC_BAT)
510 complete(&nvec->ec_transfer);
511
512 return;
513 }
514
515 spin_lock(&nvec->rx_lock);
516
517 /* add the received data to the work list
518 and move the ring buffer pointer to the next entry */
519 list_add_tail(&nvec->rx->node, &nvec->rx_data);
520
521 spin_unlock(&nvec->rx_lock);
522
523 nvec->state = 0;
524
525 if (!nvec_msg_is_event(nvec->rx))
526 complete(&nvec->ec_transfer);
527
528 schedule_work(&nvec->rx_work);
529}
530
531/**
532 * nvec_invalid_flags - Send an error message about invalid flags and jump
533 * @nvec: The nvec device
534 * @status: The status flags
535 * @reset: Whether we shall jump to state 0.
536 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	/* log first: the message must show the state we were in when the
	 * bogus flags arrived, before it is possibly reset below */
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}
545
546/**
547 * nvec_tx_set - Set the message to transfer (nvec->tx)
548 * @nvec: A &struct nvec_chip
549 *
550 * Gets the first entry from the tx_data list of @nvec and sets the
551 * tx member to it. If the tx_data list is empty, this uses the
552 * tx_scratch message to send a no operation message.
553 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		/* EC wants to read but nothing is queued: feed it the
		 * fixed no-op message (02 07 02) from the scratch buffer */
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		/* (re)start transmission from the first byte */
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}
574
575/**
576 * nvec_interrupt - Interrupt handler
577 * @irq: The IRQ
578 * @dev: The nvec device
579 *
580 * Interrupt handler that fills our RX buffers and empties our TX
581 * buffers. This uses a finite state machine with ridiculous amounts
582 * of error checking, in order to be fairly reliable.
583 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	/* filler byte sent if the EC reads although we have nothing queued */
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	/* state at entry, logged in the trace at the bottom */
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it send us something, read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		/* NOTE(review): this write appears to acknowledge the
		 * address byte — confirm against the Tegra TRM */
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

	/* address byte of a master write: a new transfer begins no matter
	 * what state the FSM thought it was in */
	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0:		/* Verify that its a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(nvec->rx == NULL)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			/* repeated start with RNW: the EC turns around and
			 * reads from us; the prior command byte must have
			 * been the read request (0x01) */
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			/* pick the outgoing message and preload its first byte */
			nvec_tx_set(nvec);
			BUG_ON(nvec->tx->size < 1);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			BUG_ON(nvec->rx == NULL);
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint) (nvec->tx ? nvec->tx->pos : 0),
				(uint) (nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: "
				"Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
				"received address 0x%02x, expected 0x%02x\n",
				received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have send the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		/* release the request line pulled low in nvec_request_master() */
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	/*
	 * TODO: A correct fix needs to be found for this.
	 *
	 * We experience less incomplete messages with this delay than without
	 * it, but we don't know why. Help is appreciated.
	 */
	udelay(100);

	return IRQ_HANDLED;
}
730
/* Reset and program the Tegra I2C controller for slave operation. */
static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_prepare_enable(nvec->i2c_clk);

	/* pulse the controller reset */
	tegra_periph_reset_assert(nvec->i2c_clk);
	udelay(2);
	tegra_periph_reset_deassert(nvec->i2c_clk);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	/* NOTE(review): the >> 1 suggests the register takes the 7-bit
	 * address while nvec->i2c_addr stores the 8-bit form — confirm */
	writel(nvec->i2c_addr>>1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);

	/* the clock was only needed for programming; the probe path
	 * re-enables it separately before communication starts */
	clk_disable_unprepare(nvec->i2c_clk);
}
757
758#ifdef CONFIG_PM_SLEEP
/* Quiesce the slave for suspend: mask the irq, NACK the EC, gate the clock. */
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	/* keep the slave selected but reject transfers (I2C_SL_NACK) */
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
765#endif
766
767static void nvec_power_off(void)
768{
769 char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };
770
771 nvec_toggle_global_events(nvec_power_handle, false);
772 nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
773}
774
static int tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct nvec_platform_data *pdata = pdev->dev.platform_data;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	struct resource *res;
	void __iomem *base;
	/* canned EC requests issued at the end of probe */
	char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
	unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
	enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };

	nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
	if (nvec == NULL) {
		dev_err(&pdev->dev, "failed to reserve memory\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, nvec);
	nvec->dev = &pdev->dev;

	/* request gpio and slave address come from platform data or,
	 * failing that, from the device tree */
	if (pdata) {
		nvec->gpio = pdata->gpio;
		nvec->i2c_addr = pdata->i2c_addr;
	} else if (nvec->dev->of_node) {
		nvec->gpio = of_get_named_gpio(nvec->dev->of_node,
					       "request-gpios", 0);
		if (nvec->gpio < 0) {
			dev_err(&pdev->dev, "no gpio specified");
			return -ENODEV;
		}
		if (of_property_read_u32(nvec->dev->of_node,
					 "slave-addr", &nvec->i2c_addr)) {
			dev_err(&pdev->dev, "no i2c address specified");
			return -ENODEV;
		}
	} else {
		dev_err(&pdev->dev, "no platform data\n");
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
	if (IS_ERR(i2c_clk)) {
		dev_err(nvec->dev, "failed to get controller clock\n");
		return -ENODEV;
	}

	nvec->base = base;
	nvec->irq = res->start;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	/* set up the synchronisation primitives and work queues used by
	 * the tx/rx paths before any irq can fire */
	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);

	/* request line starts high = no transfer requested */
	err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
				    "nvec gpio");
	if (err < 0) {
		dev_err(nvec->dev, "couldn't request gpio\n");
		return -ENODEV;
	}

	err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
			       "nvec", nvec);
	if (err) {
		dev_err(nvec->dev, "couldn't request irq\n");
		return -ENODEV;
	}
	disable_irq(nvec->irq);

	tegra_init_i2c_slave(nvec);

	clk_prepare_enable(i2c_clk);


	/* enable event reporting */
	nvec_toggle_global_events(nvec, true);

	/* fallback notifier that logs messages nobody else consumed */
	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	msg = nvec_write_sync(nvec, get_firmware_version, 2);

	if (msg) {
		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
			msg->data[4], msg->data[5], msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	/* NOTE(review): a failure here is only logged, probe still
	 * succeeds — confirm this is intentional */
	ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
			      ARRAY_SIZE(nvec_devices), base, 0, NULL);
	if (ret)
		dev_err(nvec->dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, unmute_speakers, 4);

	/* enable lid switch event */
	nvec_event_mask(enable_event, LID_SWITCH);
	nvec_write_async(nvec, enable_event, 7);

	/* enable power button event */
	nvec_event_mask(enable_event, PWR_BUTTON);
	nvec_write_async(nvec, enable_event, 7);

	return 0;
}
907
static int tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	/* silence the EC before tearing down the subdevices and workers */
	nvec_toggle_global_events(nvec, false);
	mfd_remove_devices(nvec->dev);
	nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
	cancel_work_sync(&nvec->rx_work);
	cancel_work_sync(&nvec->tx_work);
	/* FIXME: needs check whether nvec is responsible for power off */
	pm_power_off = NULL;

	return 0;
}
922
923#ifdef CONFIG_PM_SLEEP
924static int nvec_suspend(struct device *dev)
925{
926 struct platform_device *pdev = to_platform_device(dev);
927 struct nvec_chip *nvec = platform_get_drvdata(pdev);
928 struct nvec_msg *msg;
929 char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };
930
931 dev_dbg(nvec->dev, "suspending\n");
932
933 /* keep these sync or you'll break suspend */
934 nvec_toggle_global_events(nvec, false);
935
936 msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend));
937 nvec_msg_free(nvec, msg);
938
939 nvec_disable_i2c_slave(nvec);
940
941 return 0;
942}
943
static int nvec_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	dev_dbg(nvec->dev, "resuming\n");
	/* reprogram the i2c slave, then re-enable EC event reporting */
	tegra_init_i2c_slave(nvec);
	nvec_toggle_global_events(nvec, true);

	return 0;
}
955#endif
956
/* suspend/resume hooks compile away unless CONFIG_PM_SLEEP is set */
static const SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);

/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);

static struct platform_driver nvec_device_driver = {
	.probe = tegra_nvec_probe,
	.remove = tegra_nvec_remove,
	.driver = {
		.name = "nvec",
		.owner = THIS_MODULE,
		.pm = &nvec_pm_ops,
		.of_match_table = nvidia_nvec_of_match,
	}
};

module_platform_driver(nvec_device_driver);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");