/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.launchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

/* #define DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include <mach/clk.h>
#include <mach/iomap.h>

#include "nvec.h"

#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		(1<<10)
#define I2C_CNFG_NEW_MASTER_SFM		(1<<11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWL		(1<<2)
#define I2C_SL_NACK		(1<<1)
#define I2C_SL_RESP		(1<<0)
#define I2C_SL_IRQ		(1<<3)
#define END_TRANS		(1<<4)
#define RCVD			(1<<2)
#define RNW			(1<<1)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c

/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category {
        NVEC_MSG_RX,
        NVEC_MSG_TX,
};

static const unsigned char EC_DISABLE_EVENT_REPORTING[3] = "\x04\x00\x00";
static const unsigned char EC_ENABLE_EVENT_REPORTING[3] = "\x04\x00\x01";
static const unsigned char EC_GET_FIRMWARE_VERSION[2] = "\x07\x15";

static struct nvec_chip *nvec_power_handle;

static struct mfd_cell nvec_devices[] = {
        {
                .name = "nvec-kbd",
                .id = 1,
        },
        {
                .name = "nvec-mouse",
                .id = 1,
        },
        {
                .name = "nvec-power",
                .id = 1,
        },
        {
                .name = "nvec-power",
                .id = 2,
        },
        {
                .name = "nvec-leds",
                .id = 1,
        },
};

/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 * @events: Unused
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
                           unsigned int events)
{
        return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
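
/*
 * Example usage (illustrative sketch only, not part of this driver): a
 * sub-driver such as nvec-power would typically register its callback
 * from its own probe routine, roughly as below. The names
 * example_notifier and example_nb are hypothetical.
 *
 *	static int example_notifier(struct notifier_block *nb,
 *				    unsigned long event_type, void *data)
 *	{
 *		unsigned char *msg = data;
 *
 *		if (event_type != NVEC_BAT)
 *			return NOTIFY_DONE;
 *
 *		... handle the payload, msg[1] holds the payload length ...
 *		return NOTIFY_STOP;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_notifier,
 *	};
 *
 *	nvec_register_notifier(nvec, &example_nb, 0);
 */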

/**
 * nvec_status_notifier - The final notifier
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
                                unsigned long event_type, void *data)
{
        unsigned char *msg = (unsigned char *)data;

        if (event_type != NVEC_CNTL)
                return NOTIFY_DONE;

        printk(KERN_WARNING "unhandled msg type %ld\n", event_type);
        print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
                       msg, msg[1] + 2, true);

        return NOTIFY_OK;
}

/**
 * nvec_msg_alloc - Allocate a message from the pool of @nvec
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
                                       enum nvec_msg_category category)
{
        int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

        for (; i < NVEC_POOL_SIZE; i++) {
                if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
                        dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
                        return &nvec->msg_pool[i];
                }
        }

        dev_err(nvec->dev, "could not allocate %s buffer\n",
                (category == NVEC_MSG_TX) ? "TX" : "RX");

        return NULL;
}

/**
 * nvec_msg_free - Free a message allocated by nvec_msg_alloc()
 * @nvec: A &struct nvec_chip
 * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message.
 */
inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
        if (msg != &nvec->tx_scratch)
                dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
        atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);

/**
 * nvec_msg_is_event - Return %true if @msg is an event
 * @msg: A message
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
        return msg->data[0] >> 7;
}

/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
        bool is_event = nvec_msg_is_event(msg);
        int event_length = (msg->data[0] & 0x60) >> 5;

        /* for variable size, payload size in byte 1 + count (1) + cmd (1) */
        if (!is_event || event_length == NVEC_VAR_SIZE)
                return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
        else if (event_length == NVEC_2BYTES)
                return 2;
        else if (event_length == NVEC_3BYTES)
                return 3;
        else
                return 0;
}
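
/*
 * Worked example (informational note): for a variable-size message the
 * second byte carries the payload length, so a message with data[1] == 0x05
 * is 0x05 + 2 == 7 bytes long in total (payload plus the count byte and the
 * command/type byte). Fixed-size events encode their length class in bits
 * 6:5 of the first byte, while bit 7 distinguishes events from responses.
 */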

/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpio_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
        dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
                gpio_get_value(nvec->gpio), value);
        gpio_set_value(nvec->gpio, value);
}

/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
                     short size)
{
        struct nvec_msg *msg;
        unsigned long flags;

        msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

        if (msg == NULL)
                return -ENOMEM;

        msg->data[0] = size;
        memcpy(msg->data + 1, data, size);
        msg->size = size + 1;

        spin_lock_irqsave(&nvec->tx_lock, flags);
        list_add_tail(&msg->node, &nvec->tx_data);
        spin_unlock_irqrestore(&nvec->tx_lock, flags);

        queue_work(nvec->wq, &nvec->tx_work);

        return 0;
}
EXPORT_SYMBOL(nvec_write_async);
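
/*
 * Example usage (illustrative sketch only): fire-and-forget commands are
 * queued with nvec_write_async(), exactly as this driver does for the
 * "enable event reporting" request:
 *
 *	nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
 *			 sizeof(EC_ENABLE_EVENT_REPORTING));
 *
 * The data is copied into a pool message, so the caller's buffer may live
 * on the stack; the actual transfer happens later from the tx worker.
 */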

/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: A pointer to the response message on success,
 * %NULL on failure. Free with nvec_msg_free() once no longer
 * used.
 */
struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
                                 const unsigned char *data, short size)
{
        struct nvec_msg *msg;

        mutex_lock(&nvec->sync_write_mutex);

        nvec->sync_write_pending = (data[1] << 8) + data[0];

        if (nvec_write_async(nvec, data, size) < 0) {
                mutex_unlock(&nvec->sync_write_mutex);
                return NULL;
        }

        dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
                nvec->sync_write_pending);
        if (!(wait_for_completion_timeout(&nvec->sync_write,
                                          msecs_to_jiffies(2000)))) {
                dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
                mutex_unlock(&nvec->sync_write_mutex);
                return NULL;
        }

        dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

        msg = nvec->last_sync_msg;

        mutex_unlock(&nvec->sync_write_mutex);

        return msg;
}
EXPORT_SYMBOL(nvec_write_sync);
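
/*
 * Example usage (illustrative sketch only): synchronous requests return the
 * response message, which must be handed back to the pool, as the probe
 * code below does when querying the firmware version:
 *
 *	struct nvec_msg *msg;
 *
 *	msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
 *			      sizeof(EC_GET_FIRMWARE_VERSION));
 *	if (msg) {
 *		... inspect msg->data ...
 *		nvec_msg_free(nvec, msg);
 *	}
 */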

/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
        struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
        unsigned long flags;
        long err;
        struct nvec_msg *msg;

        spin_lock_irqsave(&nvec->tx_lock, flags);
        while (!list_empty(&nvec->tx_data)) {
                msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
                spin_unlock_irqrestore(&nvec->tx_lock, flags);
                nvec_gpio_set_value(nvec, 0);
                err = wait_for_completion_interruptible_timeout(
                                &nvec->ec_transfer, msecs_to_jiffies(5000));

                if (err == 0) {
                        dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
                        nvec_gpio_set_value(nvec, 1);
                        msg->pos = 0;
                }

                spin_lock_irqsave(&nvec->tx_lock, flags);

                if (err > 0) {
                        list_del_init(&msg->node);
                        nvec_msg_free(nvec, msg);
                }
        }
        spin_unlock_irqrestore(&nvec->tx_lock, flags);
}

/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier().
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
        if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
                dev_err(nvec->dev, "ec responded %02x %02x %02x %02x\n",
                        msg->data[0], msg->data[1], msg->data[2], msg->data[3]);
                return -EINVAL;
        }

        if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
                print_hex_dump(KERN_WARNING, "ec system event ",
                               DUMP_PREFIX_NONE, 16, 1, msg->data,
                               msg->data[1] + 2, true);

        atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
                                   msg->data);

        return 0;
}

/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
        struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
        unsigned long flags;
        struct nvec_msg *msg;

        spin_lock_irqsave(&nvec->rx_lock, flags);
        while (!list_empty(&nvec->rx_data)) {
                msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
                list_del_init(&msg->node);
                spin_unlock_irqrestore(&nvec->rx_lock, flags);

                if (nvec->sync_write_pending ==
                    (msg->data[2] << 8) + msg->data[0]) {
                        dev_dbg(nvec->dev, "sync write completed!\n");
                        nvec->sync_write_pending = 0;
                        nvec->last_sync_msg = msg;
                        complete(&nvec->sync_write);
                } else {
                        parse_msg(nvec, msg);
                        nvec_msg_free(nvec, msg);
                }
                spin_lock_irqsave(&nvec->rx_lock, flags);
        }
        spin_unlock_irqrestore(&nvec->rx_lock, flags);
}

/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
        /* We got an END_TRANS, let's skip this, maybe there's an event */
        if (nvec->tx->pos != nvec->tx->size) {
                dev_err(nvec->dev, "premature END_TRANS, resending\n");
                nvec->tx->pos = 0;
                nvec_gpio_set_value(nvec, 0);
        } else {
                nvec->state = 0;
        }
}

/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on an RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
        if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
                dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
                        (uint) nvec_msg_size(nvec->rx),
                        (uint) nvec->rx->pos);

                /* Battery quirk - Often incomplete, and likes to crash */
                if (nvec->rx->data[0] == NVEC_BAT)
                        complete(&nvec->ec_transfer);

                nvec_msg_free(nvec, nvec->rx);
                nvec->state = 0;

                return;
        }

        spin_lock(&nvec->rx_lock);

        /* add the received message to the work list */
        list_add_tail(&nvec->rx->node, &nvec->rx_data);

        spin_unlock(&nvec->rx_lock);

        nvec->state = 0;

        if (!nvec_msg_is_event(nvec->rx))
                complete(&nvec->ec_transfer);

        queue_work(nvec->wq, &nvec->rx_work);
}

/**
 * nvec_invalid_flags - Log an error about unexpected status flags
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether the state machine shall be reset to state 0
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
                               bool reset)
{
        dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
                status, nvec->state);
        if (reset)
                nvec->state = 0;
}

/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
        spin_lock(&nvec->tx_lock);
        if (list_empty(&nvec->tx_data)) {
                dev_err(nvec->dev, "empty tx - sending no-op\n");
                memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
                nvec->tx_scratch.size = 3;
                nvec->tx_scratch.pos = 0;
                nvec->tx = &nvec->tx_scratch;
                list_add_tail(&nvec->tx->node, &nvec->tx_data);
        } else {
                nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
                                            node);
                nvec->tx->pos = 0;
        }
        spin_unlock(&nvec->tx_lock);

        dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
                (uint)nvec->tx->size, nvec->tx->data[1]);
}

/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
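 *
 * State overview (derived from the switch statement below):
 *   0 - idle, expecting the start of a new transfer (address byte)
 *   1 - expecting the command byte; an RX message is allocated here
 *   2 - first byte after the command; decides between an EC write
 *       (go to state 4) and an EC read request (go to state 3)
 *   3 - the EC does a block read, we transmit data from nvec->tx
 *   4 - the EC writes data, we store it into nvec->rx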
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
        unsigned long status;
        unsigned int received = 0;
        unsigned char to_send = 0xff;
        const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
        struct nvec_chip *nvec = dev;
        unsigned int state = nvec->state;

        status = readl(nvec->base + I2C_SL_STATUS);

        /* Filter out some errors */
        if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
                dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
                return IRQ_HANDLED;
        }
        if ((status & I2C_SL_IRQ) == 0) {
                dev_err(nvec->dev, "Spurious IRQ\n");
                return IRQ_HANDLED;
        }

        /* The EC did not request a read, so it sent us something; read it */
        if ((status & RNW) == 0) {
                received = readl(nvec->base + I2C_SL_RCVD);
                if (status & RCVD)
                        writel(0, nvec->base + I2C_SL_RCVD);
        }

        if (status == (I2C_SL_IRQ | RCVD))
                nvec->state = 0;

        switch (nvec->state) {
        case 0:         /* Verify that it's a transfer start, the rest later */
                if (status != (I2C_SL_IRQ | RCVD))
                        nvec_invalid_flags(nvec, status, false);
                break;
        case 1:         /* command byte */
                if (status != I2C_SL_IRQ) {
                        nvec_invalid_flags(nvec, status, true);
                } else {
                        nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
                        /* Should not happen in a normal world */
                        if (unlikely(nvec->rx == NULL)) {
                                nvec->state = 0;
                                break;
                        }
                        nvec->rx->data[0] = received;
                        nvec->rx->pos = 1;
                        nvec->state = 2;
                }
                break;
        case 2:         /* first byte after command */
                if (status == (I2C_SL_IRQ | RNW | RCVD)) {
                        udelay(33);
                        if (nvec->rx->data[0] != 0x01) {
                                dev_err(nvec->dev,
                                        "Read without prior read command\n");
                                nvec->state = 0;
                                break;
                        }
                        nvec_msg_free(nvec, nvec->rx);
                        nvec->state = 3;
                        nvec_tx_set(nvec);
                        BUG_ON(nvec->tx->size < 1);
                        to_send = nvec->tx->data[0];
                        nvec->tx->pos = 1;
                } else if (status == (I2C_SL_IRQ)) {
                        BUG_ON(nvec->rx == NULL);
                        nvec->rx->data[1] = received;
                        nvec->rx->pos = 2;
                        nvec->state = 4;
                } else {
                        nvec_invalid_flags(nvec, status, true);
                }
                break;
        case 3:         /* EC does a block read, we transmit data */
                if (status & END_TRANS) {
                        nvec_tx_completed(nvec);
                } else if ((status & RNW) == 0 || (status & RCVD)) {
                        nvec_invalid_flags(nvec, status, true);
                } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
                        to_send = nvec->tx->data[nvec->tx->pos++];
                } else {
                        dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
                                nvec->tx,
                                (uint) (nvec->tx ? nvec->tx->pos : 0),
                                (uint) (nvec->tx ? nvec->tx->size : 0));
                        nvec->state = 0;
                }
                break;
        case 4:         /* EC does some write, we read the data */
                if ((status & (END_TRANS | RNW)) == END_TRANS)
                        nvec_rx_completed(nvec);
                else if (status & (RNW | RCVD))
                        nvec_invalid_flags(nvec, status, true);
                else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
                        nvec->rx->data[nvec->rx->pos++] = received;
                else
                        dev_err(nvec->dev,
                                "RX buffer overflow on %p: Trying to write byte %u of %u\n",
                                nvec->rx, nvec->rx->pos, NVEC_MSG_SIZE);
                break;
        default:
                nvec->state = 0;
        }

        /* If we are told that a new transfer starts, verify it */
        if ((status & (RCVD | RNW)) == RCVD) {
                if (received != nvec->i2c_addr)
                        dev_err(nvec->dev,
                                "received address 0x%02x, expected 0x%02x\n",
                                received, nvec->i2c_addr);
                nvec->state = 1;
        }

        /* Send data if requested, but not on end of transmission */
        if ((status & (RNW | END_TRANS)) == RNW)
                writel(to_send, nvec->base + I2C_SL_RCVD);

        /* If we have sent the first byte */
        if (status == (I2C_SL_IRQ | RNW | RCVD))
                nvec_gpio_set_value(nvec, 1);

        dev_dbg(nvec->dev,
                "Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
                (status & RNW) == 0 ? "received" : "R=",
                received,
                (status & (RNW | END_TRANS)) ? "sent" : "S=",
                to_send,
                state,
                status & END_TRANS ? " END_TRANS" : "",
                status & RCVD ? " RCVD" : "",
                status & RNW ? " RNW" : "");

        /*
         * TODO: A correct fix needs to be found for this.
         *
         * We experience fewer incomplete messages with this delay than
         * without it, but we don't know why. Help is appreciated.
         */
        udelay(100);

        return IRQ_HANDLED;
}

static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
        u32 val;

        clk_enable(nvec->i2c_clk);

        tegra_periph_reset_assert(nvec->i2c_clk);
        udelay(2);
        tegra_periph_reset_deassert(nvec->i2c_clk);

        val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
            (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
        writel(val, nvec->base + I2C_CNFG);

        clk_set_rate(nvec->i2c_clk, 8 * 80000);

        writel(I2C_SL_NEWL, nvec->base + I2C_SL_CNFG);
        writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

        writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
        writel(0, nvec->base + I2C_SL_ADDR2);

        enable_irq(nvec->irq);

        clk_disable(nvec->i2c_clk);
}

static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
        disable_irq(nvec->irq);
        writel(I2C_SL_NEWL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
        clk_disable(nvec->i2c_clk);
}

static void nvec_power_off(void)
{
        nvec_write_async(nvec_power_handle, EC_DISABLE_EVENT_REPORTING, 3);
        nvec_write_async(nvec_power_handle, "\x04\x01", 2);
}

static int __devinit tegra_nvec_probe(struct platform_device *pdev)
{
        int err, ret;
        struct clk *i2c_clk;
        struct nvec_platform_data *pdata = pdev->dev.platform_data;
        struct nvec_chip *nvec;
        struct nvec_msg *msg;
        struct resource *res;
        struct resource *iomem;
        void __iomem *base;

        nvec = kzalloc(sizeof(struct nvec_chip), GFP_KERNEL);
        if (nvec == NULL) {
                dev_err(&pdev->dev, "failed to reserve memory\n");
                return -ENOMEM;
        }
        platform_set_drvdata(pdev, nvec);
        nvec->dev = &pdev->dev;
        nvec->gpio = pdata->gpio;
        nvec->i2c_addr = pdata->i2c_addr;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "no mem resource?\n");
                ret = -ENODEV;
                goto failed;
        }

        iomem = request_mem_region(res->start, resource_size(res), pdev->name);
        if (!iomem) {
                dev_err(&pdev->dev, "I2C region already claimed\n");
                ret = -EBUSY;
                goto failed;
        }

        base = ioremap(iomem->start, resource_size(iomem));
        if (!base) {
                dev_err(&pdev->dev, "Can't ioremap I2C region\n");
                ret = -ENOMEM;
                goto failed;
        }

        res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
        if (!res) {
                dev_err(&pdev->dev, "no irq resource?\n");
                ret = -ENODEV;
                goto err_iounmap;
        }

        i2c_clk = clk_get_sys("tegra-i2c.2", NULL);
        if (IS_ERR(i2c_clk)) {
                dev_err(nvec->dev, "failed to get controller clock\n");
                ret = PTR_ERR(i2c_clk);
                goto err_iounmap;
        }

        nvec->base = base;
        nvec->irq = res->start;
        nvec->i2c_clk = i2c_clk;
        nvec->rx = &nvec->msg_pool[0];

        /* Set the gpio to low when we've got something to say */
        err = gpio_request(nvec->gpio, "nvec gpio");
        if (err < 0)
                dev_err(nvec->dev, "couldn't request gpio\n");

        ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

        init_completion(&nvec->sync_write);
        init_completion(&nvec->ec_transfer);
        mutex_init(&nvec->sync_write_mutex);
        spin_lock_init(&nvec->tx_lock);
        spin_lock_init(&nvec->rx_lock);
        INIT_LIST_HEAD(&nvec->rx_data);
        INIT_LIST_HEAD(&nvec->tx_data);
        INIT_WORK(&nvec->rx_work, nvec_dispatch);
        INIT_WORK(&nvec->tx_work, nvec_request_master);
        nvec->wq = alloc_workqueue("nvec", WQ_NON_REENTRANT, 2);

        err = request_irq(nvec->irq, nvec_interrupt, 0, "nvec", nvec);
        if (err) {
                dev_err(nvec->dev, "couldn't request irq\n");
                ret = err;
                goto err_iounmap;
        }
        disable_irq(nvec->irq);

        tegra_init_i2c_slave(nvec);

        clk_enable(i2c_clk);

        gpio_direction_output(nvec->gpio, 1);
        gpio_set_value(nvec->gpio, 1);

        /* enable event reporting */
        nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING,
                         sizeof(EC_ENABLE_EVENT_REPORTING));

        nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
        nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

        nvec_power_handle = nvec;
        pm_power_off = nvec_power_off;

        /* Get Firmware Version */
        msg = nvec_write_sync(nvec, EC_GET_FIRMWARE_VERSION,
                              sizeof(EC_GET_FIRMWARE_VERSION));

        if (msg) {
                dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
                         msg->data[4], msg->data[5], msg->data[6], msg->data[7]);

                nvec_msg_free(nvec, msg);
        }

        ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
                              ARRAY_SIZE(nvec_devices), base, 0);
        if (ret)
                dev_err(nvec->dev, "error adding subdevices\n");

        /* unmute speakers? */
        nvec_write_async(nvec, "\x0d\x10\x59\x95", 4);

        /* enable lid switch event */
        nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x02\x00", 7);

        /* enable power button event */
        nvec_write_async(nvec, "\x01\x01\x01\x00\x00\x80\x00", 7);

        return 0;

err_iounmap:
        iounmap(base);
failed:
        kfree(nvec);
        return ret;
}

static int __devexit tegra_nvec_remove(struct platform_device *pdev)
{
        struct nvec_chip *nvec = platform_get_drvdata(pdev);

        nvec_write_async(nvec, EC_DISABLE_EVENT_REPORTING, 3);
        mfd_remove_devices(nvec->dev);
        free_irq(nvec->irq, nvec);
        iounmap(nvec->base);
        gpio_free(nvec->gpio);
        destroy_workqueue(nvec->wq);
        kfree(nvec);

        return 0;
}

#ifdef CONFIG_PM

static int tegra_nvec_suspend(struct platform_device *pdev, pm_message_t state)
{
        struct nvec_chip *nvec = platform_get_drvdata(pdev);
        struct nvec_msg *msg;

        dev_dbg(nvec->dev, "suspending\n");

        /* keep these sync or you'll break suspend */
        msg = nvec_write_sync(nvec, EC_DISABLE_EVENT_REPORTING, 3);
        if (msg)
                nvec_msg_free(nvec, msg);
        msg = nvec_write_sync(nvec, "\x04\x02", 2);
        if (msg)
                nvec_msg_free(nvec, msg);

        nvec_disable_i2c_slave(nvec);

        return 0;
}

static int tegra_nvec_resume(struct platform_device *pdev)
{
        struct nvec_chip *nvec = platform_get_drvdata(pdev);

        dev_dbg(nvec->dev, "resuming\n");
        tegra_init_i2c_slave(nvec);
        nvec_write_async(nvec, EC_ENABLE_EVENT_REPORTING, 3);

        return 0;
}

#else
#define tegra_nvec_suspend NULL
#define tegra_nvec_resume NULL
#endif

static struct platform_driver nvec_device_driver = {
        .probe   = tegra_nvec_probe,
        .remove  = __devexit_p(tegra_nvec_remove),
        .suspend = tegra_nvec_suspend,
        .resume  = tegra_nvec_resume,
        .driver  = {
                .name = "nvec",
                .owner = THIS_MODULE,
        }
};

static int __init tegra_nvec_init(void)
{
        return platform_driver_register(&nvec_device_driver);
}

module_init(tegra_nvec_init);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");