// SPDX-License-Identifier: GPL-2.0
/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "nvec.h"

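/*
 * Register offsets of the Tegra I2C controller used in slave mode: the
 * embedded controller masters the bus and this driver only ever responds
 * as an I2C slave (see tegra_init_i2c_slave() below).
 */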
#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		BIT(10)
#define I2C_CNFG_NEW_MASTER_SFM		BIT(11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWSL		BIT(2)
#define I2C_SL_NACK		BIT(1)
#define I2C_SL_RESP		BIT(0)
#define I2C_SL_IRQ		BIT(3)
#define END_TRANS		BIT(4)
#define RCVD			BIT(2)
#define RNW			BIT(1)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c

/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};

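/* Subcommands of the NVEC_SLEEP command; sent as the second message byte */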
enum nvec_sleep_subcmds {
	GLOBAL_EVENTS,
	AP_PWR_DOWN,
	AP_SUSPEND,
};

#define CNF_EVENT_REPORTING 0x01
#define GET_FIRMWARE_VERSION 0x15
#define LID_SWITCH BIT(1)
#define PWR_BUTTON BIT(15)

static struct nvec_chip *nvec_power_handle;

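/* Child devices registered by tegra_nvec_probe() via mfd_add_devices() */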
static const struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
	},
	{
		.name = "nvec-mouse",
	},
	{
		.name = "nvec-power",
		.id = 0,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		.name = "nvec-paz00",
	},
};

/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 * @events: Unused
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
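
/*
 * Illustrative use, mirroring the status notifier set up in
 * tegra_nvec_probe():
 *
 *	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
 *	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
 *
 * The registered callback is then invoked with the event type and the raw
 * message payload for every received message that is not the response to a
 * nvec_write_sync() request.
 */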

/**
 * nvec_unregister_notifier - Unregister a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to unregister
 *
 * Unregisters a notifier with @nvec. The notifier will be removed from the
 * atomic notifier chain.
 */
int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_unregister_notifier);

/*
 * nvec_status_notifier - The final notifier
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
				unsigned long event_type, void *data)
{
	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
					      nvec_status_notifier);
	unsigned char *msg = data;

	if (event_type != NVEC_CNTL)
		return NOTIFY_DONE;

	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
		       msg, msg[1] + 2, true);

	return NOTIFY_OK;
}

/**
 * nvec_msg_alloc - Allocate a message from the pool of @nvec
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
				       enum nvec_msg_category category)
{
	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

	for (; i < NVEC_POOL_SIZE; i++) {
		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
			return &nvec->msg_pool[i];
		}
	}

	dev_err(nvec->dev, "could not allocate %s buffer\n",
		(category == NVEC_MSG_TX) ? "TX" : "RX");

	return NULL;
}

/**
 * nvec_msg_free - Return a message to the pool of @nvec
 * @nvec: A &struct nvec_chip
 * @msg: A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message
 */
void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);

/**
 * nvec_msg_is_event - Return %true if @msg is an event
 * @msg: A message
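 *
 * Events are marked by a set bit 7 in the first message byte.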
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
	return msg->data[0] >> 7;
}

/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
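 *
 * The event size class is encoded in bits 5-6 of the first byte: a
 * %NVEC_VAR_SIZE event (like any non-event message) carries its payload
 * length in byte 1, while %NVEC_2BYTES and %NVEC_3BYTES events have a
 * fixed length.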
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
	bool is_event = nvec_msg_is_event(msg);
	int event_length = (msg->data[0] & 0x60) >> 5;

	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
	if (!is_event || event_length == NVEC_VAR_SIZE)
		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
	else if (event_length == NVEC_2BYTES)
		return 2;
	else if (event_length == NVEC_3BYTES)
		return 3;
	return 0;
}

/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpio_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpiod_get_value(nvec->gpiod), value);
	gpiod_set_value(nvec->gpiod, value);
}

/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
		     short size)
{
	struct nvec_msg *msg;
	unsigned long flags;

	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

	if (!msg)
		return -ENOMEM;

	msg->data[0] = size;
	memcpy(msg->data + 1, data, size);
	msg->size = size + 1;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	list_add_tail(&msg->node, &nvec->tx_data);
	spin_unlock_irqrestore(&nvec->tx_lock, flags);

	schedule_work(&nvec->tx_work);

	return 0;
}
EXPORT_SYMBOL(nvec_write_async);
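
/*
 * Illustrative (asynchronous) use, analogous to the synchronous firmware
 * version query in tegra_nvec_probe():
 *
 *	const unsigned char req[] = { NVEC_CNTL, GET_FIRMWARE_VERSION };
 *
 *	nvec_write_async(nvec, req, sizeof(req));
 *
 * The EC's reply is not returned here; it is delivered through the
 * notifier chain registered with nvec_register_notifier().
 */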

/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 * @msg: The response message received
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: 0 on success, a negative error code on failure.
 * The response message is returned in @msg. Shall be freed
 * with nvec_msg_free() once no longer used.
 */
int nvec_write_sync(struct nvec_chip *nvec,
		    const unsigned char *data, short size,
		    struct nvec_msg **msg)
{
	mutex_lock(&nvec->sync_write_mutex);

	if (msg != NULL)
		*msg = NULL;

	nvec->sync_write_pending = (data[1] << 8) + data[0];

	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
		return -ENOMEM;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
		nvec->sync_write_pending);
	if (!(wait_for_completion_timeout(&nvec->sync_write,
					  msecs_to_jiffies(2000)))) {
		dev_warn(nvec->dev,
			 "timeout waiting for sync write to complete\n");
		mutex_unlock(&nvec->sync_write_mutex);
		return -ETIMEDOUT;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

	if (msg != NULL)
		*msg = nvec->last_sync_msg;
	else
		nvec_msg_free(nvec, nvec->last_sync_msg);

	mutex_unlock(&nvec->sync_write_mutex);

	return 0;
}
EXPORT_SYMBOL(nvec_write_sync);
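
/*
 * Illustrative use, as in tegra_nvec_probe():
 *
 *	err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
 *	if (!err)
 *		nvec_msg_free(nvec, msg);
 *
 * The call may sleep for up to two seconds while waiting for the EC's
 * response.
 */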

/**
 * nvec_toggle_global_events - enables or disables global event reporting
 * @nvec: nvec handle
 * @state: true for enable, false for disable
 *
 * This switches on/off global event reports by the embedded controller.
 */
static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
{
	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };

	nvec_write_async(nvec, global_events, 3);
}

/**
 * nvec_event_mask - fill the command string with event bitfield
 * @ev: points to event command string
 * @mask: bit to insert into the event mask
 *
 * Configure event command expects a 32 bit bitfield which describes
 * which events to enable. The bitfield has the following structure
 * (from highest byte to lowest):
 *	system state bits 7-0
 *	system state bits 15-8
 *	oem system state bits 7-0
 *	oem system state bits 15-8
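 *
 * For example, nvec_event_mask(ev, LID_SWITCH) sets bit 1 of ev[5], and
 * nvec_event_mask(ev, PWR_BUTTON) sets bit 7 of ev[6].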
 */
static void nvec_event_mask(char *ev, u32 mask)
{
	ev[3] = mask >> 16 & 0xff;
	ev[4] = mask >> 24 & 0xff;
	ev[5] = mask >> 0 & 0xff;
	ev[6] = mask >> 8 & 0xff;
}

/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(&nvec->ec_transfer,
								msecs_to_jiffies(5000));

		if (err == 0) {
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}

/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier().
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
		dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
		return -EINVAL;
	}

	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
		print_hex_dump(KERN_WARNING, "ec system event ",
			       DUMP_PREFIX_NONE, 16, 1, msg->data,
			       msg->data[1] + 2, true);

	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
				   msg->data);

	return 0;
}

/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		if (nvec->sync_write_pending ==
		    (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}

/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
	/* We got an END_TRANS, let's skip this, maybe there's an event */
	if (nvec->tx->pos != nvec->tx->size) {
		dev_err(nvec->dev, "premature END_TRANS, resending\n");
		nvec->tx->pos = 0;
		nvec_gpio_set_value(nvec, 0);
	} else {
		nvec->state = 0;
	}
}

/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
			(uint)nvec_msg_size(nvec->rx),
			(uint)nvec->rx->pos);

		/* Battery quirk - Often incomplete, and likes to crash */
		if (nvec->rx->data[0] == NVEC_BAT)
			complete(&nvec->ec_transfer);

		nvec_msg_free(nvec, nvec->rx);
		nvec->state = 0;

		return;
	}

	spin_lock(&nvec->rx_lock);

	/*
	 * Add the received data to the work list and move the ring buffer
	 * pointer to the next entry.
	 */
	list_add_tail(&nvec->rx->node, &nvec->rx_data);

	spin_unlock(&nvec->rx_lock);

	nvec->state = 0;

	if (!nvec_msg_is_event(nvec->rx))
		complete(&nvec->ec_transfer);

	schedule_work(&nvec->rx_work);
}

/**
 * nvec_invalid_flags - Send an error message about invalid flags and jump
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether we shall jump to state 0.
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}

/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}

/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
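 *
 * The state member of &struct nvec_chip tracks where we are within a
 * transfer: 0 = idle / expecting a new transfer, 1 = our slave address was
 * received and the command byte comes next, 2 = command byte received,
 * 3 = the EC reads and we transmit, 4 = the EC writes and we receive.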
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something; read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0:		/* Verify that it's a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(!nvec->rx)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev,
				"tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint)(nvec->tx ? nvec->tx->pos : 0),
				(uint)(nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx ? nvec->rx->pos : 0,
				NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
				"received address 0x%02x, expected 0x%02x\n",
				received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	/*
	 * TODO: replace the udelay with a read back after each writel above
	 * in order to work around a hardware issue, see i2c-tegra.c
	 *
	 * Unfortunately, this change causes an initialisation issue with the
	 * touchpad, which needs to be fixed first.
	 */
	udelay(100);

	return IRQ_HANDLED;
}

static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_prepare_enable(nvec->i2c_clk);

	reset_control_assert(nvec->rst);
	udelay(2);
	reset_control_deassert(nvec->rst);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);
}

#ifdef CONFIG_PM_SLEEP
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif

static void nvec_power_off(void)
{
	char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };

	nvec_toggle_global_events(nvec_power_handle, false);
	nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}

static int tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct device *dev = &pdev->dev;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	void __iomem *base;
	char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
	     unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
	     enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };

	if (!dev->of_node) {
		dev_err(dev, "must be instantiated using device tree\n");
		return -ENODEV;
	}

	nvec = devm_kzalloc(dev, sizeof(struct nvec_chip), GFP_KERNEL);
	if (!nvec)
		return -ENOMEM;

	platform_set_drvdata(pdev, nvec);
	nvec->dev = dev;

	if (of_property_read_u32(dev->of_node, "slave-addr", &nvec->i2c_addr)) {
		dev_err(dev, "no i2c address specified\n");
		return -ENODEV;
	}

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	nvec->irq = platform_get_irq(pdev, 0);
	if (nvec->irq < 0)
		return -ENODEV;

	i2c_clk = devm_clk_get(dev, "div-clk");
	if (IS_ERR(i2c_clk)) {
		dev_err(dev, "failed to get controller clock\n");
		return -ENODEV;
	}

	nvec->rst = devm_reset_control_get_exclusive(dev, "i2c");
	if (IS_ERR(nvec->rst)) {
		dev_err(dev, "failed to get controller reset\n");
		return PTR_ERR(nvec->rst);
	}

	nvec->base = base;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);

	nvec->gpiod = devm_gpiod_get(dev, "request", GPIOD_OUT_HIGH);
	if (IS_ERR(nvec->gpiod)) {
		dev_err(dev, "couldn't request gpio\n");
		return PTR_ERR(nvec->gpiod);
	}

	err = devm_request_irq(dev, nvec->irq, nvec_interrupt, 0,
			       "nvec", nvec);
	if (err) {
		dev_err(dev, "couldn't request irq\n");
		return -ENODEV;
	}
	disable_irq(nvec->irq);

	tegra_init_i2c_slave(nvec);

	/* enable event reporting */
	nvec_toggle_global_events(nvec, true);

	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);

	if (!err) {
		dev_warn(dev,
			 "ec firmware version %02x.%02x.%02x / %02x\n",
			 msg->data[4], msg->data[5],
			 msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	ret = mfd_add_devices(dev, 0, nvec_devices,
			      ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
	if (ret)
		dev_err(dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, unmute_speakers, 4);

	/* enable lid switch event */
	nvec_event_mask(enable_event, LID_SWITCH);
	nvec_write_async(nvec, enable_event, 7);

	/* enable power button event */
	nvec_event_mask(enable_event, PWR_BUTTON);
	nvec_write_async(nvec, enable_event, 7);

	return 0;
}

static void tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	nvec_toggle_global_events(nvec, false);
	mfd_remove_devices(nvec->dev);
	nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
	cancel_work_sync(&nvec->rx_work);
	cancel_work_sync(&nvec->tx_work);
	/* FIXME: needs check whether nvec is responsible for power off */
	pm_power_off = NULL;
}

#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
	int err;
	struct nvec_chip *nvec = dev_get_drvdata(dev);
	struct nvec_msg *msg;
	char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };

	dev_dbg(nvec->dev, "suspending\n");

	/* keep these sync or you'll break suspend */
	nvec_toggle_global_events(nvec, false);

	err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
	if (!err)
		nvec_msg_free(nvec, msg);

	nvec_disable_i2c_slave(nvec);

	return 0;
}

static int nvec_resume(struct device *dev)
{
	struct nvec_chip *nvec = dev_get_drvdata(dev);

	dev_dbg(nvec->dev, "resuming\n");
	tegra_init_i2c_slave(nvec);
	nvec_toggle_global_events(nvec, true);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);

/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);

static struct platform_driver nvec_device_driver = {
	.probe = tegra_nvec_probe,
	.remove_new = tegra_nvec_remove,
	.driver = {
		.name = "nvec",
		.pm = &nvec_pm_ops,
		.of_match_table = nvidia_nvec_of_match,
	}
};

module_platform_driver(nvec_device_driver);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");