Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1/*
2 * AMx3 Wkup M3 IPC driver
3 *
4 * Copyright (C) 2015 Texas Instruments, Inc.
5 *
6 * Dave Gerlach <d-gerlach@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/err.h>
19#include <linux/kernel.h>
20#include <linux/kthread.h>
21#include <linux/interrupt.h>
22#include <linux/irq.h>
23#include <linux/module.h>
24#include <linux/of.h>
25#include <linux/omap-mailbox.h>
26#include <linux/platform_device.h>
27#include <linux/remoteproc.h>
28#include <linux/suspend.h>
29#include <linux/wkup_m3_ipc.h>
30
31#define AM33XX_CTRL_IPC_REG_COUNT 0x8
32#define AM33XX_CTRL_IPC_REG_OFFSET(m) (0x4 + 4 * (m))
33
34/* AM33XX M3_TXEV_EOI register */
35#define AM33XX_CONTROL_M3_TXEV_EOI 0x00
36
37#define AM33XX_M3_TXEV_ACK (0x1 << 0)
38#define AM33XX_M3_TXEV_ENABLE (0x0 << 0)
39
40#define IPC_CMD_DS0 0x4
41#define IPC_CMD_STANDBY 0xc
42#define IPC_CMD_IDLE 0x10
43#define IPC_CMD_RESET 0xe
44#define DS_IPC_DEFAULT 0xffffffff
45#define M3_VERSION_UNKNOWN 0x0000ffff
46#define M3_BASELINE_VERSION 0x191
47#define M3_STATUS_RESP_MASK (0xffff << 16)
48#define M3_FW_VERSION_MASK 0xffff
49#define M3_WAKE_SRC_MASK 0xff
50
51#define M3_STATE_UNKNOWN 0
52#define M3_STATE_RESET 1
53#define M3_STATE_INITED 2
54#define M3_STATE_MSG_FOR_LP 3
55#define M3_STATE_MSG_FOR_RESET 4
56
57static struct wkup_m3_ipc *m3_ipc_state;
58
/*
 * Map of wkup_m3 interrupt numbers to human-readable wakeup source names.
 * The final entry (irq_nr == 0, "Unknown") is a fallback sentinel: lookup
 * code iterates over ARRAY_SIZE(wakeups) - 1 entries and returns this one
 * when no irq_nr matches.
 */
static const struct wkup_m3_wakeup_src wakeups[] = {
	{.irq_nr = 16, .src = "PRCM"},
	{.irq_nr = 35, .src = "USB0_PHY"},
	{.irq_nr = 36, .src = "USB1_PHY"},
	{.irq_nr = 40, .src = "I2C0"},
	{.irq_nr = 41, .src = "RTC Timer"},
	{.irq_nr = 42, .src = "RTC Alarm"},
	{.irq_nr = 43, .src = "Timer0"},
	{.irq_nr = 44, .src = "Timer1"},
	{.irq_nr = 45, .src = "UART"},
	{.irq_nr = 46, .src = "GPIO0"},
	{.irq_nr = 48, .src = "MPU_WAKE"},
	{.irq_nr = 49, .src = "WDT0"},
	{.irq_nr = 50, .src = "WDT1"},
	{.irq_nr = 51, .src = "ADC_TSC"},
	{.irq_nr = 0, .src = "Unknown"},
};
76
/* Acknowledge (end-of-interrupt) the CM3 txev event by writing TXEV_ACK */
static void am33xx_txev_eoi(struct wkup_m3_ipc *m3_ipc)
{
	writel(AM33XX_M3_TXEV_ACK,
	       m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}
82
/* Re-arm the CM3 txev event line so the next interrupt can be delivered */
static void am33xx_txev_enable(struct wkup_m3_ipc *m3_ipc)
{
	writel(AM33XX_M3_TXEV_ENABLE,
	       m3_ipc->ipc_mem_base + AM33XX_CONTROL_M3_TXEV_EOI);
}
88
89static void wkup_m3_ctrl_ipc_write(struct wkup_m3_ipc *m3_ipc,
90 u32 val, int ipc_reg_num)
91{
92 if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
93 "ipc register operation out of range"))
94 return;
95
96 writel(val, m3_ipc->ipc_mem_base +
97 AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
98}
99
100static unsigned int wkup_m3_ctrl_ipc_read(struct wkup_m3_ipc *m3_ipc,
101 int ipc_reg_num)
102{
103 if (WARN(ipc_reg_num < 0 || ipc_reg_num > AM33XX_CTRL_IPC_REG_COUNT,
104 "ipc register operation out of range"))
105 return 0;
106
107 return readl(m3_ipc->ipc_mem_base +
108 AM33XX_CTRL_IPC_REG_OFFSET(ipc_reg_num));
109}
110
111static int wkup_m3_fw_version_read(struct wkup_m3_ipc *m3_ipc)
112{
113 int val;
114
115 val = wkup_m3_ctrl_ipc_read(m3_ipc, 2);
116
117 return val & M3_FW_VERSION_MASK;
118}
119
/*
 * wkup_m3_txev_handler - IRQ handler for the CM3 txev interrupt
 * @irq: interrupt number
 * @ipc_data: the wkup_m3_ipc context registered with devm_request_irq()
 *
 * Acknowledges the event, advances the MPU<->CM3 handshake state machine
 * and signals sync_complete so a waiter in wkup_m3_ping() can proceed.
 * The event line is re-enabled only after handling, to avoid repeated
 * interrupts from the CM3.
 */
static irqreturn_t wkup_m3_txev_handler(int irq, void *ipc_data)
{
	struct wkup_m3_ipc *m3_ipc = ipc_data;
	struct device *dev = m3_ipc->dev;
	int ver = 0;

	/* Ack the event first so it is not re-raised while being handled */
	am33xx_txev_eoi(m3_ipc);

	switch (m3_ipc->state) {
	case M3_STATE_RESET:
		/* First event after reset: firmware is up, check its version */
		ver = wkup_m3_fw_version_read(m3_ipc);

		if (ver == M3_VERSION_UNKNOWN ||
		    ver < M3_BASELINE_VERSION) {
			dev_warn(dev, "CM3 Firmware Version %x not supported\n",
				 ver);
		} else {
			dev_info(dev, "CM3 Firmware Version = 0x%x\n", ver);
		}

		m3_ipc->state = M3_STATE_INITED;
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_MSG_FOR_RESET:
		/* CM3 acknowledged the reset command */
		m3_ipc->state = M3_STATE_INITED;
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_MSG_FOR_LP:
		/* CM3 acknowledged a low-power request; state left as-is */
		complete(&m3_ipc->sync_complete);
		break;
	case M3_STATE_UNKNOWN:
		dev_warn(dev, "Unknown CM3 State\n");
	}

	/* Re-arm the event line for the next CM3 interrupt */
	am33xx_txev_enable(m3_ipc);

	return IRQ_HANDLED;
}
158
159static int wkup_m3_ping(struct wkup_m3_ipc *m3_ipc)
160{
161 struct device *dev = m3_ipc->dev;
162 mbox_msg_t dummy_msg = 0;
163 int ret;
164
165 if (!m3_ipc->mbox) {
166 dev_err(dev,
167 "No IPC channel to communicate with wkup_m3!\n");
168 return -EIO;
169 }
170
171 /*
172 * Write a dummy message to the mailbox in order to trigger the RX
173 * interrupt to alert the M3 that data is available in the IPC
174 * registers. We must enable the IRQ here and disable it after in
175 * the RX callback to avoid multiple interrupts being received
176 * by the CM3.
177 */
178 ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
179 if (ret < 0) {
180 dev_err(dev, "%s: mbox_send_message() failed: %d\n",
181 __func__, ret);
182 return ret;
183 }
184
185 ret = wait_for_completion_timeout(&m3_ipc->sync_complete,
186 msecs_to_jiffies(500));
187 if (!ret) {
188 dev_err(dev, "MPU<->CM3 sync failure\n");
189 m3_ipc->state = M3_STATE_UNKNOWN;
190 return -EIO;
191 }
192
193 mbox_client_txdone(m3_ipc->mbox, 0);
194 return 0;
195}
196
197static int wkup_m3_ping_noirq(struct wkup_m3_ipc *m3_ipc)
198{
199 struct device *dev = m3_ipc->dev;
200 mbox_msg_t dummy_msg = 0;
201 int ret;
202
203 if (!m3_ipc->mbox) {
204 dev_err(dev,
205 "No IPC channel to communicate with wkup_m3!\n");
206 return -EIO;
207 }
208
209 ret = mbox_send_message(m3_ipc->mbox, &dummy_msg);
210 if (ret < 0) {
211 dev_err(dev, "%s: mbox_send_message() failed: %d\n",
212 __func__, ret);
213 return ret;
214 }
215
216 mbox_client_txdone(m3_ipc->mbox, 0);
217 return 0;
218}
219
220static int wkup_m3_is_available(struct wkup_m3_ipc *m3_ipc)
221{
222 return ((m3_ipc->state != M3_STATE_RESET) &&
223 (m3_ipc->state != M3_STATE_UNKNOWN));
224}
225
226/* Public functions */
227/**
228 * wkup_m3_set_mem_type - Pass wkup_m3 which type of memory is in use
229 * @mem_type: memory type value read directly from emif
230 *
231 * wkup_m3 must know what memory type is in use to properly suspend
232 * and resume.
233 */
234static void wkup_m3_set_mem_type(struct wkup_m3_ipc *m3_ipc, int mem_type)
235{
236 m3_ipc->mem_type = mem_type;
237}
238
239/**
240 * wkup_m3_set_resume_address - Pass wkup_m3 resume address
241 * @addr: Physical address from which resume code should execute
242 */
243static void wkup_m3_set_resume_address(struct wkup_m3_ipc *m3_ipc, void *addr)
244{
245 m3_ipc->resume_addr = (unsigned long)addr;
246}
247
248/**
249 * wkup_m3_request_pm_status - Retrieve wkup_m3 status code after suspend
250 *
251 * Returns code representing the status of a low power mode transition.
252 * 0 - Successful transition
253 * 1 - Failure to transition to low power state
254 */
255static int wkup_m3_request_pm_status(struct wkup_m3_ipc *m3_ipc)
256{
257 unsigned int i;
258 int val;
259
260 val = wkup_m3_ctrl_ipc_read(m3_ipc, 1);
261
262 i = M3_STATUS_RESP_MASK & val;
263 i >>= __ffs(M3_STATUS_RESP_MASK);
264
265 return i;
266}
267
268/**
269 * wkup_m3_prepare_low_power - Request preparation for transition to
270 * low power state
271 * @state: A kernel suspend state to enter, either MEM or STANDBY
272 *
273 * Returns 0 if preparation was successful, otherwise returns error code
274 */
275static int wkup_m3_prepare_low_power(struct wkup_m3_ipc *m3_ipc, int state)
276{
277 struct device *dev = m3_ipc->dev;
278 int m3_power_state;
279 int ret = 0;
280
281 if (!wkup_m3_is_available(m3_ipc))
282 return -ENODEV;
283
284 switch (state) {
285 case WKUP_M3_DEEPSLEEP:
286 m3_power_state = IPC_CMD_DS0;
287 break;
288 case WKUP_M3_STANDBY:
289 m3_power_state = IPC_CMD_STANDBY;
290 break;
291 case WKUP_M3_IDLE:
292 m3_power_state = IPC_CMD_IDLE;
293 break;
294 default:
295 return 1;
296 }
297
298 /* Program each required IPC register then write defaults to others */
299 wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->resume_addr, 0);
300 wkup_m3_ctrl_ipc_write(m3_ipc, m3_power_state, 1);
301 wkup_m3_ctrl_ipc_write(m3_ipc, m3_ipc->mem_type, 4);
302
303 wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
304 wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 3);
305 wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 5);
306 wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 6);
307 wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 7);
308
309 m3_ipc->state = M3_STATE_MSG_FOR_LP;
310
311 if (state == WKUP_M3_IDLE)
312 ret = wkup_m3_ping_noirq(m3_ipc);
313 else
314 ret = wkup_m3_ping(m3_ipc);
315
316 if (ret) {
317 dev_err(dev, "Unable to ping CM3\n");
318 return ret;
319 }
320
321 return 0;
322}
323
324/**
325 * wkup_m3_finish_low_power - Return m3 to reset state
326 *
327 * Returns 0 if reset was successful, otherwise returns error code
328 */
329static int wkup_m3_finish_low_power(struct wkup_m3_ipc *m3_ipc)
330{
331 struct device *dev = m3_ipc->dev;
332 int ret = 0;
333
334 if (!wkup_m3_is_available(m3_ipc))
335 return -ENODEV;
336
337 wkup_m3_ctrl_ipc_write(m3_ipc, IPC_CMD_RESET, 1);
338 wkup_m3_ctrl_ipc_write(m3_ipc, DS_IPC_DEFAULT, 2);
339
340 m3_ipc->state = M3_STATE_MSG_FOR_RESET;
341
342 ret = wkup_m3_ping(m3_ipc);
343 if (ret) {
344 dev_err(dev, "Unable to ping CM3\n");
345 return ret;
346 }
347
348 return 0;
349}
350
351/**
352 * wkup_m3_request_wake_src - Get the wakeup source info passed from wkup_m3
353 * @m3_ipc: Pointer to wkup_m3_ipc context
354 */
355static const char *wkup_m3_request_wake_src(struct wkup_m3_ipc *m3_ipc)
356{
357 unsigned int wakeup_src_idx;
358 int j, val;
359
360 val = wkup_m3_ctrl_ipc_read(m3_ipc, 6);
361
362 wakeup_src_idx = val & M3_WAKE_SRC_MASK;
363
364 for (j = 0; j < ARRAY_SIZE(wakeups) - 1; j++) {
365 if (wakeups[j].irq_nr == wakeup_src_idx)
366 return wakeups[j].src;
367 }
368 return wakeups[j].src;
369}
370
371/**
372 * wkup_m3_set_rtc_only - Set the rtc_only flag
373 * @wkup_m3_wakeup: struct wkup_m3_wakeup_src * gets assigned the
374 * wakeup src value
375 */
376static void wkup_m3_set_rtc_only(struct wkup_m3_ipc *m3_ipc)
377{
378 if (m3_ipc_state)
379 m3_ipc_state->is_rtc_only = true;
380}
381
/* Callback table handed to PM code through the wkup_m3_ipc handle */
static struct wkup_m3_ipc_ops ipc_ops = {
	.set_mem_type = wkup_m3_set_mem_type,
	.set_resume_address = wkup_m3_set_resume_address,
	.prepare_low_power = wkup_m3_prepare_low_power,
	.finish_low_power = wkup_m3_finish_low_power,
	.request_pm_status = wkup_m3_request_pm_status,
	.request_wake_src = wkup_m3_request_wake_src,
	.set_rtc_only = wkup_m3_set_rtc_only,
};
391
392/**
393 * wkup_m3_ipc_get - Return handle to wkup_m3_ipc
394 *
395 * Returns NULL if the wkup_m3 is not yet available, otherwise returns
396 * pointer to wkup_m3_ipc struct.
397 */
398struct wkup_m3_ipc *wkup_m3_ipc_get(void)
399{
400 if (m3_ipc_state)
401 get_device(m3_ipc_state->dev);
402 else
403 return NULL;
404
405 return m3_ipc_state;
406}
407EXPORT_SYMBOL_GPL(wkup_m3_ipc_get);
408
409/**
410 * wkup_m3_ipc_put - Free handle to wkup_m3_ipc returned from wkup_m3_ipc_get
411 * @m3_ipc: A pointer to wkup_m3_ipc struct returned by wkup_m3_ipc_get
412 */
413void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc)
414{
415 if (m3_ipc_state)
416 put_device(m3_ipc_state->dev);
417}
418EXPORT_SYMBOL_GPL(wkup_m3_ipc_put);
419
420static void wkup_m3_rproc_boot_thread(struct wkup_m3_ipc *m3_ipc)
421{
422 struct device *dev = m3_ipc->dev;
423 int ret;
424
425 init_completion(&m3_ipc->sync_complete);
426
427 ret = rproc_boot(m3_ipc->rproc);
428 if (ret)
429 dev_err(dev, "rproc_boot failed\n");
430
431 do_exit(0);
432}
433
434static int wkup_m3_ipc_probe(struct platform_device *pdev)
435{
436 struct device *dev = &pdev->dev;
437 int irq, ret;
438 phandle rproc_phandle;
439 struct rproc *m3_rproc;
440 struct resource *res;
441 struct task_struct *task;
442 struct wkup_m3_ipc *m3_ipc;
443
444 m3_ipc = devm_kzalloc(dev, sizeof(*m3_ipc), GFP_KERNEL);
445 if (!m3_ipc)
446 return -ENOMEM;
447
448 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
449 m3_ipc->ipc_mem_base = devm_ioremap_resource(dev, res);
450 if (IS_ERR(m3_ipc->ipc_mem_base)) {
451 dev_err(dev, "could not ioremap ipc_mem\n");
452 return PTR_ERR(m3_ipc->ipc_mem_base);
453 }
454
455 irq = platform_get_irq(pdev, 0);
456 if (!irq) {
457 dev_err(&pdev->dev, "no irq resource\n");
458 return -ENXIO;
459 }
460
461 ret = devm_request_irq(dev, irq, wkup_m3_txev_handler,
462 0, "wkup_m3_txev", m3_ipc);
463 if (ret) {
464 dev_err(dev, "request_irq failed\n");
465 return ret;
466 }
467
468 m3_ipc->mbox_client.dev = dev;
469 m3_ipc->mbox_client.tx_done = NULL;
470 m3_ipc->mbox_client.tx_prepare = NULL;
471 m3_ipc->mbox_client.rx_callback = NULL;
472 m3_ipc->mbox_client.tx_block = false;
473 m3_ipc->mbox_client.knows_txdone = false;
474
475 m3_ipc->mbox = mbox_request_channel(&m3_ipc->mbox_client, 0);
476
477 if (IS_ERR(m3_ipc->mbox)) {
478 dev_err(dev, "IPC Request for A8->M3 Channel failed! %ld\n",
479 PTR_ERR(m3_ipc->mbox));
480 return PTR_ERR(m3_ipc->mbox);
481 }
482
483 if (of_property_read_u32(dev->of_node, "ti,rproc", &rproc_phandle)) {
484 dev_err(&pdev->dev, "could not get rproc phandle\n");
485 ret = -ENODEV;
486 goto err_free_mbox;
487 }
488
489 m3_rproc = rproc_get_by_phandle(rproc_phandle);
490 if (!m3_rproc) {
491 dev_err(&pdev->dev, "could not get rproc handle\n");
492 ret = -EPROBE_DEFER;
493 goto err_free_mbox;
494 }
495
496 m3_ipc->rproc = m3_rproc;
497 m3_ipc->dev = dev;
498 m3_ipc->state = M3_STATE_RESET;
499
500 m3_ipc->ops = &ipc_ops;
501
502 /*
503 * Wait for firmware loading completion in a thread so we
504 * can boot the wkup_m3 as soon as it's ready without holding
505 * up kernel boot
506 */
507 task = kthread_run((void *)wkup_m3_rproc_boot_thread, m3_ipc,
508 "wkup_m3_rproc_loader");
509
510 if (IS_ERR(task)) {
511 dev_err(dev, "can't create rproc_boot thread\n");
512 ret = PTR_ERR(task);
513 goto err_put_rproc;
514 }
515
516 m3_ipc_state = m3_ipc;
517
518 return 0;
519
520err_put_rproc:
521 rproc_put(m3_rproc);
522err_free_mbox:
523 mbox_free_channel(m3_ipc->mbox);
524 return ret;
525}
526
/*
 * wkup_m3_ipc_remove - Tear down the IPC link: free the mailbox channel,
 * stop and release the CM3 remoteproc, and clear the global handle.
 *
 * NOTE(review): relies on the module-level m3_ipc_state rather than
 * drvdata; safe only because probe stores the single instance there.
 */
static int wkup_m3_ipc_remove(struct platform_device *pdev)
{
	mbox_free_channel(m3_ipc_state->mbox);

	rproc_shutdown(m3_ipc_state->rproc);
	rproc_put(m3_ipc_state->rproc);

	m3_ipc_state = NULL;

	return 0;
}
538
/* System-suspend hook: intentionally a no-op, even when rtc_only is set */
static int __maybe_unused wkup_m3_ipc_suspend(struct device *dev)
{
	/*
	 * Nothing needs to be done on suspend even with rtc_only flag set
	 */
	return 0;
}
546
/*
 * System-resume hook: after an RTC-only suspend the CM3 has lost state,
 * so reboot it; otherwise do nothing. The flag is one-shot and cleared
 * unconditionally.
 *
 * NOTE(review): the rproc_boot() return value is ignored here — a failed
 * reboot leaves the CM3 down silently; consider logging it.
 */
static int __maybe_unused wkup_m3_ipc_resume(struct device *dev)
{
	if (m3_ipc_state->is_rtc_only) {
		rproc_shutdown(m3_ipc_state->rproc);
		rproc_boot(m3_ipc_state->rproc);
	}

	m3_ipc_state->is_rtc_only = false;

	return 0;
}
558
/* Suspend/resume callbacks wired into the platform driver below */
static const struct dev_pm_ops wkup_m3_ipc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(wkup_m3_ipc_suspend, wkup_m3_ipc_resume)
};
562
/* Devicetree match table: AM335x and AM437x wkup_m3 IPC nodes */
static const struct of_device_id wkup_m3_ipc_of_match[] = {
	{ .compatible = "ti,am3352-wkup-m3-ipc", },
	{ .compatible = "ti,am4372-wkup-m3-ipc", },
	{},
};
MODULE_DEVICE_TABLE(of, wkup_m3_ipc_of_match);
569
/* Platform driver glue; registered via module_platform_driver() below */
static struct platform_driver wkup_m3_ipc_driver = {
	.probe = wkup_m3_ipc_probe,
	.remove = wkup_m3_ipc_remove,
	.driver = {
		.name = "wkup_m3_ipc",
		.of_match_table = wkup_m3_ipc_of_match,
		.pm = &wkup_m3_ipc_pm_ops,
	},
};

module_platform_driver(wkup_m3_ipc_driver);
581
582MODULE_LICENSE("GPL v2");
583MODULE_DESCRIPTION("wkup m3 remote processor ipc driver");
584MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");