// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox/mtk-cmdq-mailbox.h>
#include <linux/of_device.h>

#define CMDQ_OP_CODE_MASK		(0xff << CMDQ_OP_CODE_SHIFT)
#define CMDQ_NUM_CMD(t)			(t->cmd_buf_size / CMDQ_INST_SIZE)
#define CMDQ_GCE_NUM_MAX		(2)

#define CMDQ_CURR_IRQ_STATUS		0x10
#define CMDQ_SYNC_TOKEN_UPDATE		0x68
#define CMDQ_THR_SLOT_CYCLES		0x30
#define CMDQ_THR_BASE			0x100
#define CMDQ_THR_SIZE			0x80
#define CMDQ_THR_WARM_RESET		0x00
#define CMDQ_THR_ENABLE_TASK		0x04
#define CMDQ_THR_SUSPEND_TASK		0x08
#define CMDQ_THR_CURR_STATUS		0x0c
#define CMDQ_THR_IRQ_STATUS		0x10
#define CMDQ_THR_IRQ_ENABLE		0x14
#define CMDQ_THR_CURR_ADDR		0x20
#define CMDQ_THR_END_ADDR		0x24
#define CMDQ_THR_WAIT_TOKEN		0x30
#define CMDQ_THR_PRIORITY		0x40

#define GCE_GCTL_VALUE			0x48

#define CMDQ_THR_ACTIVE_SLOT_CYCLES	0x3200
#define CMDQ_THR_ENABLED		0x1
#define CMDQ_THR_DISABLED		0x0
#define CMDQ_THR_SUSPEND		0x1
#define CMDQ_THR_RESUME			0x0
#define CMDQ_THR_STATUS_SUSPENDED	BIT(1)
#define CMDQ_THR_DO_WARM_RESET		BIT(0)
#define CMDQ_THR_IRQ_DONE		0x1
#define CMDQ_THR_IRQ_ERROR		0x12
#define CMDQ_THR_IRQ_EN			(CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
#define CMDQ_THR_IS_WAITING		BIT(31)

#define CMDQ_JUMP_BY_OFFSET		0x10000000
#define CMDQ_JUMP_BY_PA			0x10000001

struct cmdq_thread {
	struct mbox_chan	*chan;
	void __iomem		*base;
	struct list_head	task_busy_list;
	u32			priority;
};

struct cmdq_task {
	struct cmdq		*cmdq;
	struct list_head	list_entry;
	dma_addr_t		pa_base;
	struct cmdq_thread	*thread;
	struct cmdq_pkt		*pkt; /* the packet sent from mailbox client */
};

struct cmdq {
	struct mbox_controller	mbox;
	void __iomem		*base;
	int			irq;
	u32			thread_nr;
	u32			irq_mask;
	struct cmdq_thread	*thread;
	struct clk_bulk_data	clocks[CMDQ_GCE_NUM_MAX];
	bool			suspended;
	u8			shift_pa;
	bool			control_by_sw;
	u32			gce_num;
};

struct gce_plat {
	u32 thread_nr;
	u8 shift;
	bool control_by_sw;
	u32 gce_num;
};

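/*
 * cmdq_get_shift_pa() - return the physical-address shift used by this GCE.
 *
 * On SoCs where the GCE stores command-buffer addresses right-shifted by a
 * few bits, mailbox clients need the same shift to encode and decode those
 * addresses consistently with the controller.
 */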
u8 cmdq_get_shift_pa(struct mbox_chan *chan)
{
	struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);

	return cmdq->shift_pa;
}
EXPORT_SYMBOL(cmdq_get_shift_pa);

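/* Suspend a GCE thread and poll until the hardware reports it suspended. */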
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 status;

	writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

	/* If the thread is already disabled, treat the suspend as successful. */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return 0;

	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
			status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
		dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_resume(struct cmdq_thread *thread)
{
	writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

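/*
 * One-time controller init: program the active slot cycles and clear all
 * sync tokens (events) by writing each token id to the update register.
 * The clocks are only enabled for the duration of the register writes.
 */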
static void cmdq_init(struct cmdq *cmdq)
{
	int i;

	WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));
	if (cmdq->control_by_sw)
		writel(0x7, cmdq->base + GCE_GCTL_VALUE);
	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
	for (i = 0; i <= CMDQ_MAX_EVENT; i++)
		writel(i, cmdq->base + CMDQ_SYNC_TOKEN_UPDATE);
	clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
}

static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 warm_reset;

	writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
			warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
			0, 10)) {
		dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	cmdq_thread_reset(cmdq, thread);
	writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
	writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
	       thread->base + CMDQ_THR_CURR_ADDR);
}

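/*
 * Chain a new task behind a busy thread: rewrite the previous task's last
 * instruction into a "jump by PA" to the new command buffer, then force the
 * GCE to drop its prefetched instructions by rewriting the thread PC.
 */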
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
	struct device *dev = task->cmdq->mbox.dev;
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *prev_task = list_last_entry(
			&thread->task_busy_list, typeof(*task), list_entry);
	u64 *prev_task_base = prev_task->pkt->va_base;

	/* let previous task jump to this task */
	dma_sync_single_for_cpu(dev, prev_task->pa_base,
				prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
	prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
		(u64)CMDQ_JUMP_BY_PA << 32 |
		(task->pa_base >> task->cmdq->shift_pa);
	dma_sync_single_for_device(dev, prev_task->pa_base,
				   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

	cmdq_thread_invalidate_fetched_data(thread);
}

static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
{
	struct cmdq_task_cb *cb = &task->pkt->async_cb;
	struct cmdq_cb_data data;

	data.sta = sta;
	data.data = cb->data;
	data.pkt = task->pkt;
	if (cb->cb)
		cb->cb(data);

	mbox_chan_received_data(task->thread->chan, &data);

	list_del(&task->list_entry);
}

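/*
 * Error recovery: suspend the thread, point its PC at the next pending task
 * (if any) so the faulting task is skipped, then resume execution.
 */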
static void cmdq_task_handle_error(struct cmdq_task *task)
{
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *next_task;
	struct cmdq *cmdq = task->cmdq;

	dev_err(cmdq->mbox.dev, "task 0x%p error\n", task);
	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	next_task = list_first_entry_or_null(&thread->task_busy_list,
					     struct cmdq_task, list_entry);
	if (next_task)
		writel(next_task->pa_base >> cmdq->shift_pa,
		       thread->base + CMDQ_THR_CURR_ADDR);
	cmdq_thread_resume(thread);
}

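/*
 * Per-thread IRQ work, called with the channel lock held. Acknowledge the
 * IRQ flags, find the task the thread PC currently points into, complete
 * every task that finished before it, and disable the thread once its busy
 * list drains.
 */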
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
				    struct cmdq_thread *thread)
{
	struct cmdq_task *task, *tmp, *curr_task = NULL;
	u32 curr_pa, irq_flag, task_end_pa;
	bool err;

	irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
	writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

	/*
	 * When the ISR calls this function, another CPU core could run
	 * "release task" right before we acquire the spin lock, and thus
	 * reset / disable this GCE thread, so we need to check the enable
	 * bit of this GCE thread.
	 */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return;

	if (irq_flag & CMDQ_THR_IRQ_ERROR)
		err = true;
	else if (irq_flag & CMDQ_THR_IRQ_DONE)
		err = false;
	else
		return;

	curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) << cmdq->shift_pa;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
		if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
			curr_task = task;

		if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
			cmdq_task_exec_done(task, 0);
			kfree(task);
		} else if (err) {
			cmdq_task_exec_done(task, -ENOEXEC);
			cmdq_task_handle_error(curr_task);
			kfree(task);
		}

		if (curr_task)
			break;
	}

	if (list_empty(&thread->task_busy_list)) {
		cmdq_thread_disable(cmdq, thread);
		clk_bulk_disable(cmdq->gce_num, cmdq->clocks);
	}
}

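/*
 * Top-level ISR. The per-thread bits in CMDQ_CURR_IRQ_STATUS are active
 * low, so a status equal to the full mask means no thread is pending and
 * the clear bits identify the threads that need servicing.
 */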
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
	struct cmdq *cmdq = dev;
	unsigned long irq_status, flags = 0L;
	int bit;

	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & cmdq->irq_mask;
	if (!(irq_status ^ cmdq->irq_mask))
		return IRQ_NONE;

	for_each_clear_bit(bit, &irq_status, cmdq->thread_nr) {
		struct cmdq_thread *thread = &cmdq->thread[bit];

		spin_lock_irqsave(&thread->chan->lock, flags);
		cmdq_thread_irq_handler(cmdq, thread);
		spin_unlock_irqrestore(&thread->chan->lock, flags);
	}

	return IRQ_HANDLED;
}

static int cmdq_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);
	struct cmdq_thread *thread;
	int i;
	bool task_running = false;

	cmdq->suspended = true;

	for (i = 0; i < cmdq->thread_nr; i++) {
		thread = &cmdq->thread[i];
		if (!list_empty(&thread->task_busy_list)) {
			task_running = true;
			break;
		}
	}

	if (task_running)
		dev_warn(dev, "task(s) still running on suspend\n");

	clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);

	return 0;
}

static int cmdq_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));
	cmdq->suspended = false;
	return 0;
}

static int cmdq_remove(struct platform_device *pdev)
{
	struct cmdq *cmdq = platform_get_drvdata(pdev);

	clk_bulk_unprepare(cmdq->gce_num, cmdq->clocks);
	return 0;
}

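/*
 * Mailbox send_data hook. An idle thread is reset and started with its
 * PC/END registers framing the new command buffer; a busy thread is briefly
 * suspended so the new task can either become the current one (when the PC
 * has already reached the end boundary) or be chained behind the last task.
 */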
static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task;
	unsigned long curr_pa, end_pa;

	/* Client should not flush new tasks if suspended. */
	WARN_ON(cmdq->suspended);

	task = kzalloc(sizeof(*task), GFP_ATOMIC);
	if (!task)
		return -ENOMEM;

	task->cmdq = cmdq;
	INIT_LIST_HEAD(&task->list_entry);
	task->pa_base = pkt->pa_base;
	task->thread = thread;
	task->pkt = pkt;

	if (list_empty(&thread->task_busy_list)) {
		WARN_ON(clk_bulk_enable(cmdq->gce_num, cmdq->clocks));

		/*
		 * A thread reset clears the thread's registers (pc, end,
		 * priority, irq, suspend and enable) to 0. Writing
		 * CMDQ_THR_ENABLED to CMDQ_THR_ENABLE_TASK afterwards
		 * enables the thread and starts it running.
		 */
		WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);

		writel(task->pa_base >> cmdq->shift_pa,
		       thread->base + CMDQ_THR_CURR_ADDR);
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
		       thread->base + CMDQ_THR_END_ADDR);

		writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
		writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
		writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
	} else {
		WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
		curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR) <<
			cmdq->shift_pa;
		end_pa = readl(thread->base + CMDQ_THR_END_ADDR) <<
			cmdq->shift_pa;
		/* check boundary */
		if (curr_pa == end_pa - CMDQ_INST_SIZE ||
		    curr_pa == end_pa) {
			/* set to this task directly */
			writel(task->pa_base >> cmdq->shift_pa,
			       thread->base + CMDQ_THR_CURR_ADDR);
		} else {
			cmdq_task_insert_into_thread(task);
			smp_mb(); /* modify jump before enable thread */
		}
		writel((task->pa_base + pkt->cmd_buf_size) >> cmdq->shift_pa,
		       thread->base + CMDQ_THR_END_ADDR);
		cmdq_thread_resume(thread);
	}
	list_move_tail(&task->list_entry, &thread->task_busy_list);

	return 0;
}

static int cmdq_mbox_startup(struct mbox_chan *chan)
{
	return 0;
}

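/*
 * Channel teardown: deliver success callbacks for work the GCE has already
 * executed, abort everything still pending with -ECONNABORTED, and disable
 * the thread.
 */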
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto done;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);

	/* make sure executed tasks get their success callback */
	cmdq_thread_irq_handler(cmdq, thread);
	if (list_empty(&thread->task_busy_list))
		goto done;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		cmdq_task_exec_done(task, -ECONNABORTED);
		kfree(task);
	}

	cmdq_thread_disable(cmdq, thread);
	clk_bulk_disable(cmdq->gce_num, cmdq->clocks);

done:
	/*
	 * An empty thread->task_busy_list means the thread is already
	 * disabled. cmdq_mbox_send_data() always resets the thread (which
	 * clears the disable and suspend state) when the first packet is
	 * sent to a channel, so there is nothing to undo here; just unlock
	 * and leave.
	 */
	spin_unlock_irqrestore(&thread->chan->lock, flags);
}

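/*
 * Mailbox flush hook: if the thread is blocked in a wait-for-event, abort
 * all queued tasks immediately; otherwise let it run and poll, up to the
 * given timeout, for the IRQ handler to disable the thread once the queue
 * drains.
 */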
static int cmdq_mbox_flush(struct mbox_chan *chan, unsigned long timeout)
{
	struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
	struct cmdq_task_cb *cb;
	struct cmdq_cb_data data;
	struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
	struct cmdq_task *task, *tmp;
	unsigned long flags;
	u32 enable;

	spin_lock_irqsave(&thread->chan->lock, flags);
	if (list_empty(&thread->task_busy_list))
		goto out;

	WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
	if (!cmdq_thread_is_in_wfe(thread))
		goto wait;

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		cb = &task->pkt->async_cb;
		data.sta = -ECONNABORTED;
		data.data = cb->data;
		data.pkt = task->pkt;
		if (cb->cb)
			cb->cb(data);

		mbox_chan_received_data(task->thread->chan, &data);
		list_del(&task->list_entry);
		kfree(task);
	}

	cmdq_thread_resume(thread);
	cmdq_thread_disable(cmdq, thread);
	clk_bulk_disable(cmdq->gce_num, cmdq->clocks);

out:
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	return 0;

wait:
	cmdq_thread_resume(thread);
	spin_unlock_irqrestore(&thread->chan->lock, flags);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_ENABLE_TASK,
				      enable, enable == 0, 1, timeout)) {
		dev_err(cmdq->mbox.dev, "failed to wait for GCE thread 0x%x to finish\n",
			(u32)(thread->base - cmdq->base));

		return -EFAULT;
	}
	return 0;
}

static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
	.send_data = cmdq_mbox_send_data,
	.startup = cmdq_mbox_startup,
	.shutdown = cmdq_mbox_shutdown,
	.flush = cmdq_mbox_flush,
};

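/*
 * #mbox-cells translation: args[0] selects the GCE thread backing the
 * channel and args[1] is the thread priority later programmed into
 * CMDQ_THR_PRIORITY.
 */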
static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
				    const struct of_phandle_args *sp)
{
	int ind = sp->args[0];
	struct cmdq_thread *thread;

	if (ind >= mbox->num_chans)
		return ERR_PTR(-EINVAL);

	thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
	thread->priority = sp->args[1];
	thread->chan = &mbox->chans[ind];

	return &mbox->chans[ind];
}

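/*
 * On single-GCE SoCs the clock is looked up as "gce" on the device itself;
 * on multi-GCE SoCs each instance's clock is found through a "gceN" alias
 * on the sibling GCE device nodes.
 */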
static int cmdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct cmdq *cmdq;
	int err, i;
	struct gce_plat *plat_data;
	struct device_node *phandle = dev->of_node;
	struct device_node *node;
	int alias_id = 0;
	static const char * const clk_name = "gce";
	static const char * const clk_names[] = { "gce0", "gce1" };

	cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return -ENOMEM;

	cmdq->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cmdq->base))
		return PTR_ERR(cmdq->base);

	cmdq->irq = platform_get_irq(pdev, 0);
	if (cmdq->irq < 0)
		return cmdq->irq;

	plat_data = (struct gce_plat *)of_device_get_match_data(dev);
	if (!plat_data) {
		dev_err(dev, "failed to get match data\n");
		return -EINVAL;
	}

	cmdq->thread_nr = plat_data->thread_nr;
	cmdq->shift_pa = plat_data->shift;
	cmdq->control_by_sw = plat_data->control_by_sw;
	cmdq->gce_num = plat_data->gce_num;
	cmdq->irq_mask = GENMASK(cmdq->thread_nr - 1, 0);
	err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
			       "mtk_cmdq", cmdq);
	if (err < 0) {
		dev_err(dev, "failed to register ISR (%d)\n", err);
		return err;
	}

	dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
		dev, cmdq->base, cmdq->irq);

	if (cmdq->gce_num > 1) {
		for_each_child_of_node(phandle->parent, node) {
			alias_id = of_alias_get_id(node, clk_name);
			if (alias_id >= 0 && alias_id < cmdq->gce_num) {
				cmdq->clocks[alias_id].id = clk_names[alias_id];
				cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
				if (IS_ERR(cmdq->clocks[alias_id].clk)) {
					dev_err(dev, "failed to get gce clk: %d\n", alias_id);
					return PTR_ERR(cmdq->clocks[alias_id].clk);
				}
			}
		}
	} else {
		cmdq->clocks[alias_id].id = clk_name;
		cmdq->clocks[alias_id].clk = devm_clk_get(&pdev->dev, clk_name);
		if (IS_ERR(cmdq->clocks[alias_id].clk)) {
			dev_err(dev, "failed to get gce clk\n");
			return PTR_ERR(cmdq->clocks[alias_id].clk);
		}
	}

	cmdq->mbox.dev = dev;
	cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
					sizeof(*cmdq->mbox.chans), GFP_KERNEL);
	if (!cmdq->mbox.chans)
		return -ENOMEM;

	cmdq->mbox.num_chans = cmdq->thread_nr;
	cmdq->mbox.ops = &cmdq_mbox_chan_ops;
	cmdq->mbox.of_xlate = cmdq_xlate;

	/* make use of TXDONE_BY_ACK */
	cmdq->mbox.txdone_irq = false;
	cmdq->mbox.txdone_poll = false;

	cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
				    sizeof(*cmdq->thread), GFP_KERNEL);
	if (!cmdq->thread)
		return -ENOMEM;

	for (i = 0; i < cmdq->thread_nr; i++) {
		cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
				CMDQ_THR_SIZE * i;
		INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
		cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
	}

	err = devm_mbox_controller_register(dev, &cmdq->mbox);
	if (err < 0) {
		dev_err(dev, "failed to register mailbox: %d\n", err);
		return err;
	}

	platform_set_drvdata(pdev, cmdq);

	WARN_ON(clk_bulk_prepare(cmdq->gce_num, cmdq->clocks));

	cmdq_init(cmdq);

	return 0;
}

static const struct dev_pm_ops cmdq_pm_ops = {
	.suspend = cmdq_suspend,
	.resume = cmdq_resume,
};

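/*
 * Per-SoC configuration: GCE thread count, physical-address shift (a shift
 * of 3 packs 8-byte-aligned addresses into the 32-bit PC/END registers),
 * whether the GCE clock is gated by software, and the number of GCE
 * hardware instances.
 */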
static const struct gce_plat gce_plat_v2 = {
	.thread_nr = 16,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_v3 = {
	.thread_nr = 24,
	.shift = 0,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_v4 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = false,
	.gce_num = 1
};

static const struct gce_plat gce_plat_v5 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = true,
	.gce_num = 2
};

static const struct gce_plat gce_plat_v6 = {
	.thread_nr = 24,
	.shift = 3,
	.control_by_sw = false,
	.gce_num = 2
};

static const struct of_device_id cmdq_of_ids[] = {
	{.compatible = "mediatek,mt8173-gce", .data = (void *)&gce_plat_v2},
	{.compatible = "mediatek,mt8183-gce", .data = (void *)&gce_plat_v3},
	{.compatible = "mediatek,mt6779-gce", .data = (void *)&gce_plat_v4},
	{.compatible = "mediatek,mt8192-gce", .data = (void *)&gce_plat_v5},
	{.compatible = "mediatek,mt8195-gce", .data = (void *)&gce_plat_v6},
	{}
};

static struct platform_driver cmdq_drv = {
	.probe = cmdq_probe,
	.remove = cmdq_remove,
	.driver = {
		.name = "mtk_cmdq",
		.pm = &cmdq_pm_ops,
		.of_match_table = cmdq_of_ids,
	}
};

static int __init cmdq_drv_init(void)
{
	return platform_driver_register(&cmdq_drv);
}

static void __exit cmdq_drv_exit(void)
{
	platform_driver_unregister(&cmdq_drv);
}

subsys_initcall(cmdq_drv_init);
module_exit(cmdq_drv_exit);

MODULE_LICENSE("GPL v2");