// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2018 MediaTek Inc.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/mailbox_controller.h>
#include <linux/soc/mediatek/mtk-cmdq.h>

#define CMDQ_WRITE_ENABLE_MASK	BIT(0)
#define CMDQ_POLL_ENABLE_MASK	BIT(0)
#define CMDQ_EOC_IRQ_EN		BIT(0)

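/*
 * One 64-bit GCE instruction. The unions mirror how the fields are
 * interpreted per opcode: @value for write/poll data vs. @mask for
 * CMDQ_CODE_MASK, and @offset for register commands vs. @event for
 * wait-for-event commands.
 */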
struct cmdq_instruction {
	union {
		u32 value;
		u32 mask;
	};
	union {
		u16 offset;
		u16 event;
	};
	u8 subsys;
	u8 op;
};

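/**
 * cmdq_dev_get_client_reg() - parse a "mediatek,gce-client-reg" phandle
 * @dev: the client device
 * @client_reg: filled with the subsys/offset/size triple from the phandle args
 * @idx: index into the "mediatek,gce-client-reg" property
 *
 * Return: 0 on success, or a negative errno if the property cannot be parsed.
 */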
int cmdq_dev_get_client_reg(struct device *dev,
			    struct cmdq_client_reg *client_reg, int idx)
{
	struct of_phandle_args spec;
	int err;

	if (!client_reg)
		return -ENOENT;

	err = of_parse_phandle_with_fixed_args(dev->of_node,
					       "mediatek,gce-client-reg",
					       3, idx, &spec);
	if (err < 0) {
		dev_err(dev,
			"error %d can't parse gce-client-reg property (%d)\n",
			err, idx);
		return err;
	}

	client_reg->subsys = (u8)spec.args[0];
	client_reg->offset = (u16)spec.args[1];
	client_reg->size = (u16)spec.args[2];
	of_node_put(spec.np);

	return 0;
}
EXPORT_SYMBOL(cmdq_dev_get_client_reg);

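/* Watchdog: fires when a flushed packet has not completed within timeout_ms. */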
static void cmdq_client_timeout(struct timer_list *t)
{
	struct cmdq_client *client = from_timer(client, t, timer);

	dev_err(client->client.dev, "cmdq timeout!\n");
}

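/**
 * cmdq_mbox_create() - create a CMDQ mailbox client and request a channel
 * @dev: the client device
 * @index: the channel index in the "mboxes" property
 * @timeout: per-packet completion timeout in ms, or CMDQ_NO_TIMEOUT to
 *           disable the watchdog timer
 *
 * Return: the CMDQ mailbox client, or an ERR_PTR() on failure.
 */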
struct cmdq_client *cmdq_mbox_create(struct device *dev, int index, u32 timeout)
{
	struct cmdq_client *client;

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->timeout_ms = timeout;
	if (timeout != CMDQ_NO_TIMEOUT) {
		spin_lock_init(&client->lock);
		timer_setup(&client->timer, cmdq_client_timeout, 0);
	}
	client->pkt_cnt = 0;
	client->client.dev = dev;
	client->client.tx_block = false;
	client->client.knows_txdone = true;
	client->chan = mbox_request_channel(&client->client, index);

	if (IS_ERR(client->chan)) {
		long err;

		dev_err(dev, "failed to request channel\n");
		err = PTR_ERR(client->chan);
		kfree(client);

		return ERR_PTR(err);
	}

	return client;
}
EXPORT_SYMBOL(cmdq_mbox_create);

void cmdq_mbox_destroy(struct cmdq_client *client)
{
	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock(&client->lock);
		del_timer_sync(&client->timer);
		spin_unlock(&client->lock);
	}
	mbox_free_channel(client->chan);
	kfree(client);
}
EXPORT_SYMBOL(cmdq_mbox_destroy);

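/**
 * cmdq_pkt_create() - create a CMDQ packet with a DMA-mapped command buffer
 * @client: the CMDQ mailbox client
 * @size: command buffer size in bytes
 *
 * Return: the CMDQ packet, or an ERR_PTR() on allocation or mapping failure.
 */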
struct cmdq_pkt *cmdq_pkt_create(struct cmdq_client *client, size_t size)
{
	struct cmdq_pkt *pkt;
	struct device *dev;
	dma_addr_t dma_addr;

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return ERR_PTR(-ENOMEM);
	pkt->va_base = kzalloc(size, GFP_KERNEL);
	if (!pkt->va_base) {
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}
	pkt->buf_size = size;
	pkt->cl = (void *)client;

	dev = client->chan->mbox->dev;
	dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "dma map failed, size=%zu\n", size);
		kfree(pkt->va_base);
		kfree(pkt);
		return ERR_PTR(-ENOMEM);
	}

	pkt->pa_base = dma_addr;

	return pkt;
}
EXPORT_SYMBOL(cmdq_pkt_create);

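/**
 * cmdq_pkt_destroy() - unmap and free a CMDQ packet
 * @pkt: the CMDQ packet
 */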
void cmdq_pkt_destroy(struct cmdq_pkt *pkt)
{
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
			 DMA_TO_DEVICE);
	kfree(pkt->va_base);
	kfree(pkt);
}
EXPORT_SYMBOL(cmdq_pkt_destroy);

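/* Append one instruction to the packet, tracking the required buffer size. */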
static int cmdq_pkt_append_command(struct cmdq_pkt *pkt,
				   struct cmdq_instruction inst)
{
	struct cmdq_instruction *cmd_ptr;

	if (unlikely(pkt->cmd_buf_size + CMDQ_INST_SIZE > pkt->buf_size)) {
		/*
		 * Even after the allocated buffer (pkt->buf_size) is used up,
		 * keep increasing the required size (pkt->cmd_buf_size) so
		 * that, once all commands have been appended and the packet
		 * flushed, the user knows how much memory is ultimately
		 * needed and can call cmdq_pkt_create() again with the real
		 * required buffer size.
		 */
		pkt->cmd_buf_size += CMDQ_INST_SIZE;
		WARN_ONCE(1, "%s: buffer size %zu is too small!\n",
			  __func__, pkt->buf_size);
		return -ENOMEM;
	}

	cmd_ptr = pkt->va_base + pkt->cmd_buf_size;
	*cmd_ptr = inst;
	pkt->cmd_buf_size += CMDQ_INST_SIZE;

	return 0;
}

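/**
 * cmdq_pkt_write() - append a write command to the CMDQ packet
 * @pkt: the CMDQ packet
 * @subsys: the CMDQ sub system code
 * @offset: register offset within the sub system
 * @value: the value to write
 *
 * Return: 0 for success, or a negative errno if the buffer is full.
 */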
int cmdq_pkt_write(struct cmdq_pkt *pkt, u8 subsys, u16 offset, u32 value)
{
	struct cmdq_instruction inst;

	inst.op = CMDQ_CODE_WRITE;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_write);

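/**
 * cmdq_pkt_write_mask() - append a write command with mask to the CMDQ packet
 * @pkt: the CMDQ packet
 * @subsys: the CMDQ sub system code
 * @offset: register offset within the sub system
 * @value: the value to write
 * @mask: the mask of the value; a full 0xffffffff mask skips the extra
 *        mask instruction
 *
 * Return: 0 for success, or a negative errno if the buffer is full.
 */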
int cmdq_pkt_write_mask(struct cmdq_pkt *pkt, u8 subsys,
			u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	u16 offset_mask = offset;
	int err;

	if (mask != 0xffffffff) {
		inst.op = CMDQ_CODE_MASK;
		inst.mask = ~mask;
		err = cmdq_pkt_append_command(pkt, inst);
		if (err < 0)
			return err;

		offset_mask |= CMDQ_WRITE_ENABLE_MASK;
	}

	return cmdq_pkt_write(pkt, subsys, offset_mask, value);
}
EXPORT_SYMBOL(cmdq_pkt_write_mask);

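/**
 * cmdq_pkt_wfe() - append a wait-for-event command to the CMDQ packet
 * @pkt: the CMDQ packet
 * @event: the hardware event to wait for; must be below CMDQ_MAX_EVENT
 *
 * Return: 0 for success; -EINVAL for an out-of-range event.
 */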
int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_OPTION;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_wfe);

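/**
 * cmdq_pkt_clear_event() - append a clear-event command to the CMDQ packet
 * @pkt: the CMDQ packet
 * @event: the hardware event to clear; must be below CMDQ_MAX_EVENT
 *
 * Return: 0 for success; -EINVAL for an out-of-range event.
 */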
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event)
{
	struct cmdq_instruction inst = { {0} };

	if (event >= CMDQ_MAX_EVENT)
		return -EINVAL;

	inst.op = CMDQ_CODE_WFE;
	inst.value = CMDQ_WFE_UPDATE;
	inst.event = event;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_clear_event);

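/**
 * cmdq_pkt_poll() - append a polling command to the CMDQ packet
 * @pkt: the CMDQ packet
 * @subsys: the CMDQ sub system code
 * @offset: register offset within the sub system
 * @value: the value to poll for
 *
 * The GCE spins on the register until it reads back @value, so execution of
 * the rest of the packet stalls until the condition is met.
 *
 * Return: 0 for success, or a negative errno if the buffer is full.
 */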
int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
		  u16 offset, u32 value)
{
	struct cmdq_instruction inst = { {0} };

	inst.op = CMDQ_CODE_POLL;
	inst.value = value;
	inst.offset = offset;
	inst.subsys = subsys;

	return cmdq_pkt_append_command(pkt, inst);
}
EXPORT_SYMBOL(cmdq_pkt_poll);

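/**
 * cmdq_pkt_poll_mask() - append a polling-with-mask command to the CMDQ packet
 * @pkt: the CMDQ packet
 * @subsys: the CMDQ sub system code
 * @offset: register offset within the sub system
 * @value: the value to poll for
 * @mask: the mask applied to the register value before comparison
 *
 * Return: 0 for success, or a negative errno if the buffer is full.
 */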
int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
		       u16 offset, u32 value, u32 mask)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	inst.op = CMDQ_CODE_MASK;
	inst.mask = ~mask;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	offset |= CMDQ_POLL_ENABLE_MASK;

	return cmdq_pkt_poll(pkt, subsys, offset, value);
}
EXPORT_SYMBOL(cmdq_pkt_poll_mask);

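/* Terminate the packet: EOC (with IRQ enabled) followed by a jump past the end. */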
static int cmdq_pkt_finalize(struct cmdq_pkt *pkt)
{
	struct cmdq_instruction inst = { {0} };
	int err;

	/* insert EOC and generate IRQ for each command iteration */
	inst.op = CMDQ_CODE_EOC;
	inst.value = CMDQ_EOC_IRQ_EN;
	err = cmdq_pkt_append_command(pkt, inst);
	if (err < 0)
		return err;

	/* JUMP to end */
	inst.op = CMDQ_CODE_JUMP;
	inst.value = CMDQ_JUMP_PASS;

	return cmdq_pkt_append_command(pkt, inst);
}

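/*
 * Mailbox completion callback: stop or rearm the watchdog timer, hand the
 * command buffer back to the CPU, then invoke the client's callback, if any.
 */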
static void cmdq_pkt_flush_async_cb(struct cmdq_cb_data data)
{
	struct cmdq_pkt *pkt = (struct cmdq_pkt *)data.data;
	struct cmdq_task_cb *cb = &pkt->cb;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		unsigned long flags = 0;

		spin_lock_irqsave(&client->lock, flags);
		if (--client->pkt_cnt == 0)
			del_timer(&client->timer);
		else
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	dma_sync_single_for_cpu(client->chan->mbox->dev, pkt->pa_base,
				pkt->cmd_buf_size, DMA_TO_DEVICE);
	if (cb->cb) {
		data.data = cb->data;
		cb->cb(data);
	}
}

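/**
 * cmdq_pkt_flush_async() - finalize the packet and send it to the GCE,
 *                          without blocking
 * @pkt: the CMDQ packet
 * @cb: called in interrupt context when the packet completes or aborts
 * @data: opaque pointer passed back through @cb
 *
 * Return: 0 for success. The packet must not be touched until @cb runs.
 */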
int cmdq_pkt_flush_async(struct cmdq_pkt *pkt, cmdq_async_flush_cb cb,
			 void *data)
{
	int err;
	unsigned long flags = 0;
	struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

	err = cmdq_pkt_finalize(pkt);
	if (err < 0)
		return err;

	pkt->cb.cb = cb;
	pkt->cb.data = data;
	pkt->async_cb.cb = cmdq_pkt_flush_async_cb;
	pkt->async_cb.data = pkt;

	dma_sync_single_for_device(client->chan->mbox->dev, pkt->pa_base,
				   pkt->cmd_buf_size, DMA_TO_DEVICE);

	if (client->timeout_ms != CMDQ_NO_TIMEOUT) {
		spin_lock_irqsave(&client->lock, flags);
		if (client->pkt_cnt++ == 0)
			mod_timer(&client->timer, jiffies +
				  msecs_to_jiffies(client->timeout_ms));
		spin_unlock_irqrestore(&client->lock, flags);
	}

	err = mbox_send_message(client->chan, pkt);
	if (err < 0)
		return err;
	/* We can send next packet immediately, so just call txdone. */
	mbox_client_txdone(client->chan, 0);

	return 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush_async);

struct cmdq_flush_completion {
	struct completion cmplt;
	bool err;
};

static void cmdq_pkt_flush_cb(struct cmdq_cb_data data)
{
	struct cmdq_flush_completion *cmplt;

	cmplt = (struct cmdq_flush_completion *)data.data;
	cmplt->err = data.sta != CMDQ_CB_NORMAL;
	complete(&cmplt->cmplt);
}

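/**
 * cmdq_pkt_flush() - finalize the packet, send it to the GCE and block until
 *                    it completes
 * @pkt: the CMDQ packet
 *
 * Return: 0 for success; -EFAULT if the packet completed with an error.
 */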
int cmdq_pkt_flush(struct cmdq_pkt *pkt)
{
	struct cmdq_flush_completion cmplt;
	int err;

	init_completion(&cmplt.cmplt);
	err = cmdq_pkt_flush_async(pkt, cmdq_pkt_flush_cb, &cmplt);
	if (err < 0)
		return err;
	wait_for_completion(&cmplt.cmplt);

	return cmplt.err ? -EFAULT : 0;
}
EXPORT_SYMBOL(cmdq_pkt_flush);

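/*
 * Typical client usage, as a sketch (the device, channel index, subsys code,
 * register offset and event ID below are illustrative placeholders, not
 * values defined by this file):
 *
 *	struct cmdq_client *cl;
 *	struct cmdq_pkt *pkt;
 *
 *	cl = cmdq_mbox_create(dev, 0, CMDQ_NO_TIMEOUT);
 *	pkt = cmdq_pkt_create(cl, PAGE_SIZE);
 *	cmdq_pkt_write_mask(pkt, subsys, reg_offset, value, GENMASK(15, 0));
 *	cmdq_pkt_wfe(pkt, event_id);
 *	cmdq_pkt_flush(pkt);
 *	cmdq_pkt_destroy(pkt);
 *	cmdq_mbox_destroy(cl);
 */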
MODULE_LICENSE("GPL v2");