Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2021 MediaTek Inc.
4 * Author: Yunfei Dong <yunfei.dong@mediatek.com>
5 */
6
7#include <linux/freezer.h>
8#include <linux/interrupt.h>
9#include <linux/kthread.h>
10
11#include "mtk_vcodec_dec_pm.h"
12#include "mtk_vcodec_drv.h"
13#include "vdec_msg_queue.h"
14
/* max time (ms) a dequeue waits for a buffer before giving up */
#define VDEC_MSG_QUEUE_TIMEOUT_MS 1500

/* the size used to store lat slice header information */
#define VDEC_LAT_SLICE_HEADER_SZ (640 * SZ_1K)

/* the size used to store avc error information */
#define VDEC_ERR_MAP_SZ_AVC (17 * SZ_1K)
22
23/* core will read the trans buffer which decoded by lat to decode again.
24 * The trans buffer size of FHD and 4K bitstreams are different.
25 */
26static int vde_msg_queue_get_trans_size(int width, int height)
27{
28 if (width > 1920 || height > 1088)
29 return 30 * SZ_1M;
30 else
31 return 6 * SZ_1M;
32}
33
34void vdec_msg_queue_init_ctx(struct vdec_msg_queue_ctx *ctx, int hardware_index)
35{
36 init_waitqueue_head(&ctx->ready_to_use);
37 INIT_LIST_HEAD(&ctx->ready_queue);
38 spin_lock_init(&ctx->ready_lock);
39 ctx->ready_num = 0;
40 ctx->hardware_index = hardware_index;
41}
42
43static struct list_head *vdec_get_buf_list(int hardware_index, struct vdec_lat_buf *buf)
44{
45 switch (hardware_index) {
46 case MTK_VDEC_CORE:
47 return &buf->core_list;
48 case MTK_VDEC_LAT0:
49 return &buf->lat_list;
50 default:
51 return NULL;
52 }
53}
54
55static void vdec_msg_queue_inc(struct vdec_msg_queue *msg_queue, int hardware_index)
56{
57 if (hardware_index == MTK_VDEC_CORE)
58 atomic_inc(&msg_queue->core_list_cnt);
59 else
60 atomic_inc(&msg_queue->lat_list_cnt);
61}
62
63static void vdec_msg_queue_dec(struct vdec_msg_queue *msg_queue, int hardware_index)
64{
65 if (hardware_index == MTK_VDEC_CORE)
66 atomic_dec(&msg_queue->core_list_cnt);
67 else
68 atomic_dec(&msg_queue->lat_list_cnt);
69}
70
/*
 * vdec_msg_queue_qbuf - put a lat buffer on the ready queue of @msg_ctx.
 *
 * The buffer is linked through its core_list or lat_list node depending on
 * which hardware queue @msg_ctx is.  For the LAT queue, waiters blocked in
 * vdec_msg_queue_dqbuf() are woken; for the CORE queue, a core-decode work
 * item is scheduled instead, but only while fewer work items than queued
 * core buffers are outstanding.
 *
 * Return: 0 on success, -EINVAL for an unknown hardware index.
 */
int vdec_msg_queue_qbuf(struct vdec_msg_queue_ctx *msg_ctx, struct vdec_lat_buf *buf)
{
	struct list_head *head;
	int status;

	head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
	if (!head) {
		mtk_v4l2_err("fail to qbuf: %d", msg_ctx->hardware_index);
		return -EINVAL;
	}

	spin_lock(&msg_ctx->ready_lock);
	list_add_tail(head, &msg_ctx->ready_queue);
	msg_ctx->ready_num++;

	vdec_msg_queue_inc(&buf->ctx->msg_queue, msg_ctx->hardware_index);
	if (msg_ctx->hardware_index != MTK_VDEC_CORE) {
		/* LAT queue: a consumer may be sleeping in dqbuf on this event */
		wake_up_all(&msg_ctx->ready_to_use);
	} else {
		/*
		 * CORE queue: core_work_cnt (in-flight work items) is protected
		 * by ready_lock; only schedule more work while it lags behind
		 * the number of buffers on the core list.
		 */
		if (buf->ctx->msg_queue.core_work_cnt <
		    atomic_read(&buf->ctx->msg_queue.core_list_cnt)) {
			status = queue_work(buf->ctx->dev->core_workqueue,
					    &buf->ctx->msg_queue.core_work);
			if (status)
				buf->ctx->msg_queue.core_work_cnt++;
		}
	}

	mtk_v4l2_debug(3, "enqueue buf type: %d addr: 0x%p num: %d",
		       msg_ctx->hardware_index, buf, msg_ctx->ready_num);
	spin_unlock(&msg_ctx->ready_lock);

	return 0;
}
105
106static bool vdec_msg_queue_wait_event(struct vdec_msg_queue_ctx *msg_ctx)
107{
108 int ret;
109
110 ret = wait_event_timeout(msg_ctx->ready_to_use,
111 !list_empty(&msg_ctx->ready_queue),
112 msecs_to_jiffies(VDEC_MSG_QUEUE_TIMEOUT_MS));
113 if (!ret)
114 return false;
115
116 return true;
117}
118
/*
 * vdec_msg_queue_dqbuf - take the oldest lat buffer off the ready queue.
 *
 * An empty CORE queue returns NULL immediately; an empty LAT queue blocks
 * up to VDEC_MSG_QUEUE_TIMEOUT_MS waiting for a producer.
 *
 * Return: the dequeued buffer, or NULL on timeout / empty CORE queue /
 * unknown hardware index.
 */
struct vdec_lat_buf *vdec_msg_queue_dqbuf(struct vdec_msg_queue_ctx *msg_ctx)
{
	struct vdec_lat_buf *buf;
	struct list_head *head;
	int ret;

	spin_lock(&msg_ctx->ready_lock);
	if (list_empty(&msg_ctx->ready_queue)) {
		mtk_v4l2_debug(3, "queue is NULL, type:%d num: %d",
			       msg_ctx->hardware_index, msg_ctx->ready_num);
		spin_unlock(&msg_ctx->ready_lock);

		if (msg_ctx->hardware_index == MTK_VDEC_CORE)
			return NULL;

		/* LAT queue: wait for vdec_msg_queue_qbuf() to wake us */
		ret = vdec_msg_queue_wait_event(msg_ctx);
		if (!ret)
			return NULL;
		/*
		 * NOTE(review): the queue was non-empty at wake-up time; this
		 * assumes nobody else can drain it before we re-acquire the
		 * lock — presumably each queue has a single consumer, confirm
		 * against the callers.
		 */
		spin_lock(&msg_ctx->ready_lock);
	}

	/* the list node linking the buffer differs per hardware queue */
	if (msg_ctx->hardware_index == MTK_VDEC_CORE)
		buf = list_first_entry(&msg_ctx->ready_queue,
				       struct vdec_lat_buf, core_list);
	else
		buf = list_first_entry(&msg_ctx->ready_queue,
				       struct vdec_lat_buf, lat_list);

	head = vdec_get_buf_list(msg_ctx->hardware_index, buf);
	if (!head) {
		spin_unlock(&msg_ctx->ready_lock);
		mtk_v4l2_err("fail to dqbuf: %d", msg_ctx->hardware_index);
		return NULL;
	}
	list_del(head);
	vdec_msg_queue_dec(&buf->ctx->msg_queue, msg_ctx->hardware_index);

	msg_ctx->ready_num--;
	mtk_v4l2_debug(3, "dqueue buf type:%d addr: 0x%p num: %d",
		       msg_ctx->hardware_index, buf, msg_ctx->ready_num);
	spin_unlock(&msg_ctx->ready_lock);

	return buf;
}
163
164void vdec_msg_queue_update_ube_rptr(struct vdec_msg_queue *msg_queue, uint64_t ube_rptr)
165{
166 spin_lock(&msg_queue->lat_ctx.ready_lock);
167 msg_queue->wdma_rptr_addr = ube_rptr;
168 mtk_v4l2_debug(3, "update ube rprt (0x%llx)", ube_rptr);
169 spin_unlock(&msg_queue->lat_ctx.ready_lock);
170}
171
/*
 * vdec_msg_queue_update_ube_wptr - record the ube write pointer under the
 * lat queue lock (shared with the read-pointer update above).
 */
void vdec_msg_queue_update_ube_wptr(struct vdec_msg_queue *msg_queue, uint64_t ube_wptr)
{
	spin_lock(&msg_queue->lat_ctx.ready_lock);
	msg_queue->wdma_wptr_addr = ube_wptr;
	mtk_v4l2_debug(3, "update ube wprt: (0x%llx 0x%llx) offset: 0x%llx",
		       msg_queue->wdma_rptr_addr, msg_queue->wdma_wptr_addr,
		       ube_wptr);
	spin_unlock(&msg_queue->lat_ctx.ready_lock);
}
181
/*
 * vdec_msg_queue_wait_lat_buf_full - wait until every lat buffer of this
 * context has been decoded by core and returned to the lat queue.
 *
 * Buffers of this context still pending on the shared core ready queue are
 * moved to its head (their order preserved) and a core work item is kicked
 * for each, so they are handled ahead of other contexts.  Then wait,
 * bounded by roughly one second per buffer, for the lat queue to fill back
 * up to NUM_BUFFER_COUNT.
 *
 * Return: true when all lat buffers came back; false on timeout, in which
 * case this context's leftover buffers are dropped from the core queue.
 */
bool vdec_msg_queue_wait_lat_buf_full(struct vdec_msg_queue *msg_queue)
{
	struct vdec_lat_buf *buf, *tmp;
	/*
	 * NOTE(review): assumes at most 3 buffers of this context can sit on
	 * the core queue at once — presumably NUM_BUFFER_COUNT == 3; confirm,
	 * or size this array with NUM_BUFFER_COUNT.
	 */
	struct list_head *list_core[3];
	struct vdec_msg_queue_ctx *core_ctx;
	int ret, i, in_core_count = 0, count = 0;
	long timeout_jiff;

	core_ctx = &msg_queue->ctx->dev->msg_queue_core_ctx;
	spin_lock(&core_ctx->ready_lock);
	/* unlink this context's buffers from the shared core ready queue */
	list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
		if (buf && buf->ctx == msg_queue->ctx) {
			list_core[in_core_count++] = &buf->core_list;
			list_del(&buf->core_list);
		}
	}

	/* re-add in reverse so their original order ends up at the queue head */
	for (i = 0; i < in_core_count; i++) {
		list_add(list_core[in_core_count - (1 + i)], &core_ctx->ready_queue);
		queue_work(msg_queue->ctx->dev->core_workqueue, &msg_queue->core_work);
	}
	spin_unlock(&core_ctx->ready_lock);

	timeout_jiff = msecs_to_jiffies(1000 * (NUM_BUFFER_COUNT + 2));
	ret = wait_event_timeout(msg_queue->ctx->msg_queue.core_dec_done,
				 msg_queue->lat_ctx.ready_num == NUM_BUFFER_COUNT,
				 timeout_jiff);
	if (ret) {
		mtk_v4l2_debug(3, "success to get lat buf: %d",
			       msg_queue->lat_ctx.ready_num);
		return true;
	}

	/* timed out: purge whatever of ours is still on the core queue */
	spin_lock(&core_ctx->ready_lock);
	list_for_each_entry_safe(buf, tmp, &core_ctx->ready_queue, core_list) {
		if (buf && buf->ctx == msg_queue->ctx) {
			count++;
			list_del(&buf->core_list);
		}
	}
	spin_unlock(&core_ctx->ready_lock);

	mtk_v4l2_err("failed with lat buf isn't full: list(%d %d) count:%d",
		     atomic_read(&msg_queue->lat_list_cnt),
		     atomic_read(&msg_queue->core_list_cnt), count);

	return false;
}
230
231void vdec_msg_queue_deinit(struct vdec_msg_queue *msg_queue,
232 struct mtk_vcodec_ctx *ctx)
233{
234 struct vdec_lat_buf *lat_buf;
235 struct mtk_vcodec_mem *mem;
236 int i;
237
238 mem = &msg_queue->wdma_addr;
239 if (mem->va)
240 mtk_vcodec_mem_free(ctx, mem);
241 for (i = 0; i < NUM_BUFFER_COUNT; i++) {
242 lat_buf = &msg_queue->lat_buf[i];
243
244 mem = &lat_buf->wdma_err_addr;
245 if (mem->va)
246 mtk_vcodec_mem_free(ctx, mem);
247
248 mem = &lat_buf->slice_bc_addr;
249 if (mem->va)
250 mtk_vcodec_mem_free(ctx, mem);
251
252 kfree(lat_buf->private_data);
253 }
254}
255
/*
 * vdec_msg_queue_core_work - work item that runs one core decode pass.
 *
 * Dequeues one lat buffer from the global core queue, runs its core_decode
 * callback on the CORE hardware, then hands the buffer back to its owning
 * context's lat queue and wakes core_dec_done waiters.  Re-queues itself
 * (under the core ready_lock protecting core_work_cnt) while more buffers
 * than outstanding work items remain on that context's core list.
 */
static void vdec_msg_queue_core_work(struct work_struct *work)
{
	struct vdec_msg_queue *msg_queue =
		container_of(work, struct vdec_msg_queue, core_work);
	struct mtk_vcodec_ctx *ctx =
		container_of(msg_queue, struct mtk_vcodec_ctx, msg_queue);
	struct mtk_vcodec_dev *dev = ctx->dev;
	struct vdec_lat_buf *lat_buf;
	int status;

	lat_buf = vdec_msg_queue_dqbuf(&dev->msg_queue_core_ctx);
	if (!lat_buf)
		return;

	/* the dequeued buffer may belong to a different context than ours */
	ctx = lat_buf->ctx;
	mtk_vcodec_dec_enable_hardware(ctx, MTK_VDEC_CORE);
	mtk_vcodec_set_curr_ctx(dev, ctx, MTK_VDEC_CORE);

	lat_buf->core_decode(lat_buf);

	mtk_vcodec_set_curr_ctx(dev, NULL, MTK_VDEC_CORE);
	mtk_vcodec_dec_disable_hardware(ctx, MTK_VDEC_CORE);
	/* return the buffer to its owner so the lat side can reuse it */
	vdec_msg_queue_qbuf(&ctx->msg_queue.lat_ctx, lat_buf);

	wake_up_all(&ctx->msg_queue.core_dec_done);
	spin_lock(&dev->msg_queue_core_ctx.ready_lock);
	lat_buf->ctx->msg_queue.core_work_cnt--;

	if (lat_buf->ctx->msg_queue.core_work_cnt <
	    atomic_read(&lat_buf->ctx->msg_queue.core_list_cnt)) {
		status = queue_work(lat_buf->ctx->dev->core_workqueue,
				    &lat_buf->ctx->msg_queue.core_work);
		if (status)
			lat_buf->ctx->msg_queue.core_work_cnt++;
	}
	spin_unlock(&dev->msg_queue_core_ctx.ready_lock);
}
293
294int vdec_msg_queue_init(struct vdec_msg_queue *msg_queue,
295 struct mtk_vcodec_ctx *ctx, core_decode_cb_t core_decode,
296 int private_size)
297{
298 struct vdec_lat_buf *lat_buf;
299 int i, err;
300
301 /* already init msg queue */
302 if (msg_queue->wdma_addr.size)
303 return 0;
304
305 msg_queue->ctx = ctx;
306 msg_queue->core_work_cnt = 0;
307 vdec_msg_queue_init_ctx(&msg_queue->lat_ctx, MTK_VDEC_LAT0);
308 INIT_WORK(&msg_queue->core_work, vdec_msg_queue_core_work);
309
310 atomic_set(&msg_queue->lat_list_cnt, 0);
311 atomic_set(&msg_queue->core_list_cnt, 0);
312 init_waitqueue_head(&msg_queue->core_dec_done);
313
314 msg_queue->wdma_addr.size =
315 vde_msg_queue_get_trans_size(ctx->picinfo.buf_w,
316 ctx->picinfo.buf_h);
317 err = mtk_vcodec_mem_alloc(ctx, &msg_queue->wdma_addr);
318 if (err) {
319 mtk_v4l2_err("failed to allocate wdma_addr buf");
320 return -ENOMEM;
321 }
322 msg_queue->wdma_rptr_addr = msg_queue->wdma_addr.dma_addr;
323 msg_queue->wdma_wptr_addr = msg_queue->wdma_addr.dma_addr;
324
325 for (i = 0; i < NUM_BUFFER_COUNT; i++) {
326 lat_buf = &msg_queue->lat_buf[i];
327
328 lat_buf->wdma_err_addr.size = VDEC_ERR_MAP_SZ_AVC;
329 err = mtk_vcodec_mem_alloc(ctx, &lat_buf->wdma_err_addr);
330 if (err) {
331 mtk_v4l2_err("failed to allocate wdma_err_addr buf[%d]", i);
332 goto mem_alloc_err;
333 }
334
335 lat_buf->slice_bc_addr.size = VDEC_LAT_SLICE_HEADER_SZ;
336 err = mtk_vcodec_mem_alloc(ctx, &lat_buf->slice_bc_addr);
337 if (err) {
338 mtk_v4l2_err("failed to allocate wdma_addr buf[%d]", i);
339 goto mem_alloc_err;
340 }
341
342 lat_buf->private_data = kzalloc(private_size, GFP_KERNEL);
343 if (!lat_buf->private_data) {
344 err = -ENOMEM;
345 goto mem_alloc_err;
346 }
347
348 lat_buf->ctx = ctx;
349 lat_buf->core_decode = core_decode;
350 err = vdec_msg_queue_qbuf(&msg_queue->lat_ctx, lat_buf);
351 if (err) {
352 mtk_v4l2_err("failed to qbuf buf[%d]", i);
353 goto mem_alloc_err;
354 }
355 }
356 return 0;
357
358mem_alloc_err:
359 vdec_msg_queue_deinit(msg_queue, ctx);
360 return err;
361}