/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>

#include <drm/drm.h>
#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_vce.h"
#include "amdgpu_cs.h"
#include "cikd.h"

/* 1 second timeout */
#define VCE_IDLE_TIMEOUT	msecs_to_jiffies(1000)

/* Firmware Names */
#ifdef CONFIG_DRM_AMDGPU_SI
#define FIRMWARE_VCE_V1_0	"amdgpu/vce_1_0_0.bin"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#define FIRMWARE_BONAIRE	"amdgpu/bonaire_vce.bin"
#define FIRMWARE_KABINI		"amdgpu/kabini_vce.bin"
#define FIRMWARE_KAVERI		"amdgpu/kaveri_vce.bin"
#define FIRMWARE_HAWAII		"amdgpu/hawaii_vce.bin"
#define FIRMWARE_MULLINS	"amdgpu/mullins_vce.bin"
#endif
#define FIRMWARE_TONGA		"amdgpu/tonga_vce.bin"
#define FIRMWARE_CARRIZO	"amdgpu/carrizo_vce.bin"
#define FIRMWARE_FIJI		"amdgpu/fiji_vce.bin"
#define FIRMWARE_STONEY		"amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10	"amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11	"amdgpu/polaris11_vce.bin"
#define FIRMWARE_POLARIS12	"amdgpu/polaris12_vce.bin"
#define FIRMWARE_VEGAM		"amdgpu/vegam_vce.bin"

#define FIRMWARE_VEGA10		"amdgpu/vega10_vce.bin"
#define FIRMWARE_VEGA12		"amdgpu/vega12_vce.bin"
#define FIRMWARE_VEGA20		"amdgpu/vega20_vce.bin"

#ifdef CONFIG_DRM_AMDGPU_SI
MODULE_FIRMWARE(FIRMWARE_VCE_V1_0);
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
MODULE_FIRMWARE(FIRMWARE_KABINI);
MODULE_FIRMWARE(FIRMWARE_KAVERI);
MODULE_FIRMWARE(FIRMWARE_HAWAII);
MODULE_FIRMWARE(FIRMWARE_MULLINS);
#endif
MODULE_FIRMWARE(FIRMWARE_TONGA);
MODULE_FIRMWARE(FIRMWARE_CARRIZO);
MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
MODULE_FIRMWARE(FIRMWARE_POLARIS12);
MODULE_FIRMWARE(FIRMWARE_VEGAM);

MODULE_FIRMWARE(FIRMWARE_VEGA10);
MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);

static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence);

/**
 * amdgpu_vce_firmware_name() - determine the firmware file name for VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Each chip that has VCE IP may need a different firmware.
 * This function returns the name of the VCE firmware file
 * appropriate for the current chip.
 */
static const char *amdgpu_vce_firmware_name(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_PITCAIRN:
	case CHIP_TAHITI:
	case CHIP_VERDE:
		return FIRMWARE_VCE_V1_0;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		return FIRMWARE_BONAIRE;
	case CHIP_KAVERI:
		return FIRMWARE_KAVERI;
	case CHIP_KABINI:
		return FIRMWARE_KABINI;
	case CHIP_HAWAII:
		return FIRMWARE_HAWAII;
	case CHIP_MULLINS:
		return FIRMWARE_MULLINS;
#endif
	case CHIP_TONGA:
		return FIRMWARE_TONGA;
	case CHIP_CARRIZO:
		return FIRMWARE_CARRIZO;
	case CHIP_FIJI:
		return FIRMWARE_FIJI;
	case CHIP_STONEY:
		return FIRMWARE_STONEY;
	case CHIP_POLARIS10:
		return FIRMWARE_POLARIS10;
	case CHIP_POLARIS11:
		return FIRMWARE_POLARIS11;
	case CHIP_POLARIS12:
		return FIRMWARE_POLARIS12;
	case CHIP_VEGAM:
		return FIRMWARE_VEGAM;
	case CHIP_VEGA10:
		return FIRMWARE_VEGA10;
	case CHIP_VEGA12:
		return FIRMWARE_VEGA12;
	case CHIP_VEGA20:
		return FIRMWARE_VEGA20;

	default:
		return NULL;
	}
}

/**
 * amdgpu_vce_early_init() - try to load VCE firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Tries to load the VCE firmware.
 *
 * When it is not found, -ENOENT is returned so that the driver can
 * still load and initialize the rest of the IP blocks.
 * The GPU can function just fine without VCE; it simply won't
 * support video encoding.
 */
int amdgpu_vce_early_init(struct amdgpu_device *adev)
{
	const char *fw_name = amdgpu_vce_firmware_name(adev);
	const struct common_firmware_header *hdr;
	unsigned int ucode_version, version_major, version_minor, binary_id;
	int r;

	if (!fw_name)
		return -ENOENT;

	r = amdgpu_ucode_request(adev, &adev->vce.fw, AMDGPU_UCODE_REQUIRED, "%s", fw_name);
	if (r) {
		dev_err(adev->dev,
			"amdgpu_vce: Firmware \"%s\" not found or failed to validate (%d)\n",
			fw_name, r);

		amdgpu_ucode_release(&adev->vce.fw);
		return -ENOENT;
	}

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;

	ucode_version = le32_to_cpu(hdr->ucode_version);
	version_major = (ucode_version >> 20) & 0xfff;
	version_minor = (ucode_version >> 8) & 0xfff;
	binary_id = ucode_version & 0xff;
	dev_info(adev->dev, "Found VCE firmware Version: %d.%d Binary ID: %d\n",
		 version_major, version_minor, binary_id);
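	/* Pack major/minor/binary id into the driver-internal firmware
	 * version; callers such as the create-message helper extract the
	 * major version with (fw_version >> 24).
	 */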
	adev->vce.fw_version = ((version_major << 24) | (version_minor << 16) |
				(binary_id << 8));

	return 0;
}

/**
 * amdgpu_vce_sw_init() - allocate memory for the VCE BO
 *
 * @adev: amdgpu_device pointer
 * @size: size for the new BO
 *
 * First step to get VCE online: allocate memory for the VCE BO.
 * The VCE firmware binary is copied into the VCE BO later,
 * in amdgpu_vce_resume. VCE executes its code from this BO and
 * also uses the space in it for its stack and data.
 *
 * Ideally this BO should be placed in VRAM for optimal performance,
 * although technically VCE can also run from system RAM (albeit slowly).
 */
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
	int i, r;

	if (!adev->vce.fw)
		return -ENOENT;

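	/* Allow both VRAM and GTT so the allocation can still succeed when
	 * VRAM is under pressure; VRAM is simply the preferred placement.
	 */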
	r = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM |
				    AMDGPU_GEM_DOMAIN_GTT,
				    &adev->vce.vcpu_bo,
				    &adev->vce.gpu_addr, &adev->vce.cpu_addr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate VCE bo\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		atomic_set(&adev->vce.handles[i], 0);
		adev->vce.filp[i] = NULL;
	}

	INIT_DELAYED_WORK(&adev->vce.idle_work, amdgpu_vce_idle_work_handler);
	mutex_init(&adev->vce.idle_mutex);

	return 0;
}

/**
 * amdgpu_vce_sw_fini - free memory
 *
 * @adev: amdgpu_device pointer
 *
 * Last step of VCE teardown; frees the firmware memory and the VCPU BO
 */
int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
{
	unsigned int i;

	if (adev->vce.vcpu_bo == NULL)
		return 0;

	drm_sched_entity_destroy(&adev->vce.entity);

	for (i = 0; i < adev->vce.num_rings; i++)
		amdgpu_ring_fini(&adev->vce.ring[i]);

	amdgpu_ucode_release(&adev->vce.fw);
	mutex_destroy(&adev->vce.idle_mutex);

	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
			      (void **)&adev->vce.cpu_addr);

	return 0;
}

/**
 * amdgpu_vce_entity_init - init entity
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring pointer to check
 *
 * Initialize the entity used for handle management in the kernel driver.
 */
int amdgpu_vce_entity_init(struct amdgpu_device *adev, struct amdgpu_ring *ring)
{
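	/* A single scheduler entity on ring 0 is enough for the kernel's
	 * handle management, so only set it up when ring 0 is initialized.
	 */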
	if (ring == &adev->vce.ring[0]) {
		struct drm_gpu_scheduler *sched = &ring->sched;
		int r;

		r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL,
					  &sched, 1, NULL);
		if (r != 0) {
			DRM_ERROR("Failed setting up VCE run queue.\n");
			return r;
		}
	}

	return 0;
}

/**
 * amdgpu_vce_suspend - unpin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_suspend(struct amdgpu_device *adev)
{
	int i;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->vce.vcpu_bo == NULL)
		return 0;

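	/* Refuse to suspend while any encode session handle is still open;
	 * saving and restoring running encode sessions is not supported.
	 */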
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (atomic_read(&adev->vce.handles[i]))
			break;

	if (i == AMDGPU_MAX_VCE_HANDLES)
		return 0;

	/* TODO: suspending running encoding sessions isn't supported */
	return -EINVAL;
}

/**
 * amdgpu_vce_resume - pin VCE fw memory
 *
 * @adev: amdgpu_device pointer
 *
 */
int amdgpu_vce_resume(struct amdgpu_device *adev)
{
	const struct common_firmware_header *hdr;
	unsigned int offset;
	int idx;

	if (adev->vce.vcpu_bo == NULL)
		return -EINVAL;

	hdr = (const struct common_firmware_header *)adev->vce.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);

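	/* Copy the firmware into the VCPU BO, but only while the device is
	 * still present; drm_dev_enter() fails if it has been unplugged.
	 */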
	if (drm_dev_enter(adev_to_drm(adev), &idx)) {
		memset_io(adev->vce.cpu_addr, 0, amdgpu_bo_size(adev->vce.vcpu_bo));
		memcpy_toio(adev->vce.cpu_addr, adev->vce.fw->data + offset,
			    adev->vce.fw->size - offset);
		drm_dev_exit(idx);
	}

	return 0;
}

/**
 * amdgpu_vce_idle_work_handler - power off VCE
 *
 * @work: pointer to work structure
 *
 * Power off VCE when it's not used anymore
 */
static void amdgpu_vce_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, vce.idle_work.work);
	unsigned int i, count = 0;

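	/* Sum up the fences still outstanding on all VCE rings; only power
	 * down when no pending work is left, otherwise re-arm the timer.
	 */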
	for (i = 0; i < adev->vce.num_rings; i++)
		count += amdgpu_fence_count_emitted(&adev->vce.ring[i]);

	if (count == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, false);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 0, 0);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_GATE);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_GATE);
		}
	} else {
		schedule_delayed_work(&adev->vce.idle_work, VCE_IDLE_TIMEOUT);
	}
}

/**
 * amdgpu_vce_ring_begin_use - power up VCE
 *
 * @ring: amdgpu ring
 *
 * Make sure VCE is powered up when we want to use it
 */
void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	bool set_clocks;

	if (amdgpu_sriov_vf(adev))
		return;

	mutex_lock(&adev->vce.idle_mutex);
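	/* If no idle work was pending it has already run and powered VCE
	 * down, so clocks and gating state have to be brought back up here.
	 */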
	set_clocks = !cancel_delayed_work_sync(&adev->vce.idle_work);
	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_vce(adev, true);
		} else {
			amdgpu_asic_set_vce_clocks(adev, 53300, 40000);
			amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							       AMD_PG_STATE_UNGATE);
		}
	}
	mutex_unlock(&adev->vce.idle_mutex);
}

/**
 * amdgpu_vce_ring_end_use - power VCE down
 *
 * @ring: amdgpu ring
 *
 * Schedule work to power VCE down again
 */
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring)
{
	if (!amdgpu_sriov_vf(ring->adev))
		schedule_delayed_work(&ring->adev->vce.idle_work, VCE_IDLE_TIMEOUT);
}

/**
 * amdgpu_vce_free_handles - free still open VCE handles
 *
 * @adev: amdgpu_device pointer
 * @filp: drm file pointer
 *
 * Close all VCE handles still open by this file pointer
 */
void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->vce.ring[0];
	int i, r;

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->vce.handles[i]);

		if (!handle || adev->vce.filp[i] != filp)
			continue;

		r = amdgpu_vce_get_destroy_msg(ring, handle, false, NULL);
		if (r)
			DRM_ERROR("Error destroying VCE handle (%d)!\n", r);

		adev->vce.filp[i] = NULL;
		atomic_set(&adev->vce.handles[i], 0);
	}
}

/**
 * amdgpu_vce_required_gart_pages() - get the number of GART pages required by VCE
 *
 * @adev: amdgpu_device pointer
 *
 * Returns how many GART pages we need before GTT for the VCE IP block.
 * For VCE1, see vce_v1_0_ensure_vcpu_bo_32bit_addr for details.
 * For VCE2+, this is not needed so return zero.
 */
u32 amdgpu_vce_required_gart_pages(struct amdgpu_device *adev)
{
	/* VCE IP block not added yet, so can't use amdgpu_ip_version */
	if (adev->family == AMDGPU_FAMILY_SI)
		return 512;

	return 0;
}

/**
 * amdgpu_vce_get_create_msg - generate a VCE create msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test
 */
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
				     struct dma_fence **fence)
{
	const unsigned int ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct amdgpu_ib ib_msg;
	struct dma_fence *f = NULL;
	uint64_t addr;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
				     &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
	if (r)
		return r;

	memset(&ib_msg, 0, sizeof(ib_msg));
	/* only one gpu page is needed, alloc +1 page to make addr aligned. */
	r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
			  AMDGPU_IB_POOL_DIRECT,
			  &ib_msg);
	if (r)
		goto err;

	ib = &job->ibs[0];
	/* let addr point to page boundary */
	addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);

	/* stitch together a VCE create msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

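	/* Firmware with major version >= 52 (fw_version is packed with the
	 * major version in bits 31:24) expects the longer 0x40-byte create
	 * command, including the four extra dwords appended further down.
	 */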
	if ((ring->adev->vce.fw_version >> 24) >= 52)
		ib->ptr[ib->length_dw++] = 0x00000040; /* len */
	else
		ib->ptr[ib->length_dw++] = 0x00000030; /* len */
	ib->ptr[ib->length_dw++] = 0x01000001; /* create cmd */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000042;
	ib->ptr[ib->length_dw++] = 0x0000000a;
	ib->ptr[ib->length_dw++] = 0x00000001;
	ib->ptr[ib->length_dw++] = 0x00000080;
	ib->ptr[ib->length_dw++] = 0x00000060;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x00000100;
	ib->ptr[ib->length_dw++] = 0x0000000c;
	ib->ptr[ib->length_dw++] = 0x00000000;
	if ((ring->adev->vce.fw_version >> 24) >= 52) {
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
		ib->ptr[ib->length_dw++] = 0x00000000;
	}

	ib->ptr[ib->length_dw++] = 0x00000014; /* len */
	ib->ptr[ib->length_dw++] = 0x05000005; /* feedback buffer */
	ib->ptr[ib->length_dw++] = upper_32_bits(addr);
	ib->ptr[ib->length_dw++] = addr;
	ib->ptr[ib->length_dw++] = 0x00000001;

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	r = amdgpu_job_submit_direct(job, ring, &f);
	amdgpu_ib_free(&ib_msg, f);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @direct: direct or delayed pool
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
				      bool direct, struct dma_fence **fence)
{
	const unsigned int ib_size_dw = 1024;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	struct dma_fence *f = NULL;
	int i, r;

	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
				     AMDGPU_FENCE_OWNER_UNDEFINED,
				     ib_size_dw * 4,
				     direct ? AMDGPU_IB_POOL_DIRECT :
				     AMDGPU_IB_POOL_DELAYED, &job,
				     AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST);
	if (r)
		return r;

	ib = &job->ibs[0];

	/* stitch together a VCE destroy msg */
	ib->length_dw = 0;
	ib->ptr[ib->length_dw++] = 0x0000000c; /* len */
	ib->ptr[ib->length_dw++] = 0x00000001; /* session cmd */
	ib->ptr[ib->length_dw++] = handle;

	ib->ptr[ib->length_dw++] = 0x00000020; /* len */
	ib->ptr[ib->length_dw++] = 0x00000002; /* task info */
	ib->ptr[ib->length_dw++] = 0xffffffff; /* next task info, set to 0xffffffff if none */
	ib->ptr[ib->length_dw++] = 0x00000001; /* destroy session */
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0x00000000;
	ib->ptr[ib->length_dw++] = 0xffffffff; /* feedback is not needed, set to 0xffffffff and firmware will not output feedback */
	ib->ptr[ib->length_dw++] = 0x00000000;

	ib->ptr[ib->length_dw++] = 0x00000008; /* len */
	ib->ptr[ib->length_dw++] = 0x02000001; /* destroy cmd */

	for (i = ib->length_dw; i < ib_size_dw; ++i)
		ib->ptr[i] = 0x0;

	if (direct)
		r = amdgpu_job_submit_direct(job, ring, &f);
	else
		f = amdgpu_job_submit(job);
	if (r)
		goto err;

	if (fence)
		*fence = dma_fence_get(f);
	dma_fence_put(f);
	return 0;

err:
	amdgpu_job_free(job);
	return r;
}

/**
 * amdgpu_vce_validate_bo - make sure not to cross 4GB boundary
 *
 * @p: cs parser
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Make sure that no BO crosses a 4GB boundary.
 */
static int amdgpu_vce_validate_bo(struct amdgpu_cs_parser *p,
				  struct amdgpu_ib *ib, int lo, int hi,
				  unsigned int size, int32_t index)
{
	int64_t offset = ((uint64_t)size) * ((int64_t)index);
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_bo_va_mapping *mapping;
	unsigned int i, fpfn, lpfn;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
	if (index >= 0) {
		addr += offset;
		fpfn = PAGE_ALIGN(offset) >> PAGE_SHIFT;
		lpfn = 0x100000000ULL >> PAGE_SHIFT;
	} else {
		fpfn = 0;
		lpfn = (0x100000000ULL - PAGE_ALIGN(offset)) >> PAGE_SHIFT;
	}

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

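	/* Tighten the placement limits of the BO so that it cannot cross a
	 * 4GB boundary, then revalidate it with the new constraints.
	 */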
	for (i = 0; i < bo->placement.num_placement; ++i) {
		bo->placements[i].fpfn = max(bo->placements[i].fpfn, fpfn);
		bo->placements[i].lpfn = bo->placements[i].lpfn ?
			min(bo->placements[i].lpfn, lpfn) : lpfn;
	}
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}


/**
 * amdgpu_vce_cs_reloc - command submission relocation
 *
 * @p: parser context
 * @ib: indirect buffer to use
 * @lo: address of lower dword
 * @hi: address of higher dword
 * @size: minimum size
 * @index: bs/fb index
 *
 * Patch relocation inside command stream with real buffer address
 */
static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, struct amdgpu_ib *ib,
			       int lo, int hi, unsigned int size, uint32_t index)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
	int r;

	if (index == 0xffffffff)
		index = 0;

	addr = ((uint64_t)amdgpu_ib_get_value(ib, lo)) |
	       ((uint64_t)amdgpu_ib_get_value(ib, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);

	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010llx %d %d %d %d\n",
			  addr, lo, hi, size, index);
		return r;
	}

	if ((addr + (uint64_t)size) >
	    (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
		DRM_ERROR("BO too small for addr 0x%010llx %d %d\n",
			  addr, lo, hi);
		return -EINVAL;
	}

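	/* Translate the virtual address into the BO's real GPU address, then
	 * subtract the per-index offset added above so the patched value
	 * points at the start of the buffer again.
	 */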
	addr -= mapping->start * AMDGPU_GPU_PAGE_SIZE;
	addr += amdgpu_bo_gpu_offset(bo);
	addr -= ((uint64_t)size) * ((uint64_t)index);

	amdgpu_ib_set_value(ib, lo, lower_32_bits(addr));
	amdgpu_ib_set_value(ib, hi, upper_32_bits(addr));

	return 0;
}

/**
 * amdgpu_vce_validate_handle - validate stream handle
 *
 * @p: parser context
 * @handle: handle to validate
 * @allocated: allocated a new handle?
 *
 * Validates the handle and returns the matching session index, or
 * -EINVAL if the handle is invalid or no free session index is left.
 */
static int amdgpu_vce_validate_handle(struct amdgpu_cs_parser *p,
				      uint32_t handle, uint32_t *allocated)
{
	unsigned int i;

	/* validate the handle */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->adev->vce.handles[i]) == handle) {
			if (p->adev->vce.filp[i] != p->filp) {
				DRM_ERROR("VCE handle collision detected!\n");
				return -EINVAL;
			}
			return i;
		}
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->adev->vce.handles[i], 0, handle)) {
			p->adev->vce.filp[i] = p->filp;
			p->adev->vce.img_size[i] = 0;
			*allocated |= 1 << i;
			return i;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_vce_ring_parse_cs - parse and validate the command stream
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib)
{
	unsigned int fb_idx = 0, bs_idx = 0;
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	uint32_t dummy = 0xffffffff;
	uint32_t *size = &dummy;
	unsigned int idx;
	int i, r = 0;

	job->vm = NULL;

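	/* First pass: only validate and constrain BO placements so that no
	 * buffer crosses a 4GB boundary; the real buffer addresses are
	 * patched into the stream in the second pass below.
	 */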
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_validate_bo(p, ib, idx + 10, idx + 9,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 12, idx + 11,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_validate_bo(p, ib, idx + 3, idx + 2,
						   0, 0);
			if (r)
				goto out;

			r = amdgpu_vce_validate_bo(p, ib, idx + 8, idx + 7,
						   0, 0);
			if (r)
				goto out;
			break;
		}

		idx += len / 4;
	}

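	/* Second pass: validate the session handles and patch the real
	 * buffer addresses into the command stream.
	 */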
	for (idx = 0; idx < ib->length_dw;) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			size = &p->adev->vce.img_size[session_idx];
			break;

		case 0x00000002: /* task info */
			fb_idx = amdgpu_ib_get_value(ib, idx + 6);
			bs_idx = amdgpu_ib_get_value(ib, idx + 7);
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

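			/* Derive the per-session image size estimate from the
			 * create parameters; the relocation checks below use
			 * it as a minimum buffer size.
			 */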
			*size = amdgpu_ib_get_value(ib, idx + 8) *
				amdgpu_ib_get_value(ib, idx + 10) *
				8 * 3 / 2;
			break;

		case 0x04000001: /* config extension */
		case 0x04000002: /* pic control */
		case 0x04000005: /* rate control */
		case 0x04000007: /* motion estimation */
		case 0x04000008: /* rdo */
		case 0x04000009: /* vui */
		case 0x05000002: /* auxiliary buffer */
		case 0x05000009: /* clock table */
			break;

		case 0x0500000c: /* hw config */
			switch (p->adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
			case CHIP_KAVERI:
			case CHIP_MULLINS:
#endif
			case CHIP_CARRIZO:
				break;
			default:
				r = -EINVAL;
				goto out;
			}
			break;

		case 0x03000001: /* encode */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 10, idx + 9,
						*size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 12, idx + 11,
						*size / 3, 0);
			if (r)
				goto out;
			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		case 0x05000001: /* context buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						*size * 2, 0);
			if (r)
				goto out;
			break;

		case 0x05000004: /* video bitstream buffer */
			tmp = amdgpu_ib_get_value(ib, idx + 4);
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						tmp, bs_idx);
			if (r)
				goto out;
			break;

		case 0x05000005: /* feedback buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3, idx + 2,
						4096, fb_idx);
			if (r)
				goto out;
			break;

		case 0x0500000d: /* MV buffer */
			r = amdgpu_vce_cs_reloc(p, ib, idx + 3,
						idx + 2, *size, 0);
			if (r)
				goto out;

			r = amdgpu_vce_cs_reloc(p, ib, idx + 8,
						idx + 7, *size / 12, 0);
			if (r)
				goto out;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			r = -EINVAL;
			goto out;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_parse_cs_vm - parse the command stream in VM mode
 *
 * @p: parser context
 * @job: the job to parse
 * @ib: the IB to patch
 */
int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p,
				struct amdgpu_job *job,
				struct amdgpu_ib *ib)
{
	int session_idx = -1;
	uint32_t destroyed = 0;
	uint32_t created = 0;
	uint32_t allocated = 0;
	uint32_t tmp, handle = 0;
	int i, r = 0, idx = 0;

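	/* In VM mode the buffers are addressed through the GPU VM, so only
	 * the session create/destroy commands need to be tracked here; no
	 * address patching is required.
	 */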
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_ib_get_value(ib, idx);
		uint32_t cmd = amdgpu_ib_get_value(ib, idx + 1);

		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			r = -EINVAL;
			goto out;
		}

		switch (cmd) {
		case 0x00000001: /* session */
			handle = amdgpu_ib_get_value(ib, idx + 2);
			session_idx = amdgpu_vce_validate_handle(p, handle,
								 &allocated);
			if (session_idx < 0) {
				r = session_idx;
				goto out;
			}
			break;

		case 0x01000001: /* create */
			created |= 1 << session_idx;
			if (destroyed & (1 << session_idx)) {
				destroyed &= ~(1 << session_idx);
				allocated |= 1 << session_idx;

			} else if (!(allocated & (1 << session_idx))) {
				DRM_ERROR("Handle already in use!\n");
				r = -EINVAL;
				goto out;
			}

			break;

		case 0x02000001: /* destroy */
			destroyed |= 1 << session_idx;
			break;

		default:
			break;
		}

		if (session_idx == -1) {
			DRM_ERROR("no session command at start of IB\n");
			r = -EINVAL;
			goto out;
		}

		idx += len / 4;
	}

	if (allocated & ~created) {
		DRM_ERROR("New session without create command!\n");
		r = -ENOENT;
	}

out:
	if (!r) {
		/* No error, free all destroyed handle slots */
		tmp = destroyed;
	} else {
		/* Error during parsing, free all allocated handle slots */
		tmp = allocated;
	}

	for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i)
		if (tmp & (1 << i))
			atomic_set(&p->adev->vce.handles[i], 0);

	return r;
}

/**
 * amdgpu_vce_ring_emit_ib - execute indirect buffer
 *
 * @ring: engine to use
 * @job: job to retrieve vmid from
 * @ib: the IB to execute
 * @flags: unused
 *
 */
void amdgpu_vce_ring_emit_ib(struct amdgpu_ring *ring,
			     struct amdgpu_job *job,
			     struct amdgpu_ib *ib,
			     uint32_t flags)
{
	amdgpu_ring_write(ring, VCE_CMD_IB);
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * amdgpu_vce_ring_emit_fence - add a fence command to the ring
 *
 * @ring: engine to use
 * @addr: address
 * @seq: sequence number
 * @flags: fence related flags
 *
 */
void amdgpu_vce_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				unsigned int flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, VCE_CMD_FENCE);
	amdgpu_ring_write(ring, addr);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, VCE_CMD_TRAP);
	amdgpu_ring_write(ring, VCE_CMD_END);
}

/**
 * amdgpu_vce_ring_test_ring - test if VCE ring is working
 *
 * @ring: the engine to test on
 *
 */
int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t rptr;
	unsigned int i;
	int r, timeout = adev->usec_timeout;

	/* skip ring test for sriov */
	if (amdgpu_sriov_vf(adev))
		return 0;

	r = amdgpu_ring_alloc(ring, 16);
	if (r)
		return r;

	rptr = amdgpu_ring_get_rptr(ring);

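	/* Submit a single END command and wait for the read pointer to move
	 * past it, which proves the ring is being processed.
	 */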
	amdgpu_ring_write(ring, VCE_CMD_END);
	amdgpu_ring_commit(ring);

	for (i = 0; i < timeout; i++) {
		if (amdgpu_ring_get_rptr(ring) != rptr)
			break;
		udelay(1);
	}

	if (i >= timeout)
		r = -ETIMEDOUT;

	return r;
}

/**
 * amdgpu_vce_ring_test_ib - test if VCE IBs are working
 *
 * @ring: the engine to test on
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 */
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct dma_fence *fence = NULL;
	long r;

	/* skip vce ring1/2 ib test for now, since it's not reliable */
	if (ring != &ring->adev->vce.ring[0])
		return 0;

	r = amdgpu_vce_get_create_msg(ring, 1, NULL);
	if (r)
		goto error;

	r = amdgpu_vce_get_destroy_msg(ring, 1, true, &fence);
	if (r)
		goto error;

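	/* dma_fence_wait_timeout() returns 0 on timeout and the remaining
	 * jiffies (> 0) on success, so map that onto -ETIMEDOUT/0 here.
	 */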
	r = dma_fence_wait_timeout(fence, false, timeout);
	if (r == 0)
		r = -ETIMEDOUT;
	else if (r > 0)
		r = 0;

error:
	dma_fence_put(fence);
	return r;
}

enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return AMDGPU_RING_PRIO_0;
	case 1:
		return AMDGPU_RING_PRIO_1;
	case 2:
		return AMDGPU_RING_PRIO_2;
	default:
		return AMDGPU_RING_PRIO_0;
	}
}