/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_5_0_d.h"
#include "uvd/uvd_5_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "bif/bif_5_0_d.h"
#include "vi.h"
#include "smu/smu_7_1_2_d.h"
#include "smu/smu_7_1_2_sh_mask.h"

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v5_0_start(struct amdgpu_device *adev);
static void uvd_v5_0_stop(struct amdgpu_device *adev);
static int uvd_v5_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state);
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable);

/**
 * uvd_v5_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v5_0_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v5_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v5_0_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v5_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v5_0_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

static int uvd_v5_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_set_ring_funcs(adev);
        uvd_v5_0_set_irq_funcs(adev);

        return 0;
}

static int uvd_v5_0_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        ring = &adev->uvd.ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.irq, 0);

        return r;
}

static int uvd_v5_0_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        r = amdgpu_uvd_sw_fini(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v5_0_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v5_0_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t tmp;
        int r;

        amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
        uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_UNGATE);
        uvd_v5_0_enable_mgcg(adev, true);

        ring->ready = true;
        r = amdgpu_ring_test_ring(ring);
        if (r) {
                ring->ready = false;
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }

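        /*
         * The commands below raise the semaphore wait/signal timeout
         * limits, clear any stale timeout status, and then switch the
         * semaphore unit on (SEMA_CNTL = 3 presumably enables both the
         * wait and signal sides; the value is kept verbatim from the
         * original bring-up sequence).
         */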
        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v5_0_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v5_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.ring;

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v5_0_stop(adev);

        ring->ready = false;

        return 0;
}

static int uvd_v5_0_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v5_0_hw_fini(adev);
        if (r)
                return r;
        uvd_v5_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return r;
}

static int uvd_v5_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        r = uvd_v5_0_hw_init(adev);
        if (r)
                return r;

        return r;
}

/**
 * uvd_v5_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v5_0_mc_resume(struct amdgpu_device *adev)
{
        uint64_t offset;
        uint32_t size;

        /* program memory controller bits 0-27 */
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
               lower_32_bits(adev->uvd.gpu_addr));
        WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
               upper_32_bits(adev->uvd.gpu_addr));

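        /*
         * Carve the UVD BO into three VCPU cache windows: the firmware
         * image, the heap, and the per-session stack. The OFFSETn
         * registers take 8-byte units, hence the >> 3 below.
         */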
        offset = AMDGPU_UVD_FIRMWARE_OFFSET;
        size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        offset += size;
        size = AMDGPU_UVD_HEAP_SIZE;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        offset += size;
        size = AMDGPU_UVD_STACK_SIZE +
               (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles);
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v5_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v5_0_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.ring;
        uint32_t rb_bufsz, tmp;
        uint32_t lmi_swap_cntl;
        uint32_t mp_swap_cntl;
        int i, j, r;

        /* disable DPG */
        WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

        /* disable byte swapping */
        lmi_swap_cntl = 0;
        mp_swap_cntl = 0;

        uvd_v5_0_mc_resume(adev);

        /* disable interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

        /* stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__RBC_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
                UVD_SOFT_RESET__CXW_SOFT_RESET_MASK |
                UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
        mdelay(5);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

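        /*
         * Program the MPC read/write muxes. The raw values below are
         * carried over from AMD's reference init sequence and are not
         * decoded here.
         */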
        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        /* take all subblocks out of reset, except VCPU */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 1 << 9);

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        /* boot up the VCPU */
        WREG32(mmUVD_SOFT_RESET, 0);
        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }

        /* enable master interrupt */
        WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

        /* clear the bit 4 of UVD_STATUS */
        WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

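        /*
         * Set up the ring buffer control register: RB_BUFSZ is log2 of
         * the ring size, and command fetch and write-pointer polling
         * stay disabled until the ring is fully programmed below.
         */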
        rb_bufsz = order_base_2(ring->ring_size);
        tmp = 0;
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
        tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, tmp);

        /* set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* set the writeback address */
        WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

        /* program the RB_BASE for ring buffer */
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
               lower_32_bits(ring->gpu_addr));
        WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
               upper_32_bits(ring->gpu_addr));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

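        /* everything is programmed, clear RB_NO_FETCH so the RBC starts fetching */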
        WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

        return 0;
}

/**
 * uvd_v5_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * stop the UVD block
 */
static void uvd_v5_0_stop(struct amdgpu_device *adev)
{
        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
        mdelay(1);

        /* put VCPU into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
        mdelay(5);

        /* disable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 0x0);

        /* Unstall UMC and register bus */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32(mmUVD_STATUS, 0);
}

/**
 * uvd_v5_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: address to write the fence sequence number to
 * @seq: sequence number to emit
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                     unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

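        /*
         * Emit the fence: the sequence number goes into UVD_CONTEXT_ID,
         * the fence address into the GPCOM data registers, followed by
         * VCPU command 0 (write fence value) and command 2 (trap),
         * mirroring the earlier radeon UVD code.
         */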
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp flush.
 */
static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0));
        amdgpu_ring_write(ring, 0);
}

/**
 * uvd_v5_0_ring_emit_hdp_invalidate - emit an hdp invalidate
 *
 * @ring: amdgpu_ring pointer
 *
 * Emits an hdp invalidate.
 */
static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
{
        amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0));
        amdgpu_ring_write(ring, 1);
}

/**
 * uvd_v5_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v5_0_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

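        /*
         * The test scribbles a known value into UVD_CONTEXT_ID over
         * MMIO, then asks the ring to overwrite it; if 0xDEADBEEF shows
         * up within the timeout the engine is executing commands.
         */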
        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r) {
                DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
                          ring->idx, r);
                return r;
        }
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                DRM_UDELAY(1);
        }

        if (i < adev->usec_timeout) {
                DRM_INFO("ring test on %d succeeded in %d usecs\n",
                         ring->idx, i);
        } else {
                DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
                          ring->idx, tmp);
                r = -EINVAL;
        }
        return r;
}

/**
 * uvd_v5_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 * @vm_id: virtual memory id (unused by UVD)
 * @ctx_switch: whether a context switch is needed (unused by UVD)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v5_0_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_ib *ib,
                                  unsigned vm_id, bool ctx_switch)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
        amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
        amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

static bool uvd_v5_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v5_0_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

static int uvd_v5_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v5_0_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v5_0_start(adev);
}

static int uvd_v5_0_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        /* TODO */
        return 0;
}

static int uvd_v5_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.ring);
        return 0;
}

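/*
 * Toggle the coarse CGC gates. When enabling, every engine clock except
 * the register bus is gated, and the VCPU clock only when UVD
 * powergating is supported (see the inline note below); when disabling,
 * both gate registers are cleared so all clocks run.
 */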
static void uvd_v5_0_enable_clock_gating(struct amdgpu_device *adev, bool enable)
{
        uint32_t data1, data3, suvd_flags;

        data1 = RREG32(mmUVD_SUVD_CGC_GATE);
        data3 = RREG32(mmUVD_CGC_GATE);

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        if (enable) {
                data3 |= (UVD_CGC_GATE__SYS_MASK |
                          UVD_CGC_GATE__UDEC_MASK |
                          UVD_CGC_GATE__MPEG2_MASK |
                          UVD_CGC_GATE__RBC_MASK |
                          UVD_CGC_GATE__LMI_MC_MASK |
                          UVD_CGC_GATE__IDCT_MASK |
                          UVD_CGC_GATE__MPRD_MASK |
                          UVD_CGC_GATE__MPC_MASK |
                          UVD_CGC_GATE__LBSI_MASK |
                          UVD_CGC_GATE__LRBBM_MASK |
                          UVD_CGC_GATE__UDEC_RE_MASK |
                          UVD_CGC_GATE__UDEC_CM_MASK |
                          UVD_CGC_GATE__UDEC_IT_MASK |
                          UVD_CGC_GATE__UDEC_DB_MASK |
                          UVD_CGC_GATE__UDEC_MP_MASK |
                          UVD_CGC_GATE__WCB_MASK |
                          UVD_CGC_GATE__JPEG_MASK |
                          UVD_CGC_GATE__SCPU_MASK);
                /* only when PG is enabled can we gate the clock to the VCPU */
                if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
                        data3 |= UVD_CGC_GATE__VCPU_MASK;
                data3 &= ~UVD_CGC_GATE__REGS_MASK;
                data1 |= suvd_flags;
        } else {
                data3 = 0;
                data1 = 0;
        }

        WREG32(mmUVD_SUVD_CGC_GATE, data1);
        WREG32(mmUVD_CGC_GATE, data3);
}

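/*
 * Put the CGC into dynamic clock mode and program the gate-delay and
 * clock-off timers; the per-client MODE override bits are cleared so
 * the dynamic mode takes effect everywhere.
 */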
static void uvd_v5_0_set_sw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data2;

        data = RREG32(mmUVD_CGC_CTRL);
        data2 = RREG32(mmUVD_SUVD_CGC_CTRL);

        data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
                  UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);

        data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER)) |
                (4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY));

        data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
                  UVD_CGC_CTRL__SYS_MODE_MASK |
                  UVD_CGC_CTRL__UDEC_MODE_MASK |
                  UVD_CGC_CTRL__MPEG2_MODE_MASK |
                  UVD_CGC_CTRL__REGS_MODE_MASK |
                  UVD_CGC_CTRL__RBC_MODE_MASK |
                  UVD_CGC_CTRL__LMI_MC_MODE_MASK |
                  UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
                  UVD_CGC_CTRL__IDCT_MODE_MASK |
                  UVD_CGC_CTRL__MPRD_MODE_MASK |
                  UVD_CGC_CTRL__MPC_MODE_MASK |
                  UVD_CGC_CTRL__LBSI_MODE_MASK |
                  UVD_CGC_CTRL__LRBBM_MODE_MASK |
                  UVD_CGC_CTRL__WCB_MODE_MASK |
                  UVD_CGC_CTRL__VCPU_MODE_MASK |
                  UVD_CGC_CTRL__JPEG_MODE_MASK |
                  UVD_CGC_CTRL__SCPU_MODE_MASK);
        data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
                   UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);

        WREG32(mmUVD_CGC_CTRL, data);
        WREG32(mmUVD_SUVD_CGC_CTRL, data2);
}

#if 0
static void uvd_v5_0_set_hw_clock_gating(struct amdgpu_device *adev)
{
        uint32_t data, data1, cgc_flags, suvd_flags;

        data = RREG32(mmUVD_CGC_GATE);
        data1 = RREG32(mmUVD_SUVD_CGC_GATE);

        cgc_flags = UVD_CGC_GATE__SYS_MASK |
                    UVD_CGC_GATE__UDEC_MASK |
                    UVD_CGC_GATE__MPEG2_MASK |
                    UVD_CGC_GATE__RBC_MASK |
                    UVD_CGC_GATE__LMI_MC_MASK |
                    UVD_CGC_GATE__IDCT_MASK |
                    UVD_CGC_GATE__MPRD_MASK |
                    UVD_CGC_GATE__MPC_MASK |
                    UVD_CGC_GATE__LBSI_MASK |
                    UVD_CGC_GATE__LRBBM_MASK |
                    UVD_CGC_GATE__UDEC_RE_MASK |
                    UVD_CGC_GATE__UDEC_CM_MASK |
                    UVD_CGC_GATE__UDEC_IT_MASK |
                    UVD_CGC_GATE__UDEC_DB_MASK |
                    UVD_CGC_GATE__UDEC_MP_MASK |
                    UVD_CGC_GATE__WCB_MASK |
                    UVD_CGC_GATE__VCPU_MASK |
                    UVD_CGC_GATE__SCPU_MASK;

        suvd_flags = UVD_SUVD_CGC_GATE__SRE_MASK |
                     UVD_SUVD_CGC_GATE__SIT_MASK |
                     UVD_SUVD_CGC_GATE__SMP_MASK |
                     UVD_SUVD_CGC_GATE__SCM_MASK |
                     UVD_SUVD_CGC_GATE__SDB_MASK;

        data |= cgc_flags;
        data1 |= suvd_flags;

        WREG32(mmUVD_CGC_GATE, data);
        WREG32(mmUVD_SUVD_CGC_GATE, data1);
}
#endif

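/*
 * Medium-grain clock gating switch: the enable path (taken only when
 * the ASIC advertises AMD_CG_SUPPORT_UVD_MGCG) sets the low twelve bits
 * of the indexed UVD_CGC_MEM_CTRL register (presumably the memory
 * light-sleep enables) plus the dynamic clock mode bit; the disable
 * path clears them again.
 */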
static void uvd_v5_0_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0xfff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

static int uvd_v5_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        bool enable = (state == AMD_CG_STATE_GATE);

        if (enable) {
                /* wait for STATUS to clear */
                if (uvd_v5_0_wait_for_idle(handle))
                        return -EBUSY;
                uvd_v5_0_enable_clock_gating(adev, true);

                /* enable HW gates because UVD is idle */
                /* uvd_v5_0_set_hw_clock_gating(adev); */
        } else {
                uvd_v5_0_enable_clock_gating(adev, false);
        }

        uvd_v5_0_set_sw_clock_gating(adev);
        return 0;
}

static int uvd_v5_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        /* This doesn't actually powergate the UVD block.
         * That's done in the dpm code via the SMC. This
         * just re-inits the block as necessary. The actual
         * gating still happens in the dpm code. We should
         * revisit this when there is a cleaner line between
         * the smc and the hw blocks
         */
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int ret = 0;

        if (state == AMD_PG_STATE_GATE) {
                uvd_v5_0_stop(adev);
        } else {
                ret = uvd_v5_0_start(adev);
                if (ret)
                        goto out;
        }

out:
        return ret;
}

static void uvd_v5_0_get_clockgating_state(void *handle, u32 *flags)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int data;

        mutex_lock(&adev->pm.mutex);

        if (RREG32_SMC(ixCURRENT_PG_STATUS) &
            CURRENT_PG_STATUS__UVD_PG_STATUS_MASK) {
                DRM_INFO("Cannot get clockgating state when UVD is powergated.\n");
                goto out;
        }

        /* AMD_CG_SUPPORT_UVD_MGCG */
        data = RREG32(mmUVD_CGC_CTRL);
        if (data & UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK)
                *flags |= AMD_CG_SUPPORT_UVD_MGCG;

out:
        mutex_unlock(&adev->pm.mutex);
}

static const struct amd_ip_funcs uvd_v5_0_ip_funcs = {
        .name = "uvd_v5_0",
        .early_init = uvd_v5_0_early_init,
        .late_init = NULL,
        .sw_init = uvd_v5_0_sw_init,
        .sw_fini = uvd_v5_0_sw_fini,
        .hw_init = uvd_v5_0_hw_init,
        .hw_fini = uvd_v5_0_hw_fini,
        .suspend = uvd_v5_0_suspend,
        .resume = uvd_v5_0_resume,
        .is_idle = uvd_v5_0_is_idle,
        .wait_for_idle = uvd_v5_0_wait_for_idle,
        .soft_reset = uvd_v5_0_soft_reset,
        .set_clockgating_state = uvd_v5_0_set_clockgating_state,
        .set_powergating_state = uvd_v5_0_set_powergating_state,
        .get_clockgating_state = uvd_v5_0_get_clockgating_state,
};

static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .nop = PACKET0(mmUVD_NO_OP, 0),
        .get_rptr = uvd_v5_0_ring_get_rptr,
        .get_wptr = uvd_v5_0_ring_get_wptr,
        .set_wptr = uvd_v5_0_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                2 + /* uvd_v5_0_ring_emit_hdp_flush */
                2 + /* uvd_v5_0_ring_emit_hdp_invalidate */
                14, /* uvd_v5_0_ring_emit_fence x1 no user fence */
        .emit_ib_size = 6, /* uvd_v5_0_ring_emit_ib */
        .emit_ib = uvd_v5_0_ring_emit_ib,
        .emit_fence = uvd_v5_0_ring_emit_fence,
        .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush,
        .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate,
        .test_ring = uvd_v5_0_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = amdgpu_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.ring.funcs = &uvd_v5_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v5_0_irq_funcs = {
        .set = uvd_v5_0_set_interrupt_state,
        .process = uvd_v5_0_process_interrupt,
};

static void uvd_v5_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.irq.num_types = 1;
        adev->uvd.irq.funcs = &uvd_v5_0_irq_funcs;
}

const struct amdgpu_ip_block_version uvd_v5_0_ip_block =
{
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 5,
        .minor = 0,
        .rev = 0,
        .funcs = &uvd_v5_0_ip_funcs,
};