/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mmu_context.h>

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};

enum {
	ADDRESS_WATCH_REG_ADDR_HI = 0,
	ADDRESS_WATCH_REG_ADDR_LO,
	ADDRESS_WATCH_REG_CNTL,
	ADDRESS_WATCH_REG_MAX
};

/* not defined in the CI/KV reg file */
enum {
	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
	/* extend the mask to 26 bits to match the low address field */
	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};

static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};

union TCP_WATCH_CNTL_BITS {
	struct {
		uint32_t mask:24;
		uint32_t vmid:4;
		uint32_t atc:1;
		uint32_t mode:2;
		uint32_t valid:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};
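
/*
 * Illustrative sketch (not part of the upstream flow): a watch-point
 * control word can be assembled through the bitfield view and programmed
 * as a single 32-bit write, e.g. for a hypothetical VMID 8 with ATC
 * translation enabled:
 *
 *	union TCP_WATCH_CNTL_BITS cntl = { .u32All = 0 };
 *
 *	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
 *	cntl.bitfields.vmid = 8;
 *	cntl.bitfields.atc = 1;
 *	cntl.bitfields.valid = 1;
 *	WREG32(mmTCP_WATCH0_CNTL, cntl.u32All);
 */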

/* Because REG_GET_FIELD() is used, this function lives in the
 * ASIC-specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}
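
/*
 * Illustrative note: SRBM_GFX_CNTL selects which MEC/pipe/queue/VMID
 * aperture the banked CP_HQD_* and SH_MEM_* registers decode to, so
 * every per-queue register access below is bracketed as, e.g.:
 *
 *	acquire_queue(kgd, pipe_id, queue_id);
 *	... RREG32()/WREG32() on CP_HQD_* registers ...
 *	release_queue(kgd);
 */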

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}
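
/*
 * Worked example (illustrative): mapping a hypothetical PASID 0x23 to
 * VMID 5 writes (0x23 | VALID) to the register at
 * mmATC_VMID0_PASID_MAPPING + 5, spins until bit 5 of
 * ATC_VMID_PASID_MAPPING_UPDATE_STATUS is set, write-clears that bit,
 * and then mirrors the same value into the IH LUT at mmIH_VMID_0_LUT + 5.
 */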

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
			m->sdma_engine_id, m->sdma_queue_id, retval);

	return retval;
}
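
/*
 * Worked example (illustrative; the actual stride constants come from
 * the cik_sdma/cikd headers): for sdma_engine_id = 1, sdma_queue_id = 2
 * this returns 1 * SDMA1_REGISTER_OFFSET + 2 * KFD_CIK_SDMA_QUEUE_OFFSET,
 * i.e. the register block of RLC queue 2 on the second SDMA engine,
 * which callers then add to the mmSDMA0_RLC0_* register offsets.
 */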

static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_wptr may take the mmap_sem.
	 * Release srbm_mutex to avoid a circular dependency between
	 * srbm_mutex->mmap_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_dump(struct kgd_dev *kgd,
			uint32_t pipe_id, uint32_t queue_id,
			uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do {				\
		if (WARN_ON_ONCE(i >= HQD_N_REGS))	\
			break;				\
		(*dump)[i][0] = (addr) << 2;		\
		(*dump)[i++][1] = RREG32(addr);		\
	} while (0)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	acquire_queue(kgd, pipe_id, queue_id);

	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
	DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		DUMP_REG(reg);

	release_queue(kgd);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}
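
/*
 * Illustrative note: each dump entry is a {byte offset, value} pair;
 * DUMP_REG() shifts the dword register index left by 2 to get the byte
 * offset. A hypothetical consumer holding the returned array could
 * print it as:
 *
 *	for (i = 0; i < n_regs; i++)
 *		pr_info("0x%08x: 0x%08x\n", dump[i][0], dump[i][1]);
 */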

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd,
			     uint32_t __user *wptr, struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	unsigned long end_jiffies;
	uint32_t sdma_rlc_reg_offset;
	uint32_t data;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

	end_jiffies = msecs_to_jiffies(2000) + jiffies;
	while (true) {
		data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
			     ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
				m->sdma_rlc_rb_rptr);

	if (read_user_wptr(mm, wptr, data))
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
	else
		WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
		       m->sdma_rlc_rb_rptr);

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
				m->sdma_rlc_virtual_addr);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);

	data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
			     RB_ENABLE, 1);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

	return 0;
}

static int kgd_hqd_sdma_dump(struct kgd_dev *kgd,
			     uint32_t engine_id, uint32_t queue_id,
			     uint32_t (**dump)[2], uint32_t *n_regs)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
		queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
	uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)

	*dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL);
	if (*dump == NULL)
		return -ENOMEM;

	for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
		DUMP_REG(sdma_offset + reg);
	for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
	     reg++)
		DUMP_REG(sdma_offset + reg);

	WARN_ON_ONCE(i != HQD_N_REGS);
	*n_regs = i;

	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}
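
/*
 * Illustrative note: CP_HQD_PQ_BASE/CP_HQD_PQ_BASE_HI hold the ring
 * base in 256-byte units, hence the queue_address >> 8 before the
 * compare; e.g. a hypothetical ring at GPU VA 0x10000000 would be
 * matched against a CP_HQD_PQ_BASE value of 0x100000.
 */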

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
				enum kfd_preempt_type reset_type,
				unsigned int utimeout, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;

	if (adev->in_gpu_reset)
		return -EIO;

	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or the timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing, and that wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}
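
/*
 * Illustrative note: utimeout is in milliseconds, so
 * (utimeout * HZ / 1000) converts it to jiffies; with a hypothetical
 * HZ = 250, utimeout = 2000 gives a 500-jiffy budget. The
 * 5000 x ndelay(100) bound in the IQ-timer loop above likewise works
 * out to the 500 usec maximum mentioned in its comment.
 */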

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_rlc_reg_offset;
	uint32_t temp;
	unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

	m = get_sdma_mqd(mqd);
	sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

	temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
		RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
		SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

	m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

	return 0;
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;
	unsigned int i;

	cntl.u32All = 0;

	cntl.bitfields.valid = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.atc = 1;

	/* Turn off each watch point until all its registers are set */
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
			ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = cntl_val;

	/* Turning off this watch point until we set all the registers */
	cntl.bitfields.valid = 0;
	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

	/* Enable the watch point */
	cntl.bitfields.valid = 1;

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}
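
/*
 * Illustrative note: the programming order above matters; the watch
 * point is first invalidated (valid = 0), then ADDR_HI/ADDR_LO are
 * written, and only then is the control word rewritten with valid = 1,
 * so the hardware never observes a half-programmed address range.
 */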

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */

	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}

static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
					uint8_t vmid, uint16_t *p_pasid)
{
	uint32_t value;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	*p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

	return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
			uint64_t page_table_base)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("trying to set page table base for wrong VMID\n");
		return;
	}
	WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
		lower_32_bits(page_table_base));
}
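
/*
 * Illustrative note: the KFD-owned VMIDs start at 8 here, and their
 * page table base registers are laid out contiguously from
 * mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR, hence the (vmid - 8) indexing;
 * e.g. a hypothetical vmid = 10 lands two register slots past
 * context 8's.
 */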

static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	int vmid;
	unsigned int tmp;

	if (adev->in_gpu_reset)
		return -EIO;

	for (vmid = 0; vmid < 16; vmid++) {
		if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
			continue;

		tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
			(tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid) {
			WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
			RREG32(mmVM_INVALIDATE_RESPONSE);
			break;
		}
	}

	return 0;
}

static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
		pr_err("non kfd vmid\n");
		return 0;
	}

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
	RREG32(mmVM_INVALIDATE_RESPONSE);
	return 0;
}

/**
 * read_vmid_from_vmfault_reg - read the VMID from the VM fault status register
 *
 * @kgd: kgd device pointer
 *
 * Returns the VMID field latched in VM_CONTEXT1_PROTECTION_FAULT_STATUS (CIK).
 */
static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);

	return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}

const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_dump = kgd_hqd_dump,
	.hqd_sdma_dump = kgd_hqd_sdma_dump,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
	.set_vm_context_page_table_base = set_vm_context_page_table_base,
	.invalidate_tlbs = invalidate_tlbs,
	.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
	.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};