/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v11_structs.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "amdgpu_amdkfd.h"

static inline struct v11_compute_mqd *get_mqd(void *mqd)
{
	return (struct v11_compute_mqd *)mqd;
}

static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v11_sdma_mqd *)mqd;
}

static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			   struct mqd_update_info *minfo)
{
	struct v11_compute_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};

	if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
	    !minfo->cu_mask.ptr)
		return;

	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);

	m = get_mqd(mqd);
	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	m->compute_static_thread_mgmt_se4 = se_mask[4];
	m->compute_static_thread_mgmt_se5 = se_mask[5];
	m->compute_static_thread_mgmt_se6 = se_mask[6];
	m->compute_static_thread_mgmt_se7 = se_mask[7];

	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3,
		m->compute_static_thread_mgmt_se4,
		m->compute_static_thread_mgmt_se5,
		m->compute_static_thread_mgmt_se6,
		m->compute_static_thread_mgmt_se7);
}
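
/*
 * Illustrative sketch (values hypothetical, not from this file):
 * mqd_symmetrically_map_cu_mask() spreads the user's per-CU enable bits
 * evenly across shader engines. On a part with four SEs, a request enabling
 * the first four CUs would typically land one CU per SE, e.g.
 * se_mask[] = { 0x1, 0x1, 0x1, 0x1, 0, 0, 0, 0 }, which is then written to
 * the per-SE compute_static_thread_mgmt_se* fields above.
 */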

static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}
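
/*
 * Hedged note (table semantics assumed from the shared KFD helpers):
 * pipe_priority_map[] collapses the 0..15 queue-priority range into the
 * CP's coarser pipe priorities, while the raw queue priority is written to
 * cp_hqd_queue_priority unchanged; e.g. q->priority = 15 would map to the
 * highest pipe priority the table defines.
 */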

static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
		struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj;
	int size;

	/*
	 * MES writes to areas beyond the MQD size, so allocate one page
	 * of memory for the MQD when MES is enabled.
	 */
	if (kfd->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_compute_mqd);

	if (kfd_gtt_sa_allocate(kfd, size, &mqd_mem_obj))
		return NULL;

	return mqd_mem_obj;
}

static void init_mqd(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	uint64_t addr;
	struct v11_compute_mqd *m;
	int size;

	m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	if (mm->dev->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_compute_mqd);

	memset(m, 0, size);

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	/*
	 * GFX11 RS64 CPFW version >= 509 supports acknowledgment of PCIe
	 * atomics support.
	 */
	if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
		m->cp_hqd_hq_status0 |= 1 << 29;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
	}

	if (mm->dev->cwsr_enabled) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	mm->update_mqd(mm, m, q, NULL);
}

static int load_mqd(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	int r = 0;
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
					(uint32_t __user *)p->write_ptr,
					wptr_shift, 0, mms);
	return r;
}
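
/*
 * Worked example of the shift above (sketch): an AQL packet is 64 bytes,
 * i.e. 16 dwords = 2^4, so a wptr of 2 packets becomes 2 << 4 = 32 dwords,
 * the unit the CP expects. PM4 queues already count in dwords, hence the
 * shift of 0.
 */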

static void update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v11_compute_mqd *m;

	m = get_mqd(mqd);

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |=
			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
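
	/*
	 * Worked example of the size encoding above (sketch, not from the
	 * original source): the low field holds log2(queue size in dwords)
	 * minus one. For a 4 KiB ring, queue_size / 4 = 1024 dwords,
	 * ffs(1024) = 11, so the encoded value is 11 - 1 - 1 = 9,
	 * i.e. 2^(9 + 1) = 1024 dwords.
	 */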

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1) so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 */
	m->cp_hqd_eop_control = min(0xA,
		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
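
	/*
	 * Worked example of the clamp above (sketch): an 8 KiB EOP ring is
	 * 0x2000 / 4 = 0x800 dwords, ffs(0x800) = 12, so the encoding is
	 * 12 - 1 - 1 = 0xA, exactly the cap; larger rings still encode 0xA.
	 */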
	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		/* GC 10 removed WPP_CLAMP from PQ Control */
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |=
			1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->cwsr_enabled)
		m->cp_hqd_ctx_save_control = 0;

	update_cu_mask(mm, mqd, minfo);
	set_priority(m, q);

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

static uint32_t read_doorbell_id(void *mqd)
{
	struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd;

	return m->queue_doorbell_id0;
}

static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v11_compute_mqd *m;
	/*struct mqd_user_context_save_area_header header;*/

	m = get_mqd(mqd);

	/* Control stack is written backwards, while workgroup context data
	 * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
	 * The current positions are at m->cp_hqd_cntl_stack_offset and
	 * m->cp_hqd_wg_state_offset, respectively.
	 */
	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	/* Control stack is not copied to user mode for GFXv11 because
	 * it's part of the context save area that is already
	 * accessible to user mode
	 */
/*
	header.control_stack_size = *ctl_stack_used_size;
	header.wave_state_size = *save_area_used_size;

	header.wave_state_offset = m->cp_hqd_wg_state_offset;
	header.control_stack_offset = m->cp_hqd_cntl_stack_offset;

	if (copy_to_user(ctl_stack, &header, sizeof(header)))
		return -EFAULT;
*/
	return 0;
}
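
/*
 * Illustrative arithmetic (values hypothetical): with
 * cp_hqd_cntl_stack_size = 0x2000 and cp_hqd_cntl_stack_offset = 0x1800
 * after a save, the control stack has grown downward by
 * 0x2000 - 0x1800 = 0x800 bytes, while wave state growing upward to
 * cp_hqd_wg_state_offset = 0x3000 has used 0x3000 - 0x2000 = 0x1000 bytes.
 */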

static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v11_compute_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}
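
/*
 * Hedged note: the HIQ is a kernel-owned queue, so on top of the regular
 * compute MQD setup it marks itself privileged (PRIV_STATE) and flags the
 * queue as kernel-managed (KMD_QUEUE) in cp_hqd_pq_control.
 */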

static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v11_sdma_mqd *m;
	int size;

	m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	if (mm->dev->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_sdma_mqd);

	memset(m, 0, size);
	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
		struct queue_properties *q,
		struct mqd_update_info *minfo)
{
	struct v11_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
		<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
		<< SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
		& SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;
	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}
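
/*
 * Worked example of the RB_SIZE encoding above (sketch): unlike the CP
 * variant, the SDMA field holds log2(ring size in dwords) directly. A 4 KiB
 * ring is 1024 dwords, ffs(1024) - 1 = 10, so RB_SIZE = 10, i.e. 2^10 dwords.
 */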

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v11_compute_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, " ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v11_sdma_mqd), false);
	return 0;
}

#endif

struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
		mqd->get_wave_state = get_wave_state;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_HIQ:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_hiq_load_mqd_kiq;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		mqd->read_doorbell_id = read_doorbell_id;
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_sdma_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
		mqd->is_occupied = kfd_is_occupied_sdma;
		mqd->mqd_size = sizeof(struct v11_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		/*
		 * Use the generic MQD allocation and free functions for
		 * SDMA MQDs when MES is enabled.
		 */
		if (dev->shared_resources.enable_mes) {
			mqd->allocate_mqd = allocate_mqd;
			mqd->free_mqd = kfd_free_mqd_cp;
		}
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}
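
/*
 * Hedged usage sketch (caller shape hypothetical; ops taken from this file):
 * a device-queue manager would typically obtain a manager and create an MQD
 * roughly like this:
 *
 *	struct mqd_manager *mm = mqd_manager_init_v11(KFD_MQD_TYPE_CP, dev);
 *	struct kfd_mem_obj *obj;
 *	uint64_t gart_addr;
 *	void *mqd;
 *
 *	if (mm && (obj = mm->allocate_mqd(dev, &q_props))) {
 *		mm->init_mqd(mm, &mqd, obj, &gart_addr, &q_props);
 *		mm->load_mqd(mm, mqd, pipe_id, queue_id, &q_props, current->mm);
 *	}
 */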