Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm/amdkfd: Added MQD manager files for GFX12.

Initial implementation, based on GFX11.

v2: Removed dbg_wa code as not needed on GFX12.
v3: squash in SDMA queue fixes (Alex)
v4: rebase (Alex)

Signed-off-by: David Belanger <david.belanger@amd.com>
Reviewed-by: Harish Kasiviswanathan <Harish.Kasiviswanathan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by David Belanger, committed by Alex Deucher
48f0bdf4 8aa89b69

+454 -3
+1
drivers/gpu/drm/amd/amdkfd/Makefile
@@ -38,6 +38,7 @@
 		$(AMDKFD_PATH)/kfd_mqd_manager_v9.o \
 		$(AMDKFD_PATH)/kfd_mqd_manager_v10.o \
 		$(AMDKFD_PATH)/kfd_mqd_manager_v11.o \
+		$(AMDKFD_PATH)/kfd_mqd_manager_v12.o \
 		$(AMDKFD_PATH)/kfd_kernel_queue.o \
 		$(AMDKFD_PATH)/kfd_packet_manager.o \
 		$(AMDKFD_PATH)/kfd_packet_manager_vi.o \
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -129,7 +129,7 @@
 	}
 
 	cu_bitmap_sh_mul = (KFD_GC_VERSION(mm->dev) >= IP_VERSION(11, 0, 0) &&
-			    KFD_GC_VERSION(mm->dev) < IP_VERSION(12, 0, 0)) ? 2 : 1;
+			    KFD_GC_VERSION(mm->dev) < IP_VERSION(13, 0, 0)) ? 2 : 1;
 
 	/* Count active CUs per SH.
 	 *
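The guard works because IP_VERSION packs major/minor/revision into one monotonically comparable integer, so raising the upper bound from IP_VERSION(12, 0, 0) to IP_VERSION(13, 0, 0) extends the 2x per-SH CU-bitmap multiplier from GFX11 to GFX12 as well. A minimal userspace sketch of the comparison, assuming the (major << 16 | minor << 8 | rev) packing amdgpu uses for this macro:

#include <stdint.h>
#include <stdio.h>

/* Assumed packing; mirrors amdgpu's IP_VERSION macro. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

/* 2x multiplier applies to GFX11 and, after this patch, GFX12. */
static int cu_bitmap_sh_mul(uint32_t gc_version)
{
        return (gc_version >= IP_VERSION(11, 0, 0) &&
                gc_version < IP_VERSION(13, 0, 0)) ? 2 : 1;
}

int main(void)
{
        printf("%d\n", cu_bitmap_sh_mul(IP_VERSION(10, 3, 0))); /* 1 */
        printf("%d\n", cu_bitmap_sh_mul(IP_VERSION(11, 0, 0))); /* 2 */
        printf("%d\n", cu_bitmap_sh_mul(IP_VERSION(12, 0, 1))); /* 2 with this change */
        return 0;
}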
+447
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2023 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
+#include "v12_structs.h"
+#include "gc/gc_12_0_0_sh_mask.h"
+#include "amdgpu_amdkfd.h"
+
+static inline struct v12_compute_mqd *get_mqd(void *mqd)
+{
+        return (struct v12_compute_mqd *)mqd;
+}
+
+static inline struct v12_sdma_mqd *get_sdma_mqd(void *mqd)
+{
+        return (struct v12_sdma_mqd *)mqd;
+}
+
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+                        struct mqd_update_info *minfo)
+{
+        struct v12_compute_mqd *m;
+        uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
+
+        if (!minfo || !minfo->cu_mask.ptr)
+                return;
+
+        mqd_symmetrically_map_cu_mask(mm,
+                minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);
+
+        m = get_mqd(mqd);
+        m->compute_static_thread_mgmt_se0 = se_mask[0];
+        m->compute_static_thread_mgmt_se1 = se_mask[1];
+        m->compute_static_thread_mgmt_se2 = se_mask[2];
+        m->compute_static_thread_mgmt_se3 = se_mask[3];
+        m->compute_static_thread_mgmt_se4 = se_mask[4];
+        m->compute_static_thread_mgmt_se5 = se_mask[5];
+        m->compute_static_thread_mgmt_se6 = se_mask[6];
+        m->compute_static_thread_mgmt_se7 = se_mask[7];
+
+        pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
+                m->compute_static_thread_mgmt_se0,
+                m->compute_static_thread_mgmt_se1,
+                m->compute_static_thread_mgmt_se2,
+                m->compute_static_thread_mgmt_se3,
+                m->compute_static_thread_mgmt_se4,
+                m->compute_static_thread_mgmt_se5,
+                m->compute_static_thread_mgmt_se6,
+                m->compute_static_thread_mgmt_se7);
+}
+
+static void set_priority(struct v12_compute_mqd *m, struct queue_properties *q)
+{
+        m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
+        m->cp_hqd_queue_priority = q->priority;
+}
+
+static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
+                struct queue_properties *q)
+{
+        struct kfd_mem_obj *mqd_mem_obj;
+
+        /*
+         * Allocate one PAGE_SIZE memory for MQD as MES writes to areas beyond
+         * struct MQD size.
+         */
+        if (kfd_gtt_sa_allocate(node, PAGE_SIZE, &mqd_mem_obj))
+                return NULL;
+
+        return mqd_mem_obj;
+}
+
+static void init_mqd(struct mqd_manager *mm, void **mqd,
+                        struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                        struct queue_properties *q)
+{
+        uint64_t addr;
+        struct v12_compute_mqd *m;
+
+        m = (struct v12_compute_mqd *) mqd_mem_obj->cpu_ptr;
+        addr = mqd_mem_obj->gpu_addr;
+
+        memset(m, 0, PAGE_SIZE);
+
+        m->header = 0xC0310800;
+        m->compute_pipelinestat_enable = 1;
+        m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
+        m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
+        m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
+        m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
+        m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
+        m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
+        m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
+        m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;
+
+        m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
+                        0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;
+
+        m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;
+
+        m->cp_mqd_base_addr_lo        = lower_32_bits(addr);
+        m->cp_mqd_base_addr_hi        = upper_32_bits(addr);
+
+        m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
+                        1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
+                        1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;
+
+        /* Set cp_hqd_hq_status0.c_queue_debug_en to 1 to have the CP set up the
+         * DISPATCH_PTR. This is required for the kfd debugger.
+         */
+        m->cp_hqd_hq_status0 = 1 << 14;
+
+        if (q->format == KFD_QUEUE_FORMAT_AQL) {
+                m->cp_hqd_aql_control =
+                        1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
+        }
+
+        if (mm->dev->kfd->cwsr_enabled) {
+                m->cp_hqd_persistent_state |=
+                        (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
+                m->cp_hqd_ctx_save_base_addr_lo =
+                        lower_32_bits(q->ctx_save_restore_area_address);
+                m->cp_hqd_ctx_save_base_addr_hi =
+                        upper_32_bits(q->ctx_save_restore_area_address);
+                m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
+                m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
+                m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
+                m->cp_hqd_wg_state_offset = q->ctl_stack_size;
+        }
+
+        *mqd = m;
+        if (gart_addr)
+                *gart_addr = addr;
+        mm->update_mqd(mm, m, q, NULL);
+}
+
+static int load_mqd(struct mqd_manager *mm, void *mqd,
+                        uint32_t pipe_id, uint32_t queue_id,
+                        struct queue_properties *p, struct mm_struct *mms)
+{
+        int r = 0;
+        /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
+        uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);
+
+        r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
+                                          (uint32_t __user *)p->write_ptr,
+                                          wptr_shift, 0, mms, 0);
+        return r;
+}
+
+static void update_mqd(struct mqd_manager *mm, void *mqd,
+                        struct queue_properties *q,
+                        struct mqd_update_info *minfo)
+{
+        struct v12_compute_mqd *m;
+
+        m = get_mqd(mqd);
+
+        m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+        m->cp_hqd_pq_control |=
+                        ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+        pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
+
+        m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
+        m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);
+
+        m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+        m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+        m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
+        m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
+
+        m->cp_hqd_pq_doorbell_control =
+                q->doorbell_off <<
+                        CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
+        pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
+                        m->cp_hqd_pq_doorbell_control);
+
+        m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;
+
+        /*
+         * HW does not clamp this field correctly. Maximum EOP queue size
+         * is constrained by per-SE EOP done signal count, which is 8-bit.
+         * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
+         * more than (EOP entry count - 1) so a queue size of 0x800 dwords
+         * is safe, giving a maximum field value of 0xA.
+         */
+        m->cp_hqd_eop_control = min(0xA,
+                ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
+        m->cp_hqd_eop_base_addr_lo =
+                        lower_32_bits(q->eop_ring_buffer_address >> 8);
+        m->cp_hqd_eop_base_addr_hi =
+                        upper_32_bits(q->eop_ring_buffer_address >> 8);
+
+        m->cp_hqd_iq_timer = 0;
+
+        m->cp_hqd_vmid = q->vmid;
+
+        if (q->format == KFD_QUEUE_FORMAT_AQL) {
+                /* GC 10 removed WPP_CLAMP from PQ Control */
+                m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
+                                2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
+                                1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
+                m->cp_hqd_pq_doorbell_control |=
+                        1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
+        }
+        if (mm->dev->kfd->cwsr_enabled)
+                m->cp_hqd_ctx_save_control = 0;
+
+        update_cu_mask(mm, mqd, minfo);
+        set_priority(m, q);
+
+        q->is_active = QUEUE_IS_ACTIVE(*q);
+}
+
+static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
+{
+        struct v12_compute_mqd *m = (struct v12_compute_mqd *)mqd;
+
+        return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
+}
+
+static int get_wave_state(struct mqd_manager *mm, void *mqd,
+                          struct queue_properties *q,
+                          void __user *ctl_stack,
+                          u32 *ctl_stack_used_size,
+                          u32 *save_area_used_size)
+{
+        struct v12_compute_mqd *m;
+        struct mqd_user_context_save_area_header header;
+
+        m = get_mqd(mqd);
+
+        /* Control stack is written backwards, while workgroup context data
+         * is written forwards. Both start from m->cp_hqd_cntl_stack_size.
+         * Current position is at m->cp_hqd_cntl_stack_offset and
+         * m->cp_hqd_wg_state_offset, respectively.
+         */
+        *ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
+                m->cp_hqd_cntl_stack_offset;
+        *save_area_used_size = m->cp_hqd_wg_state_offset -
+                m->cp_hqd_cntl_stack_size;
+
+        /* Control stack is not copied to user mode for GFXv12 because
+         * it's part of the context save area that is already
+         * accessible to user mode.
+         */
+        header.control_stack_size = *ctl_stack_used_size;
+        header.wave_state_size = *save_area_used_size;
+
+        header.wave_state_offset = m->cp_hqd_wg_state_offset;
+        header.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+
+        if (copy_to_user(ctl_stack, &header, sizeof(header)))
+                return -EFAULT;
+
+        return 0;
+}
+
+static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
+                        struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                        struct queue_properties *q)
+{
+        struct v12_compute_mqd *m;
+
+        init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);
+
+        m = get_mqd(*mqd);
+
+        m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
+                        1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
+}
+
+static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
+                struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
+                struct queue_properties *q)
+{
+        struct v12_sdma_mqd *m;
+
+        m = (struct v12_sdma_mqd *) mqd_mem_obj->cpu_ptr;
+
+        memset(m, 0, sizeof(struct v12_sdma_mqd));
+
+        *mqd = m;
+        if (gart_addr)
+                *gart_addr = mqd_mem_obj->gpu_addr;
+
+        mm->update_mqd(mm, m, q, NULL);
+}
+
+#define SDMA_RLC_DUMMY_DEFAULT 0xf
+
+static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
+                struct queue_properties *q,
+                struct mqd_update_info *minfo)
+{
+        struct v12_sdma_mqd *m;
+
+        m = get_sdma_mqd(mqd);
+        m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+                << SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
+                q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
+                1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
+                6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
+                1 << SDMA0_QUEUE0_RB_CNTL__MCU_WPTR_POLL_ENABLE__SHIFT;
+
+        m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
+        m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
+        m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
+        m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
+        m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
+        m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
+        m->sdmax_rlcx_doorbell_offset =
+                q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;
+
+        m->sdma_engine_id = q->sdma_engine_id;
+        m->sdma_queue_id = q->sdma_queue_id;
+
+        m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;
+
+        q->is_active = QUEUE_IS_ACTIVE(*q);
+}
+
+#if defined(CONFIG_DEBUG_FS)
+
+static int debugfs_show_mqd(struct seq_file *m, void *data)
+{
+        seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
+                     data, sizeof(struct v12_compute_mqd), false);
+        return 0;
+}
+
+static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
+{
+        seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
+                     data, sizeof(struct v12_sdma_mqd), false);
+        return 0;
+}
+
+#endif
+
+struct mqd_manager *mqd_manager_init_v12(enum KFD_MQD_TYPE type,
+                struct kfd_node *dev)
+{
+        struct mqd_manager *mqd;
+
+        if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
+                return NULL;
+
+        mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
+        if (!mqd)
+                return NULL;
+
+        mqd->dev = dev;
+
+        switch (type) {
+        case KFD_MQD_TYPE_CP:
+                pr_debug("%s@%i\n", __func__, __LINE__);
+                mqd->allocate_mqd = allocate_mqd;
+                mqd->init_mqd = init_mqd;
+                mqd->free_mqd = kfd_free_mqd_cp;
+                mqd->load_mqd = load_mqd;
+                mqd->update_mqd = update_mqd;
+                mqd->destroy_mqd = kfd_destroy_mqd_cp;
+                mqd->is_occupied = kfd_is_occupied_cp;
+                mqd->mqd_size = sizeof(struct v12_compute_mqd);
+                mqd->get_wave_state = get_wave_state;
+#if defined(CONFIG_DEBUG_FS)
+                mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
+                pr_debug("%s@%i\n", __func__, __LINE__);
+                break;
+        case KFD_MQD_TYPE_HIQ:
+                pr_debug("%s@%i\n", __func__, __LINE__);
+                mqd->allocate_mqd = allocate_hiq_mqd;
+                mqd->init_mqd = init_mqd_hiq;
+                mqd->free_mqd = free_mqd_hiq_sdma;
+                mqd->load_mqd = kfd_hiq_load_mqd_kiq;
+                mqd->update_mqd = update_mqd;
+                mqd->destroy_mqd = kfd_destroy_mqd_cp;
+                mqd->is_occupied = kfd_is_occupied_cp;
+                mqd->mqd_size = sizeof(struct v12_compute_mqd);
+#if defined(CONFIG_DEBUG_FS)
+                mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
+                mqd->check_preemption_failed = check_preemption_failed;
+                pr_debug("%s@%i\n", __func__, __LINE__);
+                break;
+        case KFD_MQD_TYPE_DIQ:
+                mqd->allocate_mqd = allocate_mqd;
+                mqd->init_mqd = init_mqd_hiq;
+                mqd->free_mqd = kfd_free_mqd_cp;
+                mqd->load_mqd = load_mqd;
+                mqd->update_mqd = update_mqd;
+                mqd->destroy_mqd = kfd_destroy_mqd_cp;
+                mqd->is_occupied = kfd_is_occupied_cp;
+                mqd->mqd_size = sizeof(struct v12_compute_mqd);
+#if defined(CONFIG_DEBUG_FS)
+                mqd->debugfs_show_mqd = debugfs_show_mqd;
+#endif
+                break;
+        case KFD_MQD_TYPE_SDMA:
+                pr_debug("%s@%i\n", __func__, __LINE__);
+                mqd->allocate_mqd = allocate_mqd;
+                mqd->init_mqd = init_mqd_sdma;
+                mqd->free_mqd = kfd_free_mqd_cp;
+                mqd->load_mqd = kfd_load_mqd_sdma;
+                mqd->update_mqd = update_mqd_sdma;
+                mqd->destroy_mqd = kfd_destroy_mqd_sdma;
+                mqd->is_occupied = kfd_is_occupied_sdma;
+                mqd->mqd_size = sizeof(struct v12_sdma_mqd);
+#if defined(CONFIG_DEBUG_FS)
+                mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
+#endif
+                pr_debug("%s@%i\n", __func__, __LINE__);
+                break;
+        default:
+                kfree(mqd);
+                return NULL;
+        }
+
+        return mqd;
+}
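Both queue types encode their ring size as a power-of-two order derived with ffs(): the cp_hqd_pq_control size field stores log2(size in dwords) minus one, while the SDMA RB_CNTL RB_SIZE field stores log2(size in dwords) directly. A standalone sketch of that arithmetic (hypothetical ring sizes; userspace ffs() from <strings.h> stands in for the kernel's):

#include <stdio.h>
#include <strings.h>    /* ffs() */

#define DWORDS(bytes)   ((int)((bytes) / sizeof(unsigned int)))

/* cp_hqd_pq_control size field: log2(ring size in dwords) - 1 */
static int pq_order(unsigned int bytes)
{
        return ffs(DWORDS(bytes)) - 1 - 1;
}

/* sdmax_rlcx_rb_cntl RB_SIZE field: log2(ring size in dwords) */
static int rb_size_order(unsigned int bytes)
{
        return ffs(DWORDS(bytes)) - 1;
}

int main(void)
{
        /* 4 KiB ring = 1024 dwords: PQ field 9, SDMA RB_SIZE 10. */
        printf("pq=%d rb=%d\n", pq_order(4096), rb_size_order(4096));
        /* 1 MiB ring = 262144 dwords: PQ field 17, SDMA RB_SIZE 18. */
        printf("pq=%d rb=%d\n", pq_order(1 << 20), rb_size_order(1 << 20));
        return 0;
}

This is also why update_mqd() clamps cp_hqd_eop_control to 0xA: an order of 0xA corresponds to a 0x800-dword EOP ring, the largest size the 8-bit per-SE EOP done counter can safely cover.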
+2
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -1295,6 +1295,8 @@
 						struct kfd_node *dev);
 struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
 		struct kfd_node *dev);
+struct mqd_manager *mqd_manager_init_v12(enum KFD_MQD_TYPE type,
+		struct kfd_node *dev);
 struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev);
 void device_queue_manager_uninit(struct device_queue_manager *dqm);
 struct kernel_queue *kernel_queue_init(struct kfd_node *dev,
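The new prototype joins the per-generation initializers declared above it; the driver picks one at device init based on the GC IP version. The actual selection logic lives elsewhere in kfd and is not part of this diff, so the sketch below only illustrates the pattern, with a hypothetical helper name:

/* Hypothetical dispatch helper; the function names mirror the
 * declarations above, but this selection code is not in the patch.
 */
static struct mqd_manager *mqd_manager_init_for_node(enum KFD_MQD_TYPE type,
                                                     struct kfd_node *dev)
{
        if (KFD_GC_VERSION(dev) >= IP_VERSION(12, 0, 0))
                return mqd_manager_init_v12(type, dev);
        if (KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0))
                return mqd_manager_init_v11(type, dev);
        if (KFD_GC_VERSION(dev) >= IP_VERSION(10, 0, 0))
                return mqd_manager_init_v10(type, dev);
        return mqd_manager_init_v9(type, dev);
}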
+3 -2
drivers/gpu/drm/amd/include/v12_structs.h
@@ -666,8 +666,9 @@
 	uint32_t reserved_123;	// offset: 123  (0x7B)
 	uint32_t reserved_124;	// offset: 124  (0x7C)
 	uint32_t reserved_125;	// offset: 125  (0x7D)
-	uint32_t reserved_126;	// offset: 126  (0x7E)
-	uint32_t reserved_127;	// offset: 127  (0x7F)
+	/* reserved_126,127: repurposed for driver-internal use */
+	uint32_t sdma_engine_id;
+	uint32_t sdma_queue_id;
 };
 
 struct v12_compute_mqd {
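Repurposing the last two reserved dwords keeps struct v12_sdma_mqd at the same size and layout the hardware expects; only the driver reads the new fields back (update_mqd_sdma writes them, as shown above). A compile-time check along these lines, which is a sketch and not part of the patch, would catch accidental layout drift; the mock struct and its name are illustrative only, assuming the MQD spans dwords 0..127 as the offset comments indicate:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Mock of the tail of struct v12_sdma_mqd for illustration only;
 * the real definition lives in v12_structs.h.
 */
struct v12_sdma_mqd_mock {
        uint32_t dwords_0_to_125[126];
        uint32_t sdma_engine_id;        /* was reserved_126 */
        uint32_t sdma_queue_id;         /* was reserved_127 */
};

static_assert(sizeof(struct v12_sdma_mqd_mock) == 128 * sizeof(uint32_t),
              "SDMA MQD must stay 128 dwords");
static_assert(offsetof(struct v12_sdma_mqd_mock, sdma_engine_id) == 126 * 4,
              "sdma_engine_id must stay at dword 126");
static_assert(offsetof(struct v12_sdma_mqd_mock, sdma_queue_id) == 127 * 4,
              "sdma_queue_id must stay at dword 127");

int main(void) { return 0; }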