Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/amdkfd: replace kgd_dev in hqd/mqd kfd2kgd funcs

Modified definitions:

- hqd_load
- hiq_mqd_load
- hqd_sdma_load
- hqd_dump
- hqd_sdma_dump
- hqd_is_occupied
- hqd_destroy
- hqd_sdma_is_occupied
- hqd_sdma_destroy

Signed-off-by: Graham Sider <Graham.Sider@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Graham Sider and committed by Alex Deucher.
420185fd c531a58b

+129 -166
+5 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
··· 123 123 return sdma_rlc_reg_offset; 124 124 } 125 125 126 - int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, 126 + int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, 127 127 uint32_t __user *wptr, struct mm_struct *mm) 128 128 { 129 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 130 129 struct v9_sdma_mqd *m; 131 130 uint32_t sdma_rlc_reg_offset; 132 131 unsigned long end_jiffies; ··· 192 193 return 0; 193 194 } 194 195 195 - int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd, 196 + int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev, 196 197 uint32_t engine_id, uint32_t queue_id, 197 198 uint32_t (**dump)[2], uint32_t *n_regs) 198 199 { 199 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 200 200 uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, 201 201 engine_id, queue_id); 202 202 uint32_t i = 0, reg; ··· 223 225 return 0; 224 226 } 225 227 226 - bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) 228 + bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev, 229 + void *mqd) 227 230 { 228 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 229 231 struct v9_sdma_mqd *m; 230 232 uint32_t sdma_rlc_reg_offset; 231 233 uint32_t sdma_rlc_rb_cntl; ··· 242 244 return false; 243 245 } 244 246 245 - int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, 247 + int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, 246 248 unsigned int utimeout) 247 249 { 248 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 249 250 struct v9_sdma_mqd *m; 250 251 uint32_t sdma_rlc_reg_offset; 251 252 uint32_t temp;
+5 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.h
··· 20 20 * OTHER DEALINGS IN THE SOFTWARE. 21 21 */ 22 22 23 - int kgd_arcturus_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, 23 + int kgd_arcturus_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, 24 24 uint32_t __user *wptr, struct mm_struct *mm); 25 - int kgd_arcturus_hqd_sdma_dump(struct kgd_dev *kgd, 25 + int kgd_arcturus_hqd_sdma_dump(struct amdgpu_device *adev, 26 26 uint32_t engine_id, uint32_t queue_id, 27 27 uint32_t (**dump)[2], uint32_t *n_regs); 28 - bool kgd_arcturus_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); 29 - int kgd_arcturus_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, 28 + bool kgd_arcturus_hqd_sdma_is_occupied(struct amdgpu_device *adev, 29 + void *mqd); 30 + int kgd_arcturus_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, 30 31 unsigned int utimeout);
+14 -22
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
··· 212 212 return (struct v10_sdma_mqd *)mqd; 213 213 } 214 214 215 - static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 216 - uint32_t queue_id, uint32_t __user *wptr, 217 - uint32_t wptr_shift, uint32_t wptr_mask, 218 - struct mm_struct *mm) 215 + static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, 216 + uint32_t pipe_id, uint32_t queue_id, 217 + uint32_t __user *wptr, uint32_t wptr_shift, 218 + uint32_t wptr_mask, struct mm_struct *mm) 219 219 { 220 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 221 220 struct v10_compute_mqd *m; 222 221 uint32_t *mqd_hqd; 223 222 uint32_t reg, hqd_base, data; ··· 294 295 return 0; 295 296 } 296 297 297 - static int kgd_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, 298 + static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, 298 299 uint32_t pipe_id, uint32_t queue_id, 299 300 uint32_t doorbell_off) 300 301 { 301 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 302 302 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 303 303 struct v10_compute_mqd *m; 304 304 uint32_t mec, pipe; ··· 346 348 return r; 347 349 } 348 350 349 - static int kgd_hqd_dump(struct kgd_dev *kgd, 351 + static int kgd_hqd_dump(struct amdgpu_device *adev, 350 352 uint32_t pipe_id, uint32_t queue_id, 351 353 uint32_t (**dump)[2], uint32_t *n_regs) 352 354 { 353 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 354 355 uint32_t i = 0, reg; 355 356 #define HQD_N_REGS 56 356 357 #define DUMP_REG(addr) do { \ ··· 377 380 return 0; 378 381 } 379 382 380 - static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, 383 + static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, 381 384 uint32_t __user *wptr, struct mm_struct *mm) 382 385 { 383 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 384 386 struct v10_sdma_mqd *m; 385 387 uint32_t sdma_rlc_reg_offset; 386 388 unsigned long end_jiffies; ··· 446 450 return 0; 447 451 } 448 452 449 - static int kgd_hqd_sdma_dump(struct kgd_dev 
*kgd, 453 + static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, 450 454 uint32_t engine_id, uint32_t queue_id, 451 455 uint32_t (**dump)[2], uint32_t *n_regs) 452 456 { 453 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 454 457 uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, 455 458 engine_id, queue_id); 456 459 uint32_t i = 0, reg; ··· 477 482 return 0; 478 483 } 479 484 480 - static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, 481 - uint32_t pipe_id, uint32_t queue_id) 485 + static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, 486 + uint64_t queue_address, uint32_t pipe_id, 487 + uint32_t queue_id) 482 488 { 483 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 484 489 uint32_t act; 485 490 bool retval = false; 486 491 uint32_t low, high; ··· 499 504 return retval; 500 505 } 501 506 502 - static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) 507 + static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) 503 508 { 504 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 505 509 struct v10_sdma_mqd *m; 506 510 uint32_t sdma_rlc_reg_offset; 507 511 uint32_t sdma_rlc_rb_cntl; ··· 517 523 return false; 518 524 } 519 525 520 - static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, 526 + static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, 521 527 enum kfd_preempt_type reset_type, 522 528 unsigned int utimeout, uint32_t pipe_id, 523 529 uint32_t queue_id) 524 530 { 525 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 526 531 enum hqd_dequeue_request_type type; 527 532 unsigned long end_jiffies; 528 533 uint32_t temp; ··· 630 637 return 0; 631 638 } 632 639 633 - static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, 640 + static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, 634 641 unsigned int utimeout) 635 642 { 636 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 637 643 struct v10_sdma_mqd *m; 638 644 uint32_t 
sdma_rlc_reg_offset; 639 645 uint32_t temp;
+15 -22
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c
··· 182 182 return (struct v10_sdma_mqd *)mqd; 183 183 } 184 184 185 - static int hqd_load_v10_3(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 186 - uint32_t queue_id, uint32_t __user *wptr, 187 - uint32_t wptr_shift, uint32_t wptr_mask, 188 - struct mm_struct *mm) 185 + static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd, 186 + uint32_t pipe_id, uint32_t queue_id, 187 + uint32_t __user *wptr, uint32_t wptr_shift, 188 + uint32_t wptr_mask, struct mm_struct *mm) 189 189 { 190 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 191 190 struct v10_compute_mqd *m; 192 191 uint32_t *mqd_hqd; 193 192 uint32_t reg, hqd_base, data; ··· 279 280 return 0; 280 281 } 281 282 282 - static int hiq_mqd_load_v10_3(struct kgd_dev *kgd, void *mqd, 283 + static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, 283 284 uint32_t pipe_id, uint32_t queue_id, 284 285 uint32_t doorbell_off) 285 286 { 286 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 287 287 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 288 288 struct v10_compute_mqd *m; 289 289 uint32_t mec, pipe; ··· 331 333 return r; 332 334 } 333 335 334 - static int hqd_dump_v10_3(struct kgd_dev *kgd, 336 + static int hqd_dump_v10_3(struct amdgpu_device *adev, 335 337 uint32_t pipe_id, uint32_t queue_id, 336 338 uint32_t (**dump)[2], uint32_t *n_regs) 337 339 { 338 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 339 340 uint32_t i = 0, reg; 340 341 #define HQD_N_REGS 56 341 342 #define DUMP_REG(addr) do { \ ··· 362 365 return 0; 363 366 } 364 367 365 - static int hqd_sdma_load_v10_3(struct kgd_dev *kgd, void *mqd, 368 + static int hqd_sdma_load_v10_3(struct amdgpu_device *adev, void *mqd, 366 369 uint32_t __user *wptr, struct mm_struct *mm) 367 370 { 368 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 369 371 struct v10_sdma_mqd *m; 370 372 uint32_t sdma_rlc_reg_offset; 371 373 unsigned long end_jiffies; ··· 431 435 return 0; 432 436 } 433 437 434 - static int 
hqd_sdma_dump_v10_3(struct kgd_dev *kgd, 438 + static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev, 435 439 uint32_t engine_id, uint32_t queue_id, 436 440 uint32_t (**dump)[2], uint32_t *n_regs) 437 441 { 438 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 439 442 uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, 440 443 engine_id, queue_id); 441 444 uint32_t i = 0, reg; ··· 462 467 return 0; 463 468 } 464 469 465 - static bool hqd_is_occupied_v10_3(struct kgd_dev *kgd, uint64_t queue_address, 466 - uint32_t pipe_id, uint32_t queue_id) 470 + static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev, 471 + uint64_t queue_address, uint32_t pipe_id, 472 + uint32_t queue_id) 467 473 { 468 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 469 474 uint32_t act; 470 475 bool retval = false; 471 476 uint32_t low, high; ··· 484 489 return retval; 485 490 } 486 491 487 - static bool hqd_sdma_is_occupied_v10_3(struct kgd_dev *kgd, void *mqd) 492 + static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev, 493 + void *mqd) 488 494 { 489 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 490 495 struct v10_sdma_mqd *m; 491 496 uint32_t sdma_rlc_reg_offset; 492 497 uint32_t sdma_rlc_rb_cntl; ··· 503 508 return false; 504 509 } 505 510 506 - static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd, 511 + static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd, 507 512 enum kfd_preempt_type reset_type, 508 513 unsigned int utimeout, uint32_t pipe_id, 509 514 uint32_t queue_id) 510 515 { 511 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 512 516 enum hqd_dequeue_request_type type; 513 517 unsigned long end_jiffies; 514 518 uint32_t temp; ··· 553 559 return 0; 554 560 } 555 561 556 - static int hqd_sdma_destroy_v10_3(struct kgd_dev *kgd, void *mqd, 562 + static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd, 557 563 unsigned int utimeout) 558 564 { 559 - struct amdgpu_device *adev = 
get_amdgpu_device(kgd); 560 565 struct v10_sdma_mqd *m; 561 566 uint32_t sdma_rlc_reg_offset; 562 567 uint32_t temp;
+13 -20
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
··· 202 202 return (struct cik_sdma_rlc_registers *)mqd; 203 203 } 204 204 205 - static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 206 - uint32_t queue_id, uint32_t __user *wptr, 207 - uint32_t wptr_shift, uint32_t wptr_mask, 208 - struct mm_struct *mm) 205 + static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, 206 + uint32_t pipe_id, uint32_t queue_id, 207 + uint32_t __user *wptr, uint32_t wptr_shift, 208 + uint32_t wptr_mask, struct mm_struct *mm) 209 209 { 210 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 211 210 struct cik_mqd *m; 212 211 uint32_t *mqd_hqd; 213 212 uint32_t reg, wptr_val, data; ··· 247 248 return 0; 248 249 } 249 250 250 - static int kgd_hqd_dump(struct kgd_dev *kgd, 251 + static int kgd_hqd_dump(struct amdgpu_device *adev, 251 252 uint32_t pipe_id, uint32_t queue_id, 252 253 uint32_t (**dump)[2], uint32_t *n_regs) 253 254 { 254 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 255 255 uint32_t i = 0, reg; 256 256 #define HQD_N_REGS (35+4) 257 257 #define DUMP_REG(addr) do { \ ··· 282 284 return 0; 283 285 } 284 286 285 - static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, 287 + static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, 286 288 uint32_t __user *wptr, struct mm_struct *mm) 287 289 { 288 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 289 290 struct cik_sdma_rlc_registers *m; 290 291 unsigned long end_jiffies; 291 292 uint32_t sdma_rlc_reg_offset; ··· 337 340 return 0; 338 341 } 339 342 340 - static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, 343 + static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, 341 344 uint32_t engine_id, uint32_t queue_id, 342 345 uint32_t (**dump)[2], uint32_t *n_regs) 343 346 { 344 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 345 347 uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET + 346 348 queue_id * KFD_CIK_SDMA_QUEUE_OFFSET; 347 349 uint32_t i = 0, reg; ··· 363 367 return 0; 364 368 } 365 369 366 - static 
bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, 367 - uint32_t pipe_id, uint32_t queue_id) 370 + static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, 371 + uint64_t queue_address, uint32_t pipe_id, 372 + uint32_t queue_id) 368 373 { 369 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 370 374 uint32_t act; 371 375 bool retval = false; 372 376 uint32_t low, high; ··· 385 389 return retval; 386 390 } 387 391 388 - static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) 392 + static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) 389 393 { 390 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 391 394 struct cik_sdma_rlc_registers *m; 392 395 uint32_t sdma_rlc_reg_offset; 393 396 uint32_t sdma_rlc_rb_cntl; ··· 402 407 return false; 403 408 } 404 409 405 - static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, 410 + static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, 406 411 enum kfd_preempt_type reset_type, 407 412 unsigned int utimeout, uint32_t pipe_id, 408 413 uint32_t queue_id) 409 414 { 410 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 411 415 uint32_t temp; 412 416 enum hqd_dequeue_request_type type; 413 417 unsigned long flags, end_jiffies; ··· 503 509 return 0; 504 510 } 505 511 506 - static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, 512 + static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, 507 513 unsigned int utimeout) 508 514 { 509 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 510 515 struct cik_sdma_rlc_registers *m; 511 516 uint32_t sdma_rlc_reg_offset; 512 517 uint32_t temp;
+14 -21
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
··· 160 160 return (struct vi_sdma_mqd *)mqd; 161 161 } 162 162 163 - static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 164 - uint32_t queue_id, uint32_t __user *wptr, 165 - uint32_t wptr_shift, uint32_t wptr_mask, 166 - struct mm_struct *mm) 163 + static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, 164 + uint32_t pipe_id, uint32_t queue_id, 165 + uint32_t __user *wptr, uint32_t wptr_shift, 166 + uint32_t wptr_mask, struct mm_struct *mm) 167 167 { 168 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 169 168 struct vi_mqd *m; 170 169 uint32_t *mqd_hqd; 171 170 uint32_t reg, wptr_val, data; ··· 200 201 * on ASICs that do not support context-save. 201 202 * EOP writes/reads can start anywhere in the ring. 202 203 */ 203 - if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) { 204 + if (adev->asic_type != CHIP_TONGA) { 204 205 WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr); 205 206 WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr); 206 207 WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem); ··· 234 235 return 0; 235 236 } 236 237 237 - static int kgd_hqd_dump(struct kgd_dev *kgd, 238 + static int kgd_hqd_dump(struct amdgpu_device *adev, 238 239 uint32_t pipe_id, uint32_t queue_id, 239 240 uint32_t (**dump)[2], uint32_t *n_regs) 240 241 { 241 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 242 242 uint32_t i = 0, reg; 243 243 #define HQD_N_REGS (54+4) 244 244 #define DUMP_REG(addr) do { \ ··· 269 271 return 0; 270 272 } 271 273 272 - static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, 274 + static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, 273 275 uint32_t __user *wptr, struct mm_struct *mm) 274 276 { 275 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 276 277 struct vi_sdma_mqd *m; 277 278 unsigned long end_jiffies; 278 279 uint32_t sdma_rlc_reg_offset; ··· 323 326 return 0; 324 327 } 325 328 326 - static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, 329 + static int kgd_hqd_sdma_dump(struct 
amdgpu_device *adev, 327 330 uint32_t engine_id, uint32_t queue_id, 328 331 uint32_t (**dump)[2], uint32_t *n_regs) 329 332 { 330 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 331 333 uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET + 332 334 queue_id * KFD_VI_SDMA_QUEUE_OFFSET; 333 335 uint32_t i = 0, reg; ··· 358 362 return 0; 359 363 } 360 364 361 - static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, 362 - uint32_t pipe_id, uint32_t queue_id) 365 + static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, 366 + uint64_t queue_address, uint32_t pipe_id, 367 + uint32_t queue_id) 363 368 { 364 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 365 369 uint32_t act; 366 370 bool retval = false; 367 371 uint32_t low, high; ··· 380 384 return retval; 381 385 } 382 386 383 - static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) 387 + static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) 384 388 { 385 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 386 389 struct vi_sdma_mqd *m; 387 390 uint32_t sdma_rlc_reg_offset; 388 391 uint32_t sdma_rlc_rb_cntl; ··· 397 402 return false; 398 403 } 399 404 400 - static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, 405 + static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, 401 406 enum kfd_preempt_type reset_type, 402 407 unsigned int utimeout, uint32_t pipe_id, 403 408 uint32_t queue_id) 404 409 { 405 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 406 410 uint32_t temp; 407 411 enum hqd_dequeue_request_type type; 408 412 unsigned long flags, end_jiffies; ··· 501 507 return 0; 502 508 } 503 509 504 - static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, 510 + static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, 505 511 unsigned int utimeout) 506 512 { 507 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 508 513 struct vi_sdma_mqd *m; 509 514 uint32_t sdma_rlc_reg_offset; 510 515 
uint32_t temp;
+14 -22
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
··· 227 227 return (struct v9_sdma_mqd *)mqd; 228 228 } 229 229 230 - int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 231 - uint32_t queue_id, uint32_t __user *wptr, 232 - uint32_t wptr_shift, uint32_t wptr_mask, 233 - struct mm_struct *mm) 230 + int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, 231 + uint32_t pipe_id, uint32_t queue_id, 232 + uint32_t __user *wptr, uint32_t wptr_shift, 233 + uint32_t wptr_mask, struct mm_struct *mm) 234 234 { 235 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 236 235 struct v9_mqd *m; 237 236 uint32_t *mqd_hqd; 238 237 uint32_t reg, hqd_base, data; ··· 306 307 return 0; 307 308 } 308 309 309 - int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, 310 + int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, 310 311 uint32_t pipe_id, uint32_t queue_id, 311 312 uint32_t doorbell_off) 312 313 { 313 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 314 314 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring; 315 315 struct v9_mqd *m; 316 316 uint32_t mec, pipe; ··· 358 360 return r; 359 361 } 360 362 361 - int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, 363 + int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, 362 364 uint32_t pipe_id, uint32_t queue_id, 363 365 uint32_t (**dump)[2], uint32_t *n_regs) 364 366 { 365 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 366 367 uint32_t i = 0, reg; 367 368 #define HQD_N_REGS 56 368 369 #define DUMP_REG(addr) do { \ ··· 389 392 return 0; 390 393 } 391 394 392 - static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, 395 + static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, 393 396 uint32_t __user *wptr, struct mm_struct *mm) 394 397 { 395 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 396 398 struct v9_sdma_mqd *m; 397 399 uint32_t sdma_rlc_reg_offset; 398 400 unsigned long end_jiffies; ··· 458 462 return 0; 459 463 } 460 464 461 - static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, 465 + static 
int kgd_hqd_sdma_dump(struct amdgpu_device *adev, 462 466 uint32_t engine_id, uint32_t queue_id, 463 467 uint32_t (**dump)[2], uint32_t *n_regs) 464 468 { 465 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 466 469 uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, 467 470 engine_id, queue_id); 468 471 uint32_t i = 0, reg; ··· 489 494 return 0; 490 495 } 491 496 492 - bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, 493 - uint32_t pipe_id, uint32_t queue_id) 497 + bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, 498 + uint64_t queue_address, uint32_t pipe_id, 499 + uint32_t queue_id) 494 500 { 495 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 496 501 uint32_t act; 497 502 bool retval = false; 498 503 uint32_t low, high; ··· 511 516 return retval; 512 517 } 513 518 514 - static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) 519 + static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) 515 520 { 516 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 517 521 struct v9_sdma_mqd *m; 518 522 uint32_t sdma_rlc_reg_offset; 519 523 uint32_t sdma_rlc_rb_cntl; ··· 529 535 return false; 530 536 } 531 537 532 - int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, 538 + int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, 533 539 enum kfd_preempt_type reset_type, 534 540 unsigned int utimeout, uint32_t pipe_id, 535 541 uint32_t queue_id) 536 542 { 537 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 538 543 enum hqd_dequeue_request_type type; 539 544 unsigned long end_jiffies; 540 545 uint32_t temp; ··· 581 588 return 0; 582 589 } 583 590 584 - static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, 591 + static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, 585 592 unsigned int utimeout) 586 593 { 587 - struct amdgpu_device *adev = get_amdgpu_device(kgd); 588 594 struct v9_sdma_mqd *m; 589 595 uint32_t sdma_rlc_reg_offset; 590 
596 uint32_t temp;
+7 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
··· 29 29 int kgd_gfx_v9_set_pasid_vmid_mapping(struct kgd_dev *kgd, u32 pasid, 30 30 unsigned int vmid); 31 31 int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); 32 - int kgd_gfx_v9_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 32 + int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, 33 33 uint32_t queue_id, uint32_t __user *wptr, 34 34 uint32_t wptr_shift, uint32_t wptr_mask, 35 35 struct mm_struct *mm); 36 - int kgd_gfx_v9_hiq_mqd_load(struct kgd_dev *kgd, void *mqd, 36 + int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, 37 37 uint32_t pipe_id, uint32_t queue_id, 38 38 uint32_t doorbell_off); 39 - int kgd_gfx_v9_hqd_dump(struct kgd_dev *kgd, 39 + int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, 40 40 uint32_t pipe_id, uint32_t queue_id, 41 41 uint32_t (**dump)[2], uint32_t *n_regs); 42 - bool kgd_gfx_v9_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, 43 - uint32_t pipe_id, uint32_t queue_id); 44 - int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd, 42 + bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, 43 + uint64_t queue_address, uint32_t pipe_id, 44 + uint32_t queue_id); 45 + int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, 45 46 enum kfd_preempt_type reset_type, 46 47 unsigned int utimeout, uint32_t pipe_id, 47 48 uint32_t queue_id);
+3 -3
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 2065 2065 return 0; 2066 2066 } 2067 2067 2068 - r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, 2068 + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, 2069 2069 KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, 2070 2070 &dump, &n_regs); 2071 2071 if (!r) { ··· 2087 2087 continue; 2088 2088 2089 2089 r = dqm->dev->kfd2kgd->hqd_dump( 2090 - dqm->dev->kgd, pipe, queue, &dump, &n_regs); 2090 + dqm->dev->adev, pipe, queue, &dump, &n_regs); 2091 2091 if (r) 2092 2092 break; 2093 2093 ··· 2104 2104 queue < dqm->dev->device_info->num_sdma_queues_per_engine; 2105 2105 queue++) { 2106 2106 r = dqm->dev->kfd2kgd->hqd_sdma_dump( 2107 - dqm->dev->kgd, pipe, queue, &dump, &n_regs); 2107 + dqm->dev->adev, pipe, queue, &dump, &n_regs); 2108 2108 if (r) 2109 2109 break; 2110 2110
+6 -6
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
··· 171 171 uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); 172 172 uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1); 173 173 174 - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, 174 + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, 175 175 (uint32_t __user *)p->write_ptr, 176 176 wptr_shift, wptr_mask, mms); 177 177 } ··· 180 180 uint32_t pipe_id, uint32_t queue_id, 181 181 struct queue_properties *p, struct mm_struct *mms) 182 182 { 183 - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, 183 + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, 184 184 (uint32_t __user *)p->write_ptr, 185 185 mms); 186 186 } ··· 276 276 unsigned int timeout, uint32_t pipe_id, 277 277 uint32_t queue_id) 278 278 { 279 - return mm->dev->kfd2kgd->hqd_destroy(mm->dev->kgd, mqd, type, timeout, 279 + return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout, 280 280 pipe_id, queue_id); 281 281 } 282 282 ··· 289 289 unsigned int timeout, uint32_t pipe_id, 290 290 uint32_t queue_id) 291 291 { 292 - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); 292 + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); 293 293 } 294 294 295 295 static bool is_occupied(struct mqd_manager *mm, void *mqd, ··· 297 297 uint32_t queue_id) 298 298 { 299 299 300 - return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, 300 + return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address, 301 301 pipe_id, queue_id); 302 302 303 303 } ··· 306 306 uint64_t queue_address, uint32_t pipe_id, 307 307 uint32_t queue_id) 308 308 { 309 - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); 309 + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); 310 310 } 311 311 312 312 /*
+7 -7
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
··· 148 148 /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ 149 149 uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); 150 150 151 - r = mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, 151 + r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, 152 152 (uint32_t __user *)p->write_ptr, 153 153 wptr_shift, 0, mms); 154 154 return r; ··· 158 158 uint32_t pipe_id, uint32_t queue_id, 159 159 struct queue_properties *p, struct mm_struct *mms) 160 160 { 161 - return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, 161 + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, 162 162 queue_id, p->doorbell_off); 163 163 } 164 164 ··· 239 239 uint32_t queue_id) 240 240 { 241 241 return mm->dev->kfd2kgd->hqd_destroy 242 - (mm->dev->kgd, mqd, type, timeout, 242 + (mm->dev->adev, mqd, type, timeout, 243 243 pipe_id, queue_id); 244 244 } 245 245 ··· 254 254 uint32_t queue_id) 255 255 { 256 256 return mm->dev->kfd2kgd->hqd_is_occupied( 257 - mm->dev->kgd, queue_address, 257 + mm->dev->adev, queue_address, 258 258 pipe_id, queue_id); 259 259 } 260 260 ··· 320 320 uint32_t pipe_id, uint32_t queue_id, 321 321 struct queue_properties *p, struct mm_struct *mms) 322 322 { 323 - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, 323 + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, 324 324 (uint32_t __user *)p->write_ptr, 325 325 mms); 326 326 } ··· 363 363 unsigned int timeout, uint32_t pipe_id, 364 364 uint32_t queue_id) 365 365 { 366 - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); 366 + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); 367 367 } 368 368 369 369 static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, 370 370 uint64_t queue_address, uint32_t pipe_id, 371 371 uint32_t queue_id) 372 372 { 373 - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); 373 + return 
mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); 374 374 } 375 375 376 376 #if defined(CONFIG_DEBUG_FS)
+7 -7
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
··· 199 199 /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ 200 200 uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); 201 201 202 - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, 202 + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, 203 203 (uint32_t __user *)p->write_ptr, 204 204 wptr_shift, 0, mms); 205 205 } ··· 208 208 uint32_t pipe_id, uint32_t queue_id, 209 209 struct queue_properties *p, struct mm_struct *mms) 210 210 { 211 - return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->kgd, mqd, pipe_id, 211 + return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, 212 212 queue_id, p->doorbell_off); 213 213 } 214 214 ··· 291 291 uint32_t queue_id) 292 292 { 293 293 return mm->dev->kfd2kgd->hqd_destroy 294 - (mm->dev->kgd, mqd, type, timeout, 294 + (mm->dev->adev, mqd, type, timeout, 295 295 pipe_id, queue_id); 296 296 } 297 297 ··· 313 313 uint32_t queue_id) 314 314 { 315 315 return mm->dev->kfd2kgd->hqd_is_occupied( 316 - mm->dev->kgd, queue_address, 316 + mm->dev->adev, queue_address, 317 317 pipe_id, queue_id); 318 318 } 319 319 ··· 375 375 uint32_t pipe_id, uint32_t queue_id, 376 376 struct queue_properties *p, struct mm_struct *mms) 377 377 { 378 - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, 378 + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, 379 379 (uint32_t __user *)p->write_ptr, 380 380 mms); 381 381 } ··· 418 418 unsigned int timeout, uint32_t pipe_id, 419 419 uint32_t queue_id) 420 420 { 421 - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); 421 + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); 422 422 } 423 423 424 424 static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, 425 425 uint64_t queue_address, uint32_t pipe_id, 426 426 uint32_t queue_id) 427 427 { 428 - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); 428 + return 
mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); 429 429 } 430 430 431 431 #if defined(CONFIG_DEBUG_FS)
+6 -6
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
··· 162 162 uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); 163 163 uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1); 164 164 165 - return mm->dev->kfd2kgd->hqd_load(mm->dev->kgd, mqd, pipe_id, queue_id, 165 + return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, 166 166 (uint32_t __user *)p->write_ptr, 167 167 wptr_shift, wptr_mask, mms); 168 168 } ··· 265 265 uint32_t queue_id) 266 266 { 267 267 return mm->dev->kfd2kgd->hqd_destroy 268 - (mm->dev->kgd, mqd, type, timeout, 268 + (mm->dev->adev, mqd, type, timeout, 269 269 pipe_id, queue_id); 270 270 } 271 271 ··· 280 280 uint32_t queue_id) 281 281 { 282 282 return mm->dev->kfd2kgd->hqd_is_occupied( 283 - mm->dev->kgd, queue_address, 283 + mm->dev->adev, queue_address, 284 284 pipe_id, queue_id); 285 285 } 286 286 ··· 347 347 uint32_t pipe_id, uint32_t queue_id, 348 348 struct queue_properties *p, struct mm_struct *mms) 349 349 { 350 - return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->kgd, mqd, 350 + return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd, 351 351 (uint32_t __user *)p->write_ptr, 352 352 mms); 353 353 } ··· 389 389 unsigned int timeout, uint32_t pipe_id, 390 390 uint32_t queue_id) 391 391 { 392 - return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->kgd, mqd, timeout); 392 + return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout); 393 393 } 394 394 395 395 static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd, 396 396 uint64_t queue_address, uint32_t pipe_id, 397 397 uint32_t queue_id) 398 398 { 399 - return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->kgd, mqd); 399 + return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); 400 400 } 401 401 402 402 #if defined(CONFIG_DEBUG_FS)
+13 -12
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
··· 238 238 239 239 int (*init_interrupts)(struct kgd_dev *kgd, uint32_t pipe_id); 240 240 241 - int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, 241 + int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, 242 242 uint32_t queue_id, uint32_t __user *wptr, 243 243 uint32_t wptr_shift, uint32_t wptr_mask, 244 244 struct mm_struct *mm); 245 245 246 - int (*hiq_mqd_load)(struct kgd_dev *kgd, void *mqd, 246 + int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd, 247 247 uint32_t pipe_id, uint32_t queue_id, 248 248 uint32_t doorbell_off); 249 249 250 - int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd, 250 + int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd, 251 251 uint32_t __user *wptr, struct mm_struct *mm); 252 252 253 - int (*hqd_dump)(struct kgd_dev *kgd, 253 + int (*hqd_dump)(struct amdgpu_device *adev, 254 254 uint32_t pipe_id, uint32_t queue_id, 255 255 uint32_t (**dump)[2], uint32_t *n_regs); 256 256 257 - int (*hqd_sdma_dump)(struct kgd_dev *kgd, 257 + int (*hqd_sdma_dump)(struct amdgpu_device *adev, 258 258 uint32_t engine_id, uint32_t queue_id, 259 259 uint32_t (**dump)[2], uint32_t *n_regs); 260 260 261 - bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address, 262 - uint32_t pipe_id, uint32_t queue_id); 263 - 264 - int (*hqd_destroy)(struct kgd_dev *kgd, void *mqd, uint32_t reset_type, 265 - unsigned int timeout, uint32_t pipe_id, 261 + bool (*hqd_is_occupied)(struct amdgpu_device *adev, 262 + uint64_t queue_address, uint32_t pipe_id, 266 263 uint32_t queue_id); 267 264 268 - bool (*hqd_sdma_is_occupied)(struct kgd_dev *kgd, void *mqd); 265 + int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd, 266 + uint32_t reset_type, unsigned int timeout, 267 + uint32_t pipe_id, uint32_t queue_id); 269 268 270 - int (*hqd_sdma_destroy)(struct kgd_dev *kgd, void *mqd, 269 + bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd); 270 + 271 + int (*hqd_sdma_destroy)(struct amdgpu_device 
*adev, void *mqd, 271 272 unsigned int timeout); 272 273 273 274 int (*address_watch_disable)(struct kgd_dev *kgd);