
drm/amdkfd: Add spatial partitioning support in KFD

This patch introduces multi-partition support in KFD.
This patch includes:
- Support for a maximum of 8 spatial partitions in KFD.
- Initialize one HIQ per partition.
- Management of the VMID range depending on the partition mode.
- Management of the doorbell aperture space between all
  partitions.
- Each partition does its own queue management, interrupt
  handling, and SMI event reporting.
- IOMMU, if enabled with multiple partitions, will only work
  on the first partition.
- SPM is only supported on the first partition.
- Currently, there is no support for resetting individual
  partitions. All partitions will reset together.

Signed-off-by: Mukul Joshi <mukul.joshi@amd.com>
Tested-by: Amber Lin <Amber.Lin@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>

Authored by Mukul Joshi, committed by Alex Deucher
74c5b85d 8dc1db31

+206 -75
+159 -51
drivers/gpu/drm/amd/amdkfd/kfd_device.c
···
 	return err;
 }
 
-static void kfd_cleanup_node(struct kfd_dev *kfd)
+static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
 {
-	struct kfd_node *knode = kfd->node;
+	struct kfd_node *knode;
+	unsigned int i;
 
-	device_queue_manager_uninit(knode->dqm);
-	kfd_interrupt_exit(knode);
-	kfd_topology_remove_device(knode);
-	if (knode->gws)
-		amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
-	kfree(knode);
-	kfd->node = NULL;
+	for (i = 0; i < num_nodes; i++) {
+		knode = kfd->nodes[i];
+		device_queue_manager_uninit(knode->dqm);
+		kfd_interrupt_exit(knode);
+		kfd_topology_remove_device(knode);
+		if (knode->gws)
+			amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
+		kfree(knode);
+		kfd->nodes[i] = NULL;
+	}
 }
 
 bool kgd2kfd_device_init(struct kfd_dev *kfd,
 			 const struct kgd2kfd_shared_resources *gpu_resources)
 {
-	unsigned int size, map_process_packet_size;
+	unsigned int size, map_process_packet_size, i;
 	struct kfd_node *node;
 	uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
 	unsigned int max_proc_per_quantum;
···
 						KGD_ENGINE_SDMA1);
 	kfd->shared_resources = *gpu_resources;
 
-	first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
-	last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
-	vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
+	if (kfd->adev->gfx.num_xcd == 0 || kfd->adev->gfx.num_xcd == 1 ||
+	    kfd->adev->gfx.num_xcc_per_xcp == 0)
+		kfd->num_nodes = 1;
+	else
+		kfd->num_nodes =
+			kfd->adev->gfx.num_xcd/kfd->adev->gfx.num_xcc_per_xcp;
+	if (kfd->num_nodes == 0) {
+		dev_err(kfd_device,
+			"KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n",
+			kfd->adev->gfx.num_xcd, kfd->adev->gfx.num_xcc_per_xcp);
+		goto out;
+	}
 
 	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
 	 * 32 and 64-bit requests are possible and must be
···
 			kfd->mec_fw_version,
 			kfd->device_info.no_atomic_fw_version);
 		return false;
+	}
+
+	first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
+	last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
+	vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;
+
+	/* For GFX9.4.3, we need special handling for VMIDs depending on
+	 * partition mode.
+	 * In CPX mode, the VMID range needs to be shared between XCDs.
+	 * Additionally, there are 13 VMIDs (3-15) available for KFD. To
+	 * divide them equally, we change starting VMID to 4 and not use
+	 * VMID 3.
+	 * If the VMID range changes for GFX9.4.3, then this code MUST be
+	 * revisited.
+	 */
+	if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
+	    kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+	    kfd->num_nodes != 1) {
+		vmid_num_kfd /= 2;
+		first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
 	}
 
 	/* Verify module parameters regarding mapped process number*/
···
 
 	kfd_cwsr_init(kfd);
 
+	/* TODO: Needs to be updated for memory partitioning */
 	svm_migrate_init(kfd->adev);
 
 	/* Allocate the KFD node */
···
 	node->max_proc_per_quantum = max_proc_per_quantum;
 	atomic_set(&node->sram_ecc_flag, 0);
 
-	/* Initialize the KFD node */
-	if (kfd_init_node(node)) {
-		dev_err(kfd_device, "Error initializing KFD node\n");
-		goto node_init_error;
+	dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
+		 kfd->num_nodes);
+	for (i = 0; i < kfd->num_nodes; i++) {
+		node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
+		if (!node)
+			goto node_alloc_error;
+
+		node->adev = kfd->adev;
+		node->kfd = kfd;
+		node->kfd2kgd = kfd->kfd2kgd;
+		node->vm_info.vmid_num_kfd = vmid_num_kfd;
+		node->num_xcc_per_node = max(1U, kfd->adev->gfx.num_xcc_per_xcp);
+		node->start_xcc_id = node->num_xcc_per_node * i;
+
+		if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
+		    kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE &&
+		    kfd->num_nodes != 1) {
+			/* For GFX9.4.3 and CPX mode, first XCD gets VMID range
+			 * 4-9 and second XCD gets VMID range 10-15.
+			 */
+
+			node->vm_info.first_vmid_kfd = (i%2 == 0) ?
+						first_vmid_kfd :
+						first_vmid_kfd+vmid_num_kfd;
+			node->vm_info.last_vmid_kfd = (i%2 == 0) ?
+						last_vmid_kfd-vmid_num_kfd :
+						last_vmid_kfd;
+			node->compute_vmid_bitmap =
+				((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
+				((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
+		} else {
+			node->vm_info.first_vmid_kfd = first_vmid_kfd;
+			node->vm_info.last_vmid_kfd = last_vmid_kfd;
+			node->compute_vmid_bitmap =
+				gpu_resources->compute_vmid_bitmap;
+		}
+		node->max_proc_per_quantum = max_proc_per_quantum;
+		atomic_set(&node->sram_ecc_flag, 0);
+		/* Initialize the KFD node */
+		if (kfd_init_node(node)) {
+			dev_err(kfd_device, "Error initializing KFD node\n");
+			goto node_init_error;
+		}
+		kfd->nodes[i] = node;
 	}
-	kfd->node = node;
 
 	if (kfd_resume_iommu(kfd))
 		goto kfd_resume_iommu_error;
···
 	goto out;
 
 kfd_resume_iommu_error:
-	kfd_cleanup_node(kfd);
 node_init_error:
 node_alloc_error:
+	kfd_cleanup_nodes(kfd, i);
 device_iommu_error:
 	kfd_doorbell_fini(kfd);
 kfd_doorbell_error:
···
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
 	if (kfd->init_complete) {
-		kfd_cleanup_node(kfd);
+		/* Cleanup KFD nodes */
+		kfd_cleanup_nodes(kfd, kfd->num_nodes);
+		/* Cleanup common/shared resources */
 		kfd_doorbell_fini(kfd);
 		ida_destroy(&kfd->doorbell_ida);
 		kfd_gtt_sa_fini(kfd);
···
 
 int kgd2kfd_pre_reset(struct kfd_dev *kfd)
 {
-	struct kfd_node *node = kfd->node;
+	struct kfd_node *node;
+	int i;
 
 	if (!kfd->init_complete)
 		return 0;
 
-	kfd_smi_event_update_gpu_reset(node, false);
-
-	node->dqm->ops.pre_reset(node->dqm);
+	for (i = 0; i < kfd->num_nodes; i++) {
+		node = kfd->nodes[i];
+		kfd_smi_event_update_gpu_reset(node, false);
+		node->dqm->ops.pre_reset(node->dqm);
+	}
 
 	kgd2kfd_suspend(kfd, false);
 
-	kfd_signal_reset_event(node);
+	for (i = 0; i < kfd->num_nodes; i++)
+		kfd_signal_reset_event(kfd->nodes[i]);
+
 	return 0;
 }
 
···
 int kgd2kfd_post_reset(struct kfd_dev *kfd)
 {
 	int ret;
-	struct kfd_node *node = kfd->node;
+	struct kfd_node *node;
+	int i;
 
 	if (!kfd->init_complete)
 		return 0;
 
-	ret = kfd_resume(node);
-	if (ret)
-		return ret;
+	for (i = 0; i < kfd->num_nodes; i++) {
+		ret = kfd_resume(kfd->nodes[i]);
+		if (ret)
+			return ret;
+	}
+
 	atomic_dec(&kfd_locked);
 
-	atomic_set(&node->sram_ecc_flag, 0);
-
-	kfd_smi_event_update_gpu_reset(node, true);
+	for (i = 0; i < kfd->num_nodes; i++) {
+		node = kfd->nodes[i];
+		atomic_set(&node->sram_ecc_flag, 0);
+		kfd_smi_event_update_gpu_reset(node, true);
+	}
 
 	return 0;
 }
···
 
 void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
-	struct kfd_node *node = kfd->node;
+	struct kfd_node *node;
+	int i;
 
 	if (!kfd->init_complete)
 		return;
···
 		kfd_suspend_all_processes();
 	}
 
-	node->dqm->ops.stop(node->dqm);
+	for (i = 0; i < kfd->num_nodes; i++) {
+		node = kfd->nodes[i];
+		node->dqm->ops.stop(node->dqm);
+	}
 	kfd_iommu_suspend(kfd);
 }
 
 int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
-	int ret, count;
-	struct kfd_node *node = kfd->node;
+	int ret, count, i;
 
 	if (!kfd->init_complete)
 		return 0;
 
-	ret = kfd_resume(node);
-	if (ret)
-		return ret;
+	for (i = 0; i < kfd->num_nodes; i++) {
+		ret = kfd_resume(kfd->nodes[i]);
+		if (ret)
+			return ret;
+	}
 
 	/* for runtime resume, skip unlocking kfd */
 	if (!run_pm) {
···
 /* This is called directly from KGD at ISR. */
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
 {
-	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
+	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
 	bool is_patched = false;
 	unsigned long flags;
-	struct kfd_node *node = kfd->node;
+	struct kfd_node *node;
 
 	if (!kfd->init_complete)
 		return;
···
 		return;
 	}
 
-	spin_lock_irqsave(&node->interrupt_lock, flags);
+	for (i = 0; i < kfd->num_nodes; i++) {
+		node = kfd->nodes[i];
+		spin_lock_irqsave(&node->interrupt_lock, flags);
 
-	if (node->interrupts_active
-	    && interrupt_is_wanted(node, ih_ring_entry,
-				   patched_ihre, &is_patched)
-	    && enqueue_ih_ring_entry(node,
-				     is_patched ? patched_ihre : ih_ring_entry))
-		kfd_queue_work(node->ih_wq, &node->interrupt_work);
+		if (node->interrupts_active
+		    && interrupt_is_wanted(node, ih_ring_entry,
+					   patched_ihre, &is_patched)
+		    && enqueue_ih_ring_entry(node,
+					     is_patched ? patched_ihre : ih_ring_entry)) {
+			kfd_queue_work(node->ih_wq, &node->interrupt_work);
+			spin_unlock_irqrestore(&node->interrupt_lock, flags);
+			return;
+		}
+		spin_unlock_irqrestore(&node->interrupt_lock, flags);
+	}
 
-	spin_unlock_irqrestore(&node->interrupt_lock, flags);
 }
 
 int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
···
 
 void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
 {
+	/*
+	 * TODO: Currently update SRAM ECC flag for first node.
+	 * This needs to be updated later when we can
+	 * identify SRAM ECC error on other nodes also.
+	 */
 	if (kfd)
-		atomic_inc(&kfd->node->sram_ecc_flag);
+		atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
 }
 
 void kfd_inc_compute_active(struct kfd_node *node)
···
 
 void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
 {
+	/*
+	 * TODO: For now, raise the throttling event only on first node.
+	 * This will need to change after we are able to determine
+	 * which node raised the throttling event.
+	 */
 	if (kfd && kfd->init_complete)
-		kfd_smi_event_update_thermal_throttling(kfd->node, throttle_bitmask);
+		kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
+							throttle_bitmask);
 }
 
 /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and
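For reference, the CPX VMID arithmetic in kgd2kfd_device_init() above can be checked with a small userspace sketch of the same computation. This is not kernel code: the 0xFFF8 input bitmap (VMIDs 3-15) is an assumed example matching the "13 VMIDs (3-15)" comment, and fls32() is a stand-in for the kernel's fls() (assuming a GCC/Clang __builtin_clz).

#include <stdio.h>
#include <strings.h>    /* POSIX ffs() */

/* Userspace stand-in for the kernel's fls() */
static int fls32(unsigned int x)
{
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        unsigned int compute_vmid_bitmap = 0xFFF8;         /* VMIDs 3..15 (assumed) */
        unsigned int first = ffs(compute_vmid_bitmap) - 1; /* 3 */
        unsigned int last = fls32(compute_vmid_bitmap) - 1;/* 15 */
        unsigned int num = last - first + 1;               /* 13 */
        unsigned int num_nodes = 2, i;

        /* CPX-style split: halve the range and drop VMID 3 so both
         * halves are equal-sized (4-9 and 10-15). */
        num /= 2;                       /* 6 */
        first = last + 1 - num * 2;     /* 4 */

        for (i = 0; i < num_nodes; i++) {
                unsigned int f = (i % 2 == 0) ? first : first + num;
                unsigned int l = (i % 2 == 0) ? last - num : last;
                unsigned int bitmap = ((1U << (l + 1)) - 1) - ((1U << f) - 1);

                printf("node %u: VMIDs %u-%u, bitmap 0x%x\n", i, f, l, bitmap);
        }
        return 0;       /* prints 4-9/0x3f0 and 10-15/0xfc00 */
}

Running this prints bitmap 0x3f0 for the even node and 0xfc00 for the odd node, which is exactly the per-node compute_vmid_bitmap the patch later feeds to the HW scheduler as vmid_mask.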
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
···
 	int i, mec;
 	struct scheduling_resources res;
 
-	res.vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap;
+	res.vmid_mask = dqm->dev->compute_vmid_bitmap;
 
 	res.queue_mask = 0;
 	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
+10 -3
drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
···
 		return -EINVAL;
 	}
 
+	if (!kfd_is_first_node(dev)) {
+		dev_warn_once(kfd_device,
+			      "IOMMU supported only on first node\n");
+		return 0;
+	}
+
 	err = amd_iommu_bind_pasid(dev->adev->pdev, p->pasid, p->lead_thread);
 	if (!err)
 		pdd->bound = PDD_BOUND;
···
 	int i;
 
 	for (i = 0; i < p->n_pdds; i++)
-		if (p->pdds[i]->bound == PDD_BOUND)
+		if ((p->pdds[i]->bound == PDD_BOUND) &&
+		    (kfd_is_first_node((p->pdds[i]->dev))))
 			amd_iommu_unbind_pasid(p->pdds[i]->dev->adev->pdev,
 					       p->pasid);
 }
···
 	if (!kfd->use_iommu_v2)
 		return;
 
-	kfd_unbind_processes_from_device(kfd->node);
+	kfd_unbind_processes_from_device(kfd->nodes[0]);
 
 	amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
 	amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
···
 	amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev,
 				     iommu_invalid_ppr_cb);
 
-	err = kfd_bind_processes_to_device(kfd->node);
+	err = kfd_bind_processes_to_device(kfd->nodes[0]);
 	if (err) {
 		amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL);
 		amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL);
+4 -4
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
···
 
 	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
 				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
-				      0, adev->kfd.dev->node->id, prange->prefetch_loc,
+				      0, adev->kfd.dev->nodes[0]->id, prange->prefetch_loc,
 				      prange->preferred_loc, trigger);
 
 	r = migrate_vma_setup(&migrate);
···
 
 	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
 				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
-				    0, adev->kfd.dev->node->id, trigger);
+				    0, adev->kfd.dev->nodes[0]->id, trigger);
 
 	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
 	svm_range_free_dma_mappings(prange);
···
 
 	kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid,
 				      start >> PAGE_SHIFT, end >> PAGE_SHIFT,
-				      adev->kfd.dev->node->id, 0, prange->prefetch_loc,
+				      adev->kfd.dev->nodes[0]->id, 0, prange->prefetch_loc,
 				      prange->preferred_loc, trigger);
 
 	r = migrate_vma_setup(&migrate);
···
 
 	kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid,
 				    start >> PAGE_SHIFT, end >> PAGE_SHIFT,
-				    adev->kfd.dev->node->id, 0, trigger);
+				    adev->kfd.dev->nodes[0]->id, 0, trigger);
 
 	svm_range_dma_unmap(adev->dev, scratch, 0, npages);
 
+15 -1
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
···
 	uint32_t vmid_num_kfd;
 };
 
+#define MAX_KFD_NODES	8
+
 struct kfd_dev;
 
 struct kfd_node {
···
 	 */
 	struct kfd_vmid_info vm_info;
 	unsigned int id;		/* topology stub index */
+	unsigned int num_xcc_per_node;
+	unsigned int start_xcc_id;	/* Starting XCC instance
+					 * number for the node
+					 */
 	/* Interrupts */
 	struct kfifo ih_fifo;
 	struct workqueue_struct *ih_wq;
···
 
 	/* Maximum process number mapped to HW scheduler */
 	unsigned int max_proc_per_quantum;
+
+	unsigned int compute_vmid_bitmap;
 
 	struct kfd_dev *kfd;
 };
···
 	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
 	struct dev_pagemap pgmap;
 
-	struct kfd_node *node;
+	struct kfd_node *nodes[MAX_KFD_NODES];
+	unsigned int num_nodes;
 };
 
 enum kfd_mempool {
···
 #else
 	return 0;
 #endif
+}
+
+static inline bool kfd_is_first_node(struct kfd_node *node)
+{
+	return (node == node->kfd->nodes[0]);
 }
 
 /* Debugfs */
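The header change above is the heart of the patch: kfd_dev grows a nodes[] array plus num_nodes, each kfd_node keeps a back-pointer to its shared kfd_dev, and "first node" identity is a pointer comparison. The following standalone sketch (with assumed, pared-down types; not the real structures) shows how per-node work iterates nodes[] while first-node-only features such as IOMMU, SPM, and the SMI fallbacks gate on kfd_is_first_node():

#include <stdbool.h>
#include <stdio.h>

#define MAX_KFD_NODES 8

struct kfd_dev;

struct kfd_node {
        struct kfd_dev *kfd;    /* back-pointer to the shared device */
};

struct kfd_dev {
        struct kfd_node *nodes[MAX_KFD_NODES];
        unsigned int num_nodes;
};

static inline bool kfd_is_first_node(struct kfd_node *node)
{
        return node == node->kfd->nodes[0];
}

int main(void)
{
        static struct kfd_dev dev;
        static struct kfd_node n0, n1;
        unsigned int i;

        n0.kfd = &dev;
        n1.kfd = &dev;
        dev.nodes[0] = &n0;
        dev.nodes[1] = &n1;
        dev.num_nodes = 2;

        /* Per-node work loops over nodes[]; first-node-only paths
         * bail out early on the others, as kfd_iommu.c now does. */
        for (i = 0; i < dev.num_nodes; i++)
                printf("node %u first? %d\n", i,
                       kfd_is_first_node(dev.nodes[i]));
        return 0;
}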
+12 -12
drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c
···
 				      unsigned long address, bool write_fault,
 				      ktime_t ts)
 {
-	kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_START,
+	kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_START,
 			  "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid,
-			  address, dev->node->id, write_fault ? 'W' : 'R');
+			  address, dev->nodes[0]->id, write_fault ? 'W' : 'R');
 }
 
 void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid,
 				  unsigned long address, bool migration)
 {
-	kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_END,
+	kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_END,
 			  "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(),
-			  pid, address, dev->node->id, migration ? 'M' : 'U');
+			  pid, address, dev->nodes[0]->id, migration ? 'M' : 'U');
 }
 
 void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid,
···
 				   uint32_t prefetch_loc, uint32_t preferred_loc,
 				   uint32_t trigger)
 {
-	kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_START,
+	kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_START,
 			  "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n",
 			  ktime_get_boottime_ns(), pid, start, end - start,
 			  from, to, prefetch_loc, preferred_loc, trigger);
···
 				 unsigned long start, unsigned long end,
 				 uint32_t from, uint32_t to, uint32_t trigger)
 {
-	kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_END,
+	kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_END,
 			  "%lld -%d @%lx(%lx) %x->%x %d\n",
 			  ktime_get_boottime_ns(), pid, start, end - start,
 			  from, to, trigger);
···
 void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid,
 				  uint32_t trigger)
 {
-	kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_EVICTION,
+	kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_EVICTION,
 			  "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid,
-			  dev->node->id, trigger);
+			  dev->nodes[0]->id, trigger);
 }
 
 void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid)
 {
-	kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_RESTORE,
+	kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_RESTORE,
 			  "%lld -%d %x\n", ktime_get_boottime_ns(), pid,
-			  dev->node->id);
+			  dev->nodes[0]->id);
 }
 
 void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm)
···
 				unsigned long address, unsigned long last,
 				uint32_t trigger)
 {
-	kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_UNMAP_FROM_GPU,
+	kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_UNMAP_FROM_GPU,
 			  "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(),
-			  pid, address, last - address + 1, dev->node->id, trigger);
+			  pid, address, last - address + 1, dev->nodes[0]->id, trigger);
 }
 
 int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd)
+5 -3
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
···
 			      dev->gpu->kfd->sdma_fw_version);
 		sysfs_show_64bit_prop(buffer, offs, "unique_id",
 				      dev->gpu->adev->unique_id);
-
+		sysfs_show_32bit_prop(buffer, offs, "num_xcc",
+				      dev->gpu->num_xcc_per_node);
 	}
 
 	return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute",
···
 static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu)
 {
 	uint32_t hashout;
-	uint32_t buf[7];
+	uint32_t buf[8];
 	uint64_t local_mem_size;
 	int i;
···
 	buf[4] = gpu->adev->pdev->bus->number;
 	buf[5] = lower_32_bits(local_mem_size);
 	buf[6] = upper_32_bits(local_mem_size);
+	buf[7] = gpu->start_xcc_id | (gpu->num_xcc_per_node << 16);
 
-	for (i = 0, hashout = 0; i < 7; i++)
+	for (i = 0, hashout = 0; i < 8; i++)
 		hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
 
 	return hashout;
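The gpu_id change matters because every partition of one physical GPU shares the same PCI ids and memory size; folding start_xcc_id and num_xcc_per_node into buf[7] is what keeps the hashed topology ids distinct. A userspace sketch of that packing and fold is below; the hash_32() here mirrors the multiplicative hash in linux/hash.h (GOLDEN_RATIO_32 = 0x61C88647), and the buf[0..6] sample values are made-up stand-ins for the PCI ids, bus number, and local memory size:

#include <stdint.h>
#include <stdio.h>

#define KFD_GPU_ID_HASH_WIDTH 16
#define GOLDEN_RATIO_32 0x61C88647u

/* Userspace mirror of the kernel's hash_32() multiplicative hash */
static uint32_t hash_32(uint32_t val, unsigned int bits)
{
        return (val * GOLDEN_RATIO_32) >> (32 - bits);
}

static uint32_t gpu_id(uint32_t start_xcc_id, uint32_t num_xcc_per_node)
{
        /* buf[0..6]: assumed sample PCI ids, bus number, memory size */
        uint32_t buf[8] = { 0x1002, 0x740f, 0xdead, 0xbeef, 0x03,
                            0x0, 0x4, 0 };
        uint32_t hashout = 0;
        int i;

        /* Pack the XCC range exactly as the patched buf[7] does */
        buf[7] = start_xcc_id | (num_xcc_per_node << 16);
        for (i = 0; i < 8; i++)
                hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);
        return hashout;
}

int main(void)
{
        /* Two CPX partitions of the same GPU now get distinct gpu_ids. */
        printf("node0 0x%x node1 0x%x\n", gpu_id(0, 2), gpu_id(2, 2));
        return 0;
}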