Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-2021-07-16' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Regular rc2 fixes though a bit more than usual at rc2 stage, people
must have been testing early or else some fixes from last week got a
bit laggy.

There is one larger change in the amd fixes to amalgamate some power
management code on the newer chips with the code from the older chips,
it should only affect chips where support was introduced in rc1, and
it should make future fixes easier to maintain, so it's probably a good
idea to merge it now.

Otherwise it's mostly fixes across the board.

dma-buf:
- Fix fence leak in sync_file_merge() error code

drm/panel:
- nt35510: Don't fail on DSI reads

fbdev:
- Avoid use-after-free by not deleting current video mode

ttm:
- Avoid NULL-ptr deref in ttm_range_man_fini()

vmwgfx:
- Fix a merge commit

qxl:
- fix a TTM regression

amdgpu:
- SR-IOV fixes
- RAS fixes
- eDP fixes
- SMU13 code unification to facilitate fixes in the future
- Add new renoir DID
- Yellow Carp fixes
- Beige Goby fixes
- Revert a bunch of TLB fixes that caused regressions
- Revert an LTTPR display regression

amdkfd:
- Fix VRAM access regression
- SVM fixes

i915:
- Fix -EDEADLK handling regression
- Drop the page table optimisation"

* tag 'drm-fixes-2021-07-16' of git://anongit.freedesktop.org/drm/drm: (29 commits)
drm/amdgpu: add another Renoir DID
drm/ttm: add a check against null pointer dereference
drm/i915/gtt: drop the page table optimisation
drm/i915/gt: Fix -EDEADLK handling regression
drm/amd/pm: Add waiting for response of mode-reset message for yellow carp
Revert "drm/amdkfd: Add heavy-weight TLB flush after unmapping"
Revert "drm/amdgpu: Add table_freed parameter to amdgpu_vm_bo_update"
Revert "drm/amdkfd: Make TLB flush conditional on mapping"
Revert "drm/amdgpu: Fix warning of Function parameter or member not described"
Revert "drm/amdkfd: Add memory sync before TLB flush on unmap"
drm/amd/pm: Fix BACO state setting for Beige_Goby
drm/amdgpu: Restore msix after FLR
drm/amdkfd: Allow CPU access for all VRAM BOs
drm/amdgpu/display - only update eDP's backlight level when necessary
drm/amdkfd: handle fault counters on invalid address
drm/amdgpu: Correct the irq numbers for virtual crtc
drm/amd/display: update header file name
drm/amd/pm: drop smu_v13_0_1.c|h files for yellow carp
drm/amd/display: remove faulty assert
Revert "drm/amd/display: Always write repeater mode regardless of LTTPR"
...

+202 -1395
+7 -6
drivers/dma-buf/sync_file.c
··· 211 211 struct sync_file *b) 212 212 { 213 213 struct sync_file *sync_file; 214 - struct dma_fence **fences, **nfences, **a_fences, **b_fences; 215 - int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; 214 + struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences; 215 + int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences; 216 216 217 217 sync_file = sync_file_alloc(); 218 218 if (!sync_file) ··· 236 236 * If a sync_file can only be created with sync_file_merge 237 237 * and sync_file_create, this is a reasonable assumption. 238 238 */ 239 - for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { 239 + for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { 240 240 struct dma_fence *pt_a = a_fences[i_a]; 241 241 struct dma_fence *pt_b = b_fences[i_b]; 242 242 ··· 277 277 fences = nfences; 278 278 } 279 279 280 - if (sync_file_set_fence(sync_file, fences, i) < 0) { 281 - kfree(fences); 280 + if (sync_file_set_fence(sync_file, fences, i) < 0) 282 281 goto err; 283 - } 284 282 285 283 strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); 286 284 return sync_file; 287 285 288 286 err: 287 + while (i) 288 + dma_fence_put(fences[--i]); 289 + kfree(fences); 289 290 fput(sync_file->file); 290 291 return NULL; 291 292
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
··· 269 269 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, 270 270 uint64_t *size); 271 271 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( 272 - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed); 272 + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); 273 273 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( 274 274 struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); 275 275 int amdgpu_amdkfd_gpuvm_sync_memory(
+9 -13
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 1057 1057 1058 1058 static int update_gpuvm_pte(struct kgd_mem *mem, 1059 1059 struct kfd_mem_attachment *entry, 1060 - struct amdgpu_sync *sync, 1061 - bool *table_freed) 1060 + struct amdgpu_sync *sync) 1062 1061 { 1063 1062 struct amdgpu_bo_va *bo_va = entry->bo_va; 1064 1063 struct amdgpu_device *adev = entry->adev; ··· 1068 1069 return ret; 1069 1070 1070 1071 /* Update the page tables */ 1071 - ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed); 1072 + ret = amdgpu_vm_bo_update(adev, bo_va, false); 1072 1073 if (ret) { 1073 1074 pr_err("amdgpu_vm_bo_update failed\n"); 1074 1075 return ret; ··· 1080 1081 static int map_bo_to_gpuvm(struct kgd_mem *mem, 1081 1082 struct kfd_mem_attachment *entry, 1082 1083 struct amdgpu_sync *sync, 1083 - bool no_update_pte, 1084 - bool *table_freed) 1084 + bool no_update_pte) 1085 1085 { 1086 1086 int ret; 1087 1087 ··· 1097 1099 if (no_update_pte) 1098 1100 return 0; 1099 1101 1100 - ret = update_gpuvm_pte(mem, entry, sync, table_freed); 1102 + ret = update_gpuvm_pte(mem, entry, sync); 1101 1103 if (ret) { 1102 1104 pr_err("update_gpuvm_pte() failed\n"); 1103 1105 goto update_gpuvm_pte_failed; ··· 1391 1393 domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; 1392 1394 alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; 1393 1395 alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? 
1394 - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 1395 - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; 1396 + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; 1396 1397 } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { 1397 1398 domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; 1398 1399 alloc_flags = 0; ··· 1594 1597 } 1595 1598 1596 1599 int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( 1597 - struct kgd_dev *kgd, struct kgd_mem *mem, 1598 - void *drm_priv, bool *table_freed) 1600 + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) 1599 1601 { 1600 1602 struct amdgpu_device *adev = get_amdgpu_device(kgd); 1601 1603 struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); ··· 1682 1686 entry->va, entry->va + bo_size, entry); 1683 1687 1684 1688 ret = map_bo_to_gpuvm(mem, entry, ctx.sync, 1685 - is_invalid_userptr, table_freed); 1689 + is_invalid_userptr); 1686 1690 if (ret) { 1687 1691 pr_err("Failed to map bo to gpuvm\n"); 1688 1692 goto out_unreserve; ··· 2132 2136 continue; 2133 2137 2134 2138 kfd_mem_dmaunmap_attachment(mem, attachment); 2135 - ret = update_gpuvm_pte(mem, attachment, &sync, NULL); 2139 + ret = update_gpuvm_pte(mem, attachment, &sync); 2136 2140 if (ret) { 2137 2141 pr_err("%s: update PTE failed\n", __func__); 2138 2142 /* make sure this gets validated again */ ··· 2338 2342 continue; 2339 2343 2340 2344 kfd_mem_dmaunmap_attachment(mem, attachment); 2341 - ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL); 2345 + ret = update_gpuvm_pte(mem, attachment, &sync_obj); 2342 2346 if (ret) { 2343 2347 pr_debug("Memory eviction: update PTE failed. Try again\n"); 2344 2348 goto validate_map_fail;
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 781 781 if (r) 782 782 return r; 783 783 784 - r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL); 784 + r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); 785 785 if (r) 786 786 return r; 787 787 ··· 792 792 if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { 793 793 bo_va = fpriv->csa_va; 794 794 BUG_ON(!bo_va); 795 - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); 795 + r = amdgpu_vm_bo_update(adev, bo_va, false); 796 796 if (r) 797 797 return r; 798 798 ··· 811 811 if (bo_va == NULL) 812 812 continue; 813 813 814 - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); 814 + r = amdgpu_vm_bo_update(adev, bo_va, false); 815 815 if (r) 816 816 return r; 817 817
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 1168 1168 {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, 1169 1169 1170 1170 /* Renoir */ 1171 + {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 1171 1172 {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 1172 1173 {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 1173 1174 {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 612 612 613 613 if (operation == AMDGPU_VA_OP_MAP || 614 614 operation == AMDGPU_VA_OP_REPLACE) { 615 - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); 615 + r = amdgpu_vm_bo_update(adev, bo_va, false); 616 616 if (r) 617 617 goto error; 618 618 }
+18
drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
··· 278 278 return true; 279 279 } 280 280 281 + static void amdgpu_restore_msix(struct amdgpu_device *adev) 282 + { 283 + u16 ctrl; 284 + 285 + pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); 286 + if (!(ctrl & PCI_MSIX_FLAGS_ENABLE)) 287 + return; 288 + 289 + /* VF FLR */ 290 + ctrl &= ~PCI_MSIX_FLAGS_ENABLE; 291 + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); 292 + ctrl |= PCI_MSIX_FLAGS_ENABLE; 293 + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); 294 + } 295 + 281 296 /** 282 297 * amdgpu_irq_init - initialize interrupt handling 283 298 * ··· 583 568 void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) 584 569 { 585 570 int i, j, k; 571 + 572 + if (amdgpu_sriov_vf(adev)) 573 + amdgpu_restore_msix(adev); 586 574 587 575 for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { 588 576 if (!adev->irq.client[i].sources)
+35 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 809 809 810 810 /* query/inject/cure begin */ 811 811 int amdgpu_ras_query_error_status(struct amdgpu_device *adev, 812 - struct ras_query_if *info) 812 + struct ras_query_if *info) 813 813 { 814 814 struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); 815 815 struct ras_err_data err_data = {0, 0, 0, NULL}; ··· 1043 1043 return ret; 1044 1044 } 1045 1045 1046 - /* get the total error counts on all IPs */ 1047 - void amdgpu_ras_query_error_count(struct amdgpu_device *adev, 1048 - unsigned long *ce_count, 1049 - unsigned long *ue_count) 1046 + /** 1047 + * amdgpu_ras_query_error_count -- Get error counts of all IPs 1048 + * adev: pointer to AMD GPU device 1049 + * ce_count: pointer to an integer to be set to the count of correctible errors. 1050 + * ue_count: pointer to an integer to be set to the count of uncorrectible 1051 + * errors. 1052 + * 1053 + * If set, @ce_count or @ue_count, count and return the corresponding 1054 + * error counts in those integer pointers. Return 0 if the device 1055 + * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. 1056 + */ 1057 + int amdgpu_ras_query_error_count(struct amdgpu_device *adev, 1058 + unsigned long *ce_count, 1059 + unsigned long *ue_count) 1050 1060 { 1051 1061 struct amdgpu_ras *con = amdgpu_ras_get_context(adev); 1052 1062 struct ras_manager *obj; 1053 1063 unsigned long ce, ue; 1054 1064 1055 1065 if (!adev->ras_enabled || !con) 1056 - return; 1066 + return -EOPNOTSUPP; 1067 + 1068 + /* Don't count since no reporting. 
1069 + */ 1070 + if (!ce_count && !ue_count) 1071 + return 0; 1057 1072 1058 1073 ce = 0; 1059 1074 ue = 0; ··· 1076 1061 struct ras_query_if info = { 1077 1062 .head = obj->head, 1078 1063 }; 1064 + int res; 1079 1065 1080 - if (amdgpu_ras_query_error_status(adev, &info)) 1081 - return; 1066 + res = amdgpu_ras_query_error_status(adev, &info); 1067 + if (res) 1068 + return res; 1082 1069 1083 1070 ce += info.ce_count; 1084 1071 ue += info.ue_count; ··· 1091 1074 1092 1075 if (ue_count) 1093 1076 *ue_count = ue; 1077 + 1078 + return 0; 1094 1079 } 1095 1080 /* query/inject/cure end */ 1096 1081 ··· 2156 2137 2157 2138 /* Cache new values. 2158 2139 */ 2159 - amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); 2160 - atomic_set(&con->ras_ce_count, ce_count); 2161 - atomic_set(&con->ras_ue_count, ue_count); 2140 + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { 2141 + atomic_set(&con->ras_ce_count, ce_count); 2142 + atomic_set(&con->ras_ue_count, ue_count); 2143 + } 2162 2144 2163 2145 pm_runtime_mark_last_busy(dev->dev); 2164 2146 Out: ··· 2332 2312 2333 2313 /* Those are the cached values at init. 2334 2314 */ 2335 - amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); 2336 - atomic_set(&con->ras_ce_count, ce_count); 2337 - atomic_set(&con->ras_ue_count, ue_count); 2315 + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { 2316 + atomic_set(&con->ras_ce_count, ce_count); 2317 + atomic_set(&con->ras_ue_count, ue_count); 2318 + } 2338 2319 2339 2320 return 0; 2340 2321 cleanup:
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
··· 490 490 void amdgpu_ras_resume(struct amdgpu_device *adev); 491 491 void amdgpu_ras_suspend(struct amdgpu_device *adev); 492 492 493 - void amdgpu_ras_query_error_count(struct amdgpu_device *adev, 494 - unsigned long *ce_count, 495 - unsigned long *ue_count); 493 + int amdgpu_ras_query_error_count(struct amdgpu_device *adev, 494 + unsigned long *ce_count, 495 + unsigned long *ue_count); 496 496 497 497 /* error handling functions */ 498 498 int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev,
+5 -6
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 1758 1758 r = vm->update_funcs->commit(&params, fence); 1759 1759 1760 1760 if (table_freed) 1761 - *table_freed = *table_freed || params.table_freed; 1761 + *table_freed = params.table_freed; 1762 1762 1763 1763 error_unlock: 1764 1764 amdgpu_vm_eviction_unlock(vm); ··· 1816 1816 * @adev: amdgpu_device pointer 1817 1817 * @bo_va: requested BO and VM object 1818 1818 * @clear: if true clear the entries 1819 - * @table_freed: return true if page table is freed 1820 1819 * 1821 1820 * Fill in the page table entries for @bo_va. 1822 1821 * ··· 1823 1824 * 0 for success, -EINVAL for failure. 1824 1825 */ 1825 1826 int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, 1826 - bool clear, bool *table_freed) 1827 + bool clear) 1827 1828 { 1828 1829 struct amdgpu_bo *bo = bo_va->base.bo; 1829 1830 struct amdgpu_vm *vm = bo_va->base.vm; ··· 1902 1903 resv, mapping->start, 1903 1904 mapping->last, update_flags, 1904 1905 mapping->offset, mem, 1905 - pages_addr, last_update, table_freed); 1906 + pages_addr, last_update, NULL); 1906 1907 if (r) 1907 1908 return r; 1908 1909 } ··· 2154 2155 2155 2156 list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { 2156 2157 /* Per VM BOs never need to bo cleared in the page tables */ 2157 - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); 2158 + r = amdgpu_vm_bo_update(adev, bo_va, false); 2158 2159 if (r) 2159 2160 return r; 2160 2161 } ··· 2173 2174 else 2174 2175 clear = true; 2175 2176 2176 - r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL); 2177 + r = amdgpu_vm_bo_update(adev, bo_va, clear); 2177 2178 if (r) 2178 2179 return r; 2179 2180
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 406 406 struct dma_fence **fence, bool *free_table); 407 407 int amdgpu_vm_bo_update(struct amdgpu_device *adev, 408 408 struct amdgpu_bo_va *bo_va, 409 - bool clear, bool *table_freed); 409 + bool clear); 410 410 bool amdgpu_vm_evictable(struct amdgpu_bo *bo); 411 411 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, 412 412 struct amdgpu_bo *bo, bool evicted);
+1 -1
drivers/gpu/drm/amd/amdgpu/dce_virtual.c
··· 766 766 767 767 static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) 768 768 { 769 - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1; 769 + adev->crtc_irq.num_types = adev->mode_info.num_crtc; 770 770 adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; 771 771 } 772 772
+2 -2
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
··· 252 252 * otherwise the mailbox msg will be ruined/reseted by 253 253 * the VF FLR. 254 254 */ 255 - if (!down_read_trylock(&adev->reset_sem)) 255 + if (!down_write_trylock(&adev->reset_sem)) 256 256 return; 257 257 258 258 amdgpu_virt_fini_data_exchange(adev); ··· 268 268 269 269 flr_done: 270 270 atomic_set(&adev->in_gpu_reset, 0); 271 - up_read(&adev->reset_sem); 271 + up_write(&adev->reset_sem); 272 272 273 273 /* Trigger recovery for world switch failure if no TDR */ 274 274 if (amdgpu_device_should_recover_gpu(adev)
+2 -2
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
··· 273 273 * otherwise the mailbox msg will be ruined/reseted by 274 274 * the VF FLR. 275 275 */ 276 - if (!down_read_trylock(&adev->reset_sem)) 276 + if (!down_write_trylock(&adev->reset_sem)) 277 277 return; 278 278 279 279 amdgpu_virt_fini_data_exchange(adev); ··· 289 289 290 290 flr_done: 291 291 atomic_set(&adev->in_gpu_reset, 0); 292 - up_read(&adev->reset_sem); 292 + up_write(&adev->reset_sem); 293 293 294 294 /* Trigger recovery for world switch failure if no TDR */ 295 295 if (amdgpu_device_should_recover_gpu(adev)
+12 -33
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 1393 1393 long err = 0; 1394 1394 int i; 1395 1395 uint32_t *devices_arr = NULL; 1396 - bool table_freed = false; 1397 1396 1398 1397 dev = kfd_device_by_id(GET_GPU_ID(args->handle)); 1399 1398 if (!dev) ··· 1450 1451 goto get_mem_obj_from_handle_failed; 1451 1452 } 1452 1453 err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu( 1453 - peer->kgd, (struct kgd_mem *)mem, 1454 - peer_pdd->drm_priv, &table_freed); 1454 + peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv); 1455 1455 if (err) { 1456 1456 pr_err("Failed to map to gpu %d/%d\n", 1457 1457 i, args->n_devices); ··· 1468 1470 } 1469 1471 1470 1472 /* Flush TLBs after waiting for the page table updates to complete */ 1471 - if (table_freed) { 1472 - for (i = 0; i < args->n_devices; i++) { 1473 - peer = kfd_device_by_id(devices_arr[i]); 1474 - if (WARN_ON_ONCE(!peer)) 1475 - continue; 1476 - peer_pdd = kfd_get_process_device_data(peer, p); 1477 - if (WARN_ON_ONCE(!peer_pdd)) 1478 - continue; 1479 - kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY); 1480 - } 1473 + for (i = 0; i < args->n_devices; i++) { 1474 + peer = kfd_device_by_id(devices_arr[i]); 1475 + if (WARN_ON_ONCE(!peer)) 1476 + continue; 1477 + peer_pdd = kfd_get_process_device_data(peer, p); 1478 + if (WARN_ON_ONCE(!peer_pdd)) 1479 + continue; 1480 + kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY); 1481 1481 } 1482 + 1482 1483 kfree(devices_arr); 1483 1484 1484 1485 return err; ··· 1565 1568 } 1566 1569 args->n_success = i+1; 1567 1570 } 1568 - mutex_unlock(&p->mutex); 1569 - 1570 - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); 1571 - if (err) { 1572 - pr_debug("Sync memory failed, wait interrupted by user signal\n"); 1573 - goto sync_memory_failed; 1574 - } 1575 - 1576 - /* Flush TLBs after waiting for the page table updates to complete */ 1577 - for (i = 0; i < args->n_devices; i++) { 1578 - peer = kfd_device_by_id(devices_arr[i]); 1579 - if (WARN_ON_ONCE(!peer)) 1580 - continue; 1581 - peer_pdd = kfd_get_process_device_data(peer, 
p); 1582 - if (WARN_ON_ONCE(!peer_pdd)) 1583 - continue; 1584 - kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); 1585 - } 1586 - 1587 1571 kfree(devices_arr); 1572 + 1573 + mutex_unlock(&p->mutex); 1588 1574 1589 1575 return 0; 1590 1576 ··· 1576 1596 unmap_memory_from_gpu_failed: 1577 1597 mutex_unlock(&p->mutex); 1578 1598 copy_from_user_failed: 1579 - sync_memory_failed: 1580 1599 kfree(devices_arr); 1581 1600 return err; 1582 1601 }
+1 -2
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 714 714 if (err) 715 715 goto err_alloc_mem; 716 716 717 - err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, 718 - pdd->drm_priv, NULL); 717 + err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv); 719 718 if (err) 720 719 goto err_map_mem; 721 720
+18 -12
drivers/gpu/drm/amd/amdkfd/kfd_svm.c
··· 2375 2375 2376 2376 static void 2377 2377 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, 2378 - struct svm_range *prange, int32_t gpuidx) 2378 + int32_t gpuidx) 2379 2379 { 2380 2380 struct kfd_process_device *pdd; 2381 2381 2382 - if (gpuidx == MAX_GPU_INSTANCE) 2383 - /* fault is on different page of same range 2384 - * or fault is skipped to recover later 2385 - */ 2386 - pdd = svm_range_get_pdd_by_adev(prange, adev); 2387 - else 2388 - /* fault recovered 2389 - * or fault cannot recover because GPU no access on the range 2390 - */ 2391 - pdd = kfd_process_device_from_gpuidx(p, gpuidx); 2382 + /* fault is on different page of same range 2383 + * or fault is skipped to recover later 2384 + * or fault is on invalid virtual address 2385 + */ 2386 + if (gpuidx == MAX_GPU_INSTANCE) { 2387 + uint32_t gpuid; 2388 + int r; 2392 2389 2390 + r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx); 2391 + if (r < 0) 2392 + return; 2393 + } 2394 + 2395 + /* fault is recovered 2396 + * or fault cannot recover because GPU no access on the range 2397 + */ 2398 + pdd = kfd_process_device_from_gpuidx(p, gpuidx); 2393 2399 if (pdd) 2394 2400 WRITE_ONCE(pdd->faults, pdd->faults + 1); 2395 2401 } ··· 2531 2525 mutex_unlock(&svms->lock); 2532 2526 mmap_read_unlock(mm); 2533 2527 2534 - svm_range_count_fault(adev, p, prange, gpuidx); 2528 + svm_range_count_fault(adev, p, gpuidx); 2535 2529 2536 2530 mmput(mm); 2537 2531 out:
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 9191 9191 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ 9192 9192 defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) 9193 9193 /* restore the backlight level */ 9194 - if (dm->backlight_dev) 9194 + if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0])) 9195 9195 amdgpu_dm_backlight_set_level(dm, dm->brightness[0]); 9196 9196 #endif 9197 9197 /*
+2 -2
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c
··· 31 31 #include "dcn31_smu.h" 32 32 33 33 #include "yellow_carp_offset.h" 34 - #include "mp/mp_13_0_1_offset.h" 35 - #include "mp/mp_13_0_1_sh_mask.h" 34 + #include "mp/mp_13_0_2_offset.h" 35 + #include "mp/mp_13_0_2_sh_mask.h" 36 36 37 37 #define REG(reg_name) \ 38 38 (MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
+1 -1
drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
··· 390 390 is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal); 391 391 is_dp = dc_is_dp_signal(pipe_ctx->stream->signal); 392 392 393 - if (!is_hdmi_tmds) 393 + if (!is_hdmi_tmds && !is_dp) 394 394 return; 395 395 396 396 if (is_hdmi_tmds)
-355
drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h
··· 1 - /* 2 - * Copyright 2020 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 
21 - * 22 - * 23 - */ 24 - #ifndef _mp_13_0_1_OFFSET_HEADER 25 - #define _mp_13_0_1_OFFSET_HEADER 26 - 27 - 28 - 29 - // addressBlock: mp_SmuMp0_SmnDec 30 - // base address: 0x0 31 - #define regMP0_SMN_C2PMSG_32 0x0060 32 - #define regMP0_SMN_C2PMSG_32_BASE_IDX 0 33 - #define regMP0_SMN_C2PMSG_33 0x0061 34 - #define regMP0_SMN_C2PMSG_33_BASE_IDX 0 35 - #define regMP0_SMN_C2PMSG_34 0x0062 36 - #define regMP0_SMN_C2PMSG_34_BASE_IDX 0 37 - #define regMP0_SMN_C2PMSG_35 0x0063 38 - #define regMP0_SMN_C2PMSG_35_BASE_IDX 0 39 - #define regMP0_SMN_C2PMSG_36 0x0064 40 - #define regMP0_SMN_C2PMSG_36_BASE_IDX 0 41 - #define regMP0_SMN_C2PMSG_37 0x0065 42 - #define regMP0_SMN_C2PMSG_37_BASE_IDX 0 43 - #define regMP0_SMN_C2PMSG_38 0x0066 44 - #define regMP0_SMN_C2PMSG_38_BASE_IDX 0 45 - #define regMP0_SMN_C2PMSG_39 0x0067 46 - #define regMP0_SMN_C2PMSG_39_BASE_IDX 0 47 - #define regMP0_SMN_C2PMSG_40 0x0068 48 - #define regMP0_SMN_C2PMSG_40_BASE_IDX 0 49 - #define regMP0_SMN_C2PMSG_41 0x0069 50 - #define regMP0_SMN_C2PMSG_41_BASE_IDX 0 51 - #define regMP0_SMN_C2PMSG_42 0x006a 52 - #define regMP0_SMN_C2PMSG_42_BASE_IDX 0 53 - #define regMP0_SMN_C2PMSG_43 0x006b 54 - #define regMP0_SMN_C2PMSG_43_BASE_IDX 0 55 - #define regMP0_SMN_C2PMSG_44 0x006c 56 - #define regMP0_SMN_C2PMSG_44_BASE_IDX 0 57 - #define regMP0_SMN_C2PMSG_45 0x006d 58 - #define regMP0_SMN_C2PMSG_45_BASE_IDX 0 59 - #define regMP0_SMN_C2PMSG_46 0x006e 60 - #define regMP0_SMN_C2PMSG_46_BASE_IDX 0 61 - #define regMP0_SMN_C2PMSG_47 0x006f 62 - #define regMP0_SMN_C2PMSG_47_BASE_IDX 0 63 - #define regMP0_SMN_C2PMSG_48 0x0070 64 - #define regMP0_SMN_C2PMSG_48_BASE_IDX 0 65 - #define regMP0_SMN_C2PMSG_49 0x0071 66 - #define regMP0_SMN_C2PMSG_49_BASE_IDX 0 67 - #define regMP0_SMN_C2PMSG_50 0x0072 68 - #define regMP0_SMN_C2PMSG_50_BASE_IDX 0 69 - #define regMP0_SMN_C2PMSG_51 0x0073 70 - #define regMP0_SMN_C2PMSG_51_BASE_IDX 0 71 - #define regMP0_SMN_C2PMSG_52 0x0074 72 - #define regMP0_SMN_C2PMSG_52_BASE_IDX 0 73 - #define 
regMP0_SMN_C2PMSG_53 0x0075 74 - #define regMP0_SMN_C2PMSG_53_BASE_IDX 0 75 - #define regMP0_SMN_C2PMSG_54 0x0076 76 - #define regMP0_SMN_C2PMSG_54_BASE_IDX 0 77 - #define regMP0_SMN_C2PMSG_55 0x0077 78 - #define regMP0_SMN_C2PMSG_55_BASE_IDX 0 79 - #define regMP0_SMN_C2PMSG_56 0x0078 80 - #define regMP0_SMN_C2PMSG_56_BASE_IDX 0 81 - #define regMP0_SMN_C2PMSG_57 0x0079 82 - #define regMP0_SMN_C2PMSG_57_BASE_IDX 0 83 - #define regMP0_SMN_C2PMSG_58 0x007a 84 - #define regMP0_SMN_C2PMSG_58_BASE_IDX 0 85 - #define regMP0_SMN_C2PMSG_59 0x007b 86 - #define regMP0_SMN_C2PMSG_59_BASE_IDX 0 87 - #define regMP0_SMN_C2PMSG_60 0x007c 88 - #define regMP0_SMN_C2PMSG_60_BASE_IDX 0 89 - #define regMP0_SMN_C2PMSG_61 0x007d 90 - #define regMP0_SMN_C2PMSG_61_BASE_IDX 0 91 - #define regMP0_SMN_C2PMSG_62 0x007e 92 - #define regMP0_SMN_C2PMSG_62_BASE_IDX 0 93 - #define regMP0_SMN_C2PMSG_63 0x007f 94 - #define regMP0_SMN_C2PMSG_63_BASE_IDX 0 95 - #define regMP0_SMN_C2PMSG_64 0x0080 96 - #define regMP0_SMN_C2PMSG_64_BASE_IDX 0 97 - #define regMP0_SMN_C2PMSG_65 0x0081 98 - #define regMP0_SMN_C2PMSG_65_BASE_IDX 0 99 - #define regMP0_SMN_C2PMSG_66 0x0082 100 - #define regMP0_SMN_C2PMSG_66_BASE_IDX 0 101 - #define regMP0_SMN_C2PMSG_67 0x0083 102 - #define regMP0_SMN_C2PMSG_67_BASE_IDX 0 103 - #define regMP0_SMN_C2PMSG_68 0x0084 104 - #define regMP0_SMN_C2PMSG_68_BASE_IDX 0 105 - #define regMP0_SMN_C2PMSG_69 0x0085 106 - #define regMP0_SMN_C2PMSG_69_BASE_IDX 0 107 - #define regMP0_SMN_C2PMSG_70 0x0086 108 - #define regMP0_SMN_C2PMSG_70_BASE_IDX 0 109 - #define regMP0_SMN_C2PMSG_71 0x0087 110 - #define regMP0_SMN_C2PMSG_71_BASE_IDX 0 111 - #define regMP0_SMN_C2PMSG_72 0x0088 112 - #define regMP0_SMN_C2PMSG_72_BASE_IDX 0 113 - #define regMP0_SMN_C2PMSG_73 0x0089 114 - #define regMP0_SMN_C2PMSG_73_BASE_IDX 0 115 - #define regMP0_SMN_C2PMSG_74 0x008a 116 - #define regMP0_SMN_C2PMSG_74_BASE_IDX 0 117 - #define regMP0_SMN_C2PMSG_75 0x008b 118 - #define regMP0_SMN_C2PMSG_75_BASE_IDX 0 119 - #define 
regMP0_SMN_C2PMSG_76 0x008c 120 - #define regMP0_SMN_C2PMSG_76_BASE_IDX 0 121 - #define regMP0_SMN_C2PMSG_77 0x008d 122 - #define regMP0_SMN_C2PMSG_77_BASE_IDX 0 123 - #define regMP0_SMN_C2PMSG_78 0x008e 124 - #define regMP0_SMN_C2PMSG_78_BASE_IDX 0 125 - #define regMP0_SMN_C2PMSG_79 0x008f 126 - #define regMP0_SMN_C2PMSG_79_BASE_IDX 0 127 - #define regMP0_SMN_C2PMSG_80 0x0090 128 - #define regMP0_SMN_C2PMSG_80_BASE_IDX 0 129 - #define regMP0_SMN_C2PMSG_81 0x0091 130 - #define regMP0_SMN_C2PMSG_81_BASE_IDX 0 131 - #define regMP0_SMN_C2PMSG_82 0x0092 132 - #define regMP0_SMN_C2PMSG_82_BASE_IDX 0 133 - #define regMP0_SMN_C2PMSG_83 0x0093 134 - #define regMP0_SMN_C2PMSG_83_BASE_IDX 0 135 - #define regMP0_SMN_C2PMSG_84 0x0094 136 - #define regMP0_SMN_C2PMSG_84_BASE_IDX 0 137 - #define regMP0_SMN_C2PMSG_85 0x0095 138 - #define regMP0_SMN_C2PMSG_85_BASE_IDX 0 139 - #define regMP0_SMN_C2PMSG_86 0x0096 140 - #define regMP0_SMN_C2PMSG_86_BASE_IDX 0 141 - #define regMP0_SMN_C2PMSG_87 0x0097 142 - #define regMP0_SMN_C2PMSG_87_BASE_IDX 0 143 - #define regMP0_SMN_C2PMSG_88 0x0098 144 - #define regMP0_SMN_C2PMSG_88_BASE_IDX 0 145 - #define regMP0_SMN_C2PMSG_89 0x0099 146 - #define regMP0_SMN_C2PMSG_89_BASE_IDX 0 147 - #define regMP0_SMN_C2PMSG_90 0x009a 148 - #define regMP0_SMN_C2PMSG_90_BASE_IDX 0 149 - #define regMP0_SMN_C2PMSG_91 0x009b 150 - #define regMP0_SMN_C2PMSG_91_BASE_IDX 0 151 - #define regMP0_SMN_C2PMSG_92 0x009c 152 - #define regMP0_SMN_C2PMSG_92_BASE_IDX 0 153 - #define regMP0_SMN_C2PMSG_93 0x009d 154 - #define regMP0_SMN_C2PMSG_93_BASE_IDX 0 155 - #define regMP0_SMN_C2PMSG_94 0x009e 156 - #define regMP0_SMN_C2PMSG_94_BASE_IDX 0 157 - #define regMP0_SMN_C2PMSG_95 0x009f 158 - #define regMP0_SMN_C2PMSG_95_BASE_IDX 0 159 - #define regMP0_SMN_C2PMSG_96 0x00a0 160 - #define regMP0_SMN_C2PMSG_96_BASE_IDX 0 161 - #define regMP0_SMN_C2PMSG_97 0x00a1 162 - #define regMP0_SMN_C2PMSG_97_BASE_IDX 0 163 - #define regMP0_SMN_C2PMSG_98 0x00a2 164 - #define 
regMP0_SMN_C2PMSG_98_BASE_IDX 0 165 - #define regMP0_SMN_C2PMSG_99 0x00a3 166 - #define regMP0_SMN_C2PMSG_99_BASE_IDX 0 167 - #define regMP0_SMN_C2PMSG_100 0x00a4 168 - #define regMP0_SMN_C2PMSG_100_BASE_IDX 0 169 - #define regMP0_SMN_C2PMSG_101 0x00a5 170 - #define regMP0_SMN_C2PMSG_101_BASE_IDX 0 171 - #define regMP0_SMN_C2PMSG_102 0x00a6 172 - #define regMP0_SMN_C2PMSG_102_BASE_IDX 0 173 - #define regMP0_SMN_C2PMSG_103 0x00a7 174 - #define regMP0_SMN_C2PMSG_103_BASE_IDX 0 175 - #define regMP0_SMN_IH_CREDIT 0x00c1 176 - #define regMP0_SMN_IH_CREDIT_BASE_IDX 0 177 - #define regMP0_SMN_IH_SW_INT 0x00c2 178 - #define regMP0_SMN_IH_SW_INT_BASE_IDX 0 179 - #define regMP0_SMN_IH_SW_INT_CTRL 0x00c3 180 - #define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0 181 - 182 - 183 - // addressBlock: mp_SmuMp1_SmnDec 184 - // base address: 0x0 185 - #define regMP1_SMN_C2PMSG_32 0x0260 186 - #define regMP1_SMN_C2PMSG_32_BASE_IDX 0 187 - #define regMP1_SMN_C2PMSG_33 0x0261 188 - #define regMP1_SMN_C2PMSG_33_BASE_IDX 0 189 - #define regMP1_SMN_C2PMSG_34 0x0262 190 - #define regMP1_SMN_C2PMSG_34_BASE_IDX 0 191 - #define regMP1_SMN_C2PMSG_35 0x0263 192 - #define regMP1_SMN_C2PMSG_35_BASE_IDX 0 193 - #define regMP1_SMN_C2PMSG_36 0x0264 194 - #define regMP1_SMN_C2PMSG_36_BASE_IDX 0 195 - #define regMP1_SMN_C2PMSG_37 0x0265 196 - #define regMP1_SMN_C2PMSG_37_BASE_IDX 0 197 - #define regMP1_SMN_C2PMSG_38 0x0266 198 - #define regMP1_SMN_C2PMSG_38_BASE_IDX 0 199 - #define regMP1_SMN_C2PMSG_39 0x0267 200 - #define regMP1_SMN_C2PMSG_39_BASE_IDX 0 201 - #define regMP1_SMN_C2PMSG_40 0x0268 202 - #define regMP1_SMN_C2PMSG_40_BASE_IDX 0 203 - #define regMP1_SMN_C2PMSG_41 0x0269 204 - #define regMP1_SMN_C2PMSG_41_BASE_IDX 0 205 - #define regMP1_SMN_C2PMSG_42 0x026a 206 - #define regMP1_SMN_C2PMSG_42_BASE_IDX 0 207 - #define regMP1_SMN_C2PMSG_43 0x026b 208 - #define regMP1_SMN_C2PMSG_43_BASE_IDX 0 209 - #define regMP1_SMN_C2PMSG_44 0x026c 210 - #define regMP1_SMN_C2PMSG_44_BASE_IDX 0 211 - #define 
regMP1_SMN_C2PMSG_45 0x026d 212 - #define regMP1_SMN_C2PMSG_45_BASE_IDX 0 213 - #define regMP1_SMN_C2PMSG_46 0x026e 214 - #define regMP1_SMN_C2PMSG_46_BASE_IDX 0 215 - #define regMP1_SMN_C2PMSG_47 0x026f 216 - #define regMP1_SMN_C2PMSG_47_BASE_IDX 0 217 - #define regMP1_SMN_C2PMSG_48 0x0270 218 - #define regMP1_SMN_C2PMSG_48_BASE_IDX 0 219 - #define regMP1_SMN_C2PMSG_49 0x0271 220 - #define regMP1_SMN_C2PMSG_49_BASE_IDX 0 221 - #define regMP1_SMN_C2PMSG_50 0x0272 222 - #define regMP1_SMN_C2PMSG_50_BASE_IDX 0 223 - #define regMP1_SMN_C2PMSG_51 0x0273 224 - #define regMP1_SMN_C2PMSG_51_BASE_IDX 0 225 - #define regMP1_SMN_C2PMSG_52 0x0274 226 - #define regMP1_SMN_C2PMSG_52_BASE_IDX 0 227 - #define regMP1_SMN_C2PMSG_53 0x0275 228 - #define regMP1_SMN_C2PMSG_53_BASE_IDX 0 229 - #define regMP1_SMN_C2PMSG_54 0x0276 230 - #define regMP1_SMN_C2PMSG_54_BASE_IDX 0 231 - #define regMP1_SMN_C2PMSG_55 0x0277 232 - #define regMP1_SMN_C2PMSG_55_BASE_IDX 0 233 - #define regMP1_SMN_C2PMSG_56 0x0278 234 - #define regMP1_SMN_C2PMSG_56_BASE_IDX 0 235 - #define regMP1_SMN_C2PMSG_57 0x0279 236 - #define regMP1_SMN_C2PMSG_57_BASE_IDX 0 237 - #define regMP1_SMN_C2PMSG_58 0x027a 238 - #define regMP1_SMN_C2PMSG_58_BASE_IDX 0 239 - #define regMP1_SMN_C2PMSG_59 0x027b 240 - #define regMP1_SMN_C2PMSG_59_BASE_IDX 0 241 - #define regMP1_SMN_C2PMSG_60 0x027c 242 - #define regMP1_SMN_C2PMSG_60_BASE_IDX 0 243 - #define regMP1_SMN_C2PMSG_61 0x027d 244 - #define regMP1_SMN_C2PMSG_61_BASE_IDX 0 245 - #define regMP1_SMN_C2PMSG_62 0x027e 246 - #define regMP1_SMN_C2PMSG_62_BASE_IDX 0 247 - #define regMP1_SMN_C2PMSG_63 0x027f 248 - #define regMP1_SMN_C2PMSG_63_BASE_IDX 0 249 - #define regMP1_SMN_C2PMSG_64 0x0280 250 - #define regMP1_SMN_C2PMSG_64_BASE_IDX 0 251 - #define regMP1_SMN_C2PMSG_65 0x0281 252 - #define regMP1_SMN_C2PMSG_65_BASE_IDX 0 253 - #define regMP1_SMN_C2PMSG_66 0x0282 254 - #define regMP1_SMN_C2PMSG_66_BASE_IDX 0 255 - #define regMP1_SMN_C2PMSG_67 0x0283 256 - #define 
regMP1_SMN_C2PMSG_67_BASE_IDX 0 257 - #define regMP1_SMN_C2PMSG_68 0x0284 258 - #define regMP1_SMN_C2PMSG_68_BASE_IDX 0 259 - #define regMP1_SMN_C2PMSG_69 0x0285 260 - #define regMP1_SMN_C2PMSG_69_BASE_IDX 0 261 - #define regMP1_SMN_C2PMSG_70 0x0286 262 - #define regMP1_SMN_C2PMSG_70_BASE_IDX 0 263 - #define regMP1_SMN_C2PMSG_71 0x0287 264 - #define regMP1_SMN_C2PMSG_71_BASE_IDX 0 265 - #define regMP1_SMN_C2PMSG_72 0x0288 266 - #define regMP1_SMN_C2PMSG_72_BASE_IDX 0 267 - #define regMP1_SMN_C2PMSG_73 0x0289 268 - #define regMP1_SMN_C2PMSG_73_BASE_IDX 0 269 - #define regMP1_SMN_C2PMSG_74 0x028a 270 - #define regMP1_SMN_C2PMSG_74_BASE_IDX 0 271 - #define regMP1_SMN_C2PMSG_75 0x028b 272 - #define regMP1_SMN_C2PMSG_75_BASE_IDX 0 273 - #define regMP1_SMN_C2PMSG_76 0x028c 274 - #define regMP1_SMN_C2PMSG_76_BASE_IDX 0 275 - #define regMP1_SMN_C2PMSG_77 0x028d 276 - #define regMP1_SMN_C2PMSG_77_BASE_IDX 0 277 - #define regMP1_SMN_C2PMSG_78 0x028e 278 - #define regMP1_SMN_C2PMSG_78_BASE_IDX 0 279 - #define regMP1_SMN_C2PMSG_79 0x028f 280 - #define regMP1_SMN_C2PMSG_79_BASE_IDX 0 281 - #define regMP1_SMN_C2PMSG_80 0x0290 282 - #define regMP1_SMN_C2PMSG_80_BASE_IDX 0 283 - #define regMP1_SMN_C2PMSG_81 0x0291 284 - #define regMP1_SMN_C2PMSG_81_BASE_IDX 0 285 - #define regMP1_SMN_C2PMSG_82 0x0292 286 - #define regMP1_SMN_C2PMSG_82_BASE_IDX 0 287 - #define regMP1_SMN_C2PMSG_83 0x0293 288 - #define regMP1_SMN_C2PMSG_83_BASE_IDX 0 289 - #define regMP1_SMN_C2PMSG_84 0x0294 290 - #define regMP1_SMN_C2PMSG_84_BASE_IDX 0 291 - #define regMP1_SMN_C2PMSG_85 0x0295 292 - #define regMP1_SMN_C2PMSG_85_BASE_IDX 0 293 - #define regMP1_SMN_C2PMSG_86 0x0296 294 - #define regMP1_SMN_C2PMSG_86_BASE_IDX 0 295 - #define regMP1_SMN_C2PMSG_87 0x0297 296 - #define regMP1_SMN_C2PMSG_87_BASE_IDX 0 297 - #define regMP1_SMN_C2PMSG_88 0x0298 298 - #define regMP1_SMN_C2PMSG_88_BASE_IDX 0 299 - #define regMP1_SMN_C2PMSG_89 0x0299 300 - #define regMP1_SMN_C2PMSG_89_BASE_IDX 0 301 - #define 
regMP1_SMN_C2PMSG_90 0x029a 302 - #define regMP1_SMN_C2PMSG_90_BASE_IDX 0 303 - #define regMP1_SMN_C2PMSG_91 0x029b 304 - #define regMP1_SMN_C2PMSG_91_BASE_IDX 0 305 - #define regMP1_SMN_C2PMSG_92 0x029c 306 - #define regMP1_SMN_C2PMSG_92_BASE_IDX 0 307 - #define regMP1_SMN_C2PMSG_93 0x029d 308 - #define regMP1_SMN_C2PMSG_93_BASE_IDX 0 309 - #define regMP1_SMN_C2PMSG_94 0x029e 310 - #define regMP1_SMN_C2PMSG_94_BASE_IDX 0 311 - #define regMP1_SMN_C2PMSG_95 0x029f 312 - #define regMP1_SMN_C2PMSG_95_BASE_IDX 0 313 - #define regMP1_SMN_C2PMSG_96 0x02a0 314 - #define regMP1_SMN_C2PMSG_96_BASE_IDX 0 315 - #define regMP1_SMN_C2PMSG_97 0x02a1 316 - #define regMP1_SMN_C2PMSG_97_BASE_IDX 0 317 - #define regMP1_SMN_C2PMSG_98 0x02a2 318 - #define regMP1_SMN_C2PMSG_98_BASE_IDX 0 319 - #define regMP1_SMN_C2PMSG_99 0x02a3 320 - #define regMP1_SMN_C2PMSG_99_BASE_IDX 0 321 - #define regMP1_SMN_C2PMSG_100 0x02a4 322 - #define regMP1_SMN_C2PMSG_100_BASE_IDX 0 323 - #define regMP1_SMN_C2PMSG_101 0x02a5 324 - #define regMP1_SMN_C2PMSG_101_BASE_IDX 0 325 - #define regMP1_SMN_C2PMSG_102 0x02a6 326 - #define regMP1_SMN_C2PMSG_102_BASE_IDX 0 327 - #define regMP1_SMN_C2PMSG_103 0x02a7 328 - #define regMP1_SMN_C2PMSG_103_BASE_IDX 0 329 - #define regMP1_SMN_IH_CREDIT 0x02c1 330 - #define regMP1_SMN_IH_CREDIT_BASE_IDX 0 331 - #define regMP1_SMN_IH_SW_INT 0x02c2 332 - #define regMP1_SMN_IH_SW_INT_BASE_IDX 0 333 - #define regMP1_SMN_IH_SW_INT_CTRL 0x02c3 334 - #define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0 335 - #define regMP1_SMN_FPS_CNT 0x02c4 336 - #define regMP1_SMN_FPS_CNT_BASE_IDX 0 337 - #define regMP1_SMN_EXT_SCRATCH0 0x0340 338 - #define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0 339 - #define regMP1_SMN_EXT_SCRATCH1 0x0341 340 - #define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0 341 - #define regMP1_SMN_EXT_SCRATCH2 0x0342 342 - #define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0 343 - #define regMP1_SMN_EXT_SCRATCH3 0x0343 344 - #define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0 345 - #define regMP1_SMN_EXT_SCRATCH4 
0x0344 346 - #define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0 347 - #define regMP1_SMN_EXT_SCRATCH5 0x0345 348 - #define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0 349 - #define regMP1_SMN_EXT_SCRATCH6 0x0346 350 - #define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0 351 - #define regMP1_SMN_EXT_SCRATCH7 0x0347 352 - #define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0 353 - 354 - 355 - #endif
-531
drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h
··· 1 - /* 2 - * Copyright 2020 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 
21 - * 22 - * 23 - */ 24 - #ifndef _mp_13_0_1_SH_MASK_HEADER 25 - #define _mp_13_0_1_SH_MASK_HEADER 26 - 27 - 28 - // addressBlock: mp_SmuMp0_SmnDec 29 - //MP0_SMN_C2PMSG_32 30 - #define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 31 - #define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL 32 - //MP0_SMN_C2PMSG_33 33 - #define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 34 - #define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL 35 - //MP0_SMN_C2PMSG_34 36 - #define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 37 - #define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL 38 - //MP0_SMN_C2PMSG_35 39 - #define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 40 - #define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL 41 - //MP0_SMN_C2PMSG_36 42 - #define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 43 - #define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL 44 - //MP0_SMN_C2PMSG_37 45 - #define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 46 - #define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL 47 - //MP0_SMN_C2PMSG_38 48 - #define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 49 - #define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL 50 - //MP0_SMN_C2PMSG_39 51 - #define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 52 - #define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL 53 - //MP0_SMN_C2PMSG_40 54 - #define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 55 - #define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL 56 - //MP0_SMN_C2PMSG_41 57 - #define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 58 - #define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL 59 - //MP0_SMN_C2PMSG_42 60 - #define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 61 - #define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL 62 - //MP0_SMN_C2PMSG_43 63 - #define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 64 - #define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL 65 - //MP0_SMN_C2PMSG_44 66 - #define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 67 - #define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL 68 - //MP0_SMN_C2PMSG_45 69 - #define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 70 - #define MP0_SMN_C2PMSG_45__CONTENT_MASK 
0xFFFFFFFFL 71 - //MP0_SMN_C2PMSG_46 72 - #define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 73 - #define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL 74 - //MP0_SMN_C2PMSG_47 75 - #define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 76 - #define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL 77 - //MP0_SMN_C2PMSG_48 78 - #define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 79 - #define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL 80 - //MP0_SMN_C2PMSG_49 81 - #define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 82 - #define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL 83 - //MP0_SMN_C2PMSG_50 84 - #define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 85 - #define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL 86 - //MP0_SMN_C2PMSG_51 87 - #define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 88 - #define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL 89 - //MP0_SMN_C2PMSG_52 90 - #define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 91 - #define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL 92 - //MP0_SMN_C2PMSG_53 93 - #define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 94 - #define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL 95 - //MP0_SMN_C2PMSG_54 96 - #define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 97 - #define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL 98 - //MP0_SMN_C2PMSG_55 99 - #define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 100 - #define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL 101 - //MP0_SMN_C2PMSG_56 102 - #define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 103 - #define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL 104 - //MP0_SMN_C2PMSG_57 105 - #define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 106 - #define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL 107 - //MP0_SMN_C2PMSG_58 108 - #define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 109 - #define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL 110 - //MP0_SMN_C2PMSG_59 111 - #define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 112 - #define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL 113 - //MP0_SMN_C2PMSG_60 114 - #define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 115 - #define 
MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL 116 - //MP0_SMN_C2PMSG_61 117 - #define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 118 - #define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL 119 - //MP0_SMN_C2PMSG_62 120 - #define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 121 - #define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL 122 - //MP0_SMN_C2PMSG_63 123 - #define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 124 - #define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL 125 - //MP0_SMN_C2PMSG_64 126 - #define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 127 - #define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL 128 - //MP0_SMN_C2PMSG_65 129 - #define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 130 - #define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL 131 - //MP0_SMN_C2PMSG_66 132 - #define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 133 - #define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL 134 - //MP0_SMN_C2PMSG_67 135 - #define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 136 - #define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL 137 - //MP0_SMN_C2PMSG_68 138 - #define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 139 - #define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL 140 - //MP0_SMN_C2PMSG_69 141 - #define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 142 - #define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL 143 - //MP0_SMN_C2PMSG_70 144 - #define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 145 - #define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL 146 - //MP0_SMN_C2PMSG_71 147 - #define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 148 - #define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL 149 - //MP0_SMN_C2PMSG_72 150 - #define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 151 - #define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL 152 - //MP0_SMN_C2PMSG_73 153 - #define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 154 - #define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL 155 - //MP0_SMN_C2PMSG_74 156 - #define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 157 - #define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL 158 - //MP0_SMN_C2PMSG_75 159 - #define 
MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 160 - #define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL 161 - //MP0_SMN_C2PMSG_76 162 - #define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 163 - #define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL 164 - //MP0_SMN_C2PMSG_77 165 - #define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 166 - #define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL 167 - //MP0_SMN_C2PMSG_78 168 - #define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 169 - #define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL 170 - //MP0_SMN_C2PMSG_79 171 - #define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 172 - #define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL 173 - //MP0_SMN_C2PMSG_80 174 - #define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 175 - #define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL 176 - //MP0_SMN_C2PMSG_81 177 - #define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 178 - #define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL 179 - //MP0_SMN_C2PMSG_82 180 - #define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 181 - #define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL 182 - //MP0_SMN_C2PMSG_83 183 - #define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 184 - #define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL 185 - //MP0_SMN_C2PMSG_84 186 - #define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 187 - #define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL 188 - //MP0_SMN_C2PMSG_85 189 - #define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 190 - #define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL 191 - //MP0_SMN_C2PMSG_86 192 - #define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 193 - #define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL 194 - //MP0_SMN_C2PMSG_87 195 - #define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 196 - #define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL 197 - //MP0_SMN_C2PMSG_88 198 - #define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 199 - #define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL 200 - //MP0_SMN_C2PMSG_89 201 - #define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 202 - #define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL 
203 - //MP0_SMN_C2PMSG_90 204 - #define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 205 - #define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL 206 - //MP0_SMN_C2PMSG_91 207 - #define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 208 - #define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL 209 - //MP0_SMN_C2PMSG_92 210 - #define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 211 - #define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL 212 - //MP0_SMN_C2PMSG_93 213 - #define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 214 - #define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL 215 - //MP0_SMN_C2PMSG_94 216 - #define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 217 - #define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL 218 - //MP0_SMN_C2PMSG_95 219 - #define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 220 - #define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL 221 - //MP0_SMN_C2PMSG_96 222 - #define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 223 - #define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL 224 - //MP0_SMN_C2PMSG_97 225 - #define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 226 - #define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL 227 - //MP0_SMN_C2PMSG_98 228 - #define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 229 - #define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL 230 - //MP0_SMN_C2PMSG_99 231 - #define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 232 - #define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL 233 - //MP0_SMN_C2PMSG_100 234 - #define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 235 - #define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL 236 - //MP0_SMN_C2PMSG_101 237 - #define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 238 - #define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL 239 - //MP0_SMN_C2PMSG_102 240 - #define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 241 - #define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL 242 - //MP0_SMN_C2PMSG_103 243 - #define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 244 - #define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL 245 - //MP0_SMN_IH_CREDIT 246 - #define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 
247 - #define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 248 - #define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L 249 - #define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L 250 - //MP0_SMN_IH_SW_INT 251 - #define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0 252 - #define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8 253 - #define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL 254 - #define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L 255 - //MP0_SMN_IH_SW_INT_CTRL 256 - #define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 257 - #define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 258 - #define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L 259 - #define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L 260 - 261 - 262 - // addressBlock: mp_SmuMp1Pub_CruDec 263 - //MP1_FIRMWARE_FLAGS 264 - #define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0 265 - #define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1 266 - #define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L 267 - #define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL 268 - 269 - 270 - // addressBlock: mp_SmuMp1_SmnDec 271 - //MP1_SMN_C2PMSG_32 272 - #define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 273 - #define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL 274 - //MP1_SMN_C2PMSG_33 275 - #define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 276 - #define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL 277 - //MP1_SMN_C2PMSG_34 278 - #define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 279 - #define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL 280 - //MP1_SMN_C2PMSG_35 281 - #define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 282 - #define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL 283 - //MP1_SMN_C2PMSG_36 284 - #define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 285 - #define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL 286 - //MP1_SMN_C2PMSG_37 287 - #define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 288 - #define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL 289 - //MP1_SMN_C2PMSG_38 290 - #define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 291 - #define 
MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL 292 - //MP1_SMN_C2PMSG_39 293 - #define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 294 - #define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL 295 - //MP1_SMN_C2PMSG_40 296 - #define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 297 - #define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL 298 - //MP1_SMN_C2PMSG_41 299 - #define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 300 - #define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL 301 - //MP1_SMN_C2PMSG_42 302 - #define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 303 - #define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL 304 - //MP1_SMN_C2PMSG_43 305 - #define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 306 - #define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL 307 - //MP1_SMN_C2PMSG_44 308 - #define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 309 - #define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL 310 - //MP1_SMN_C2PMSG_45 311 - #define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 312 - #define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL 313 - //MP1_SMN_C2PMSG_46 314 - #define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 315 - #define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL 316 - //MP1_SMN_C2PMSG_47 317 - #define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 318 - #define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL 319 - //MP1_SMN_C2PMSG_48 320 - #define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 321 - #define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL 322 - //MP1_SMN_C2PMSG_49 323 - #define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 324 - #define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL 325 - //MP1_SMN_C2PMSG_50 326 - #define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 327 - #define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL 328 - //MP1_SMN_C2PMSG_51 329 - #define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 330 - #define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL 331 - //MP1_SMN_C2PMSG_52 332 - #define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 333 - #define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL 334 - //MP1_SMN_C2PMSG_53 335 - #define 
MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 336 - #define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL 337 - //MP1_SMN_C2PMSG_54 338 - #define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 339 - #define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL 340 - //MP1_SMN_C2PMSG_55 341 - #define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 342 - #define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL 343 - //MP1_SMN_C2PMSG_56 344 - #define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 345 - #define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL 346 - //MP1_SMN_C2PMSG_57 347 - #define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 348 - #define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL 349 - //MP1_SMN_C2PMSG_58 350 - #define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 351 - #define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL 352 - //MP1_SMN_C2PMSG_59 353 - #define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 354 - #define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL 355 - //MP1_SMN_C2PMSG_60 356 - #define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 357 - #define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL 358 - //MP1_SMN_C2PMSG_61 359 - #define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 360 - #define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL 361 - //MP1_SMN_C2PMSG_62 362 - #define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 363 - #define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL 364 - //MP1_SMN_C2PMSG_63 365 - #define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 366 - #define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL 367 - //MP1_SMN_C2PMSG_64 368 - #define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 369 - #define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL 370 - //MP1_SMN_C2PMSG_65 371 - #define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 372 - #define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL 373 - //MP1_SMN_C2PMSG_66 374 - #define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 375 - #define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL 376 - //MP1_SMN_C2PMSG_67 377 - #define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 378 - #define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL 
379 - //MP1_SMN_C2PMSG_68 380 - #define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 381 - #define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL 382 - //MP1_SMN_C2PMSG_69 383 - #define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 384 - #define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL 385 - //MP1_SMN_C2PMSG_70 386 - #define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 387 - #define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL 388 - //MP1_SMN_C2PMSG_71 389 - #define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 390 - #define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL 391 - //MP1_SMN_C2PMSG_72 392 - #define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 393 - #define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL 394 - //MP1_SMN_C2PMSG_73 395 - #define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 396 - #define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL 397 - //MP1_SMN_C2PMSG_74 398 - #define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 399 - #define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL 400 - //MP1_SMN_C2PMSG_75 401 - #define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 402 - #define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL 403 - //MP1_SMN_C2PMSG_76 404 - #define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 405 - #define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL 406 - //MP1_SMN_C2PMSG_77 407 - #define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 408 - #define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL 409 - //MP1_SMN_C2PMSG_78 410 - #define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 411 - #define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL 412 - //MP1_SMN_C2PMSG_79 413 - #define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 414 - #define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL 415 - //MP1_SMN_C2PMSG_80 416 - #define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 417 - #define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL 418 - //MP1_SMN_C2PMSG_81 419 - #define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 420 - #define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL 421 - //MP1_SMN_C2PMSG_82 422 - #define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 423 - #define 
MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL 424 - //MP1_SMN_C2PMSG_83 425 - #define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 426 - #define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL 427 - //MP1_SMN_C2PMSG_84 428 - #define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 429 - #define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL 430 - //MP1_SMN_C2PMSG_85 431 - #define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 432 - #define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL 433 - //MP1_SMN_C2PMSG_86 434 - #define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 435 - #define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL 436 - //MP1_SMN_C2PMSG_87 437 - #define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 438 - #define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL 439 - //MP1_SMN_C2PMSG_88 440 - #define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 441 - #define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL 442 - //MP1_SMN_C2PMSG_89 443 - #define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 444 - #define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL 445 - //MP1_SMN_C2PMSG_90 446 - #define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 447 - #define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL 448 - //MP1_SMN_C2PMSG_91 449 - #define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 450 - #define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL 451 - //MP1_SMN_C2PMSG_92 452 - #define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 453 - #define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL 454 - //MP1_SMN_C2PMSG_93 455 - #define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 456 - #define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL 457 - //MP1_SMN_C2PMSG_94 458 - #define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 459 - #define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL 460 - //MP1_SMN_C2PMSG_95 461 - #define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 462 - #define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL 463 - //MP1_SMN_C2PMSG_96 464 - #define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 465 - #define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL 466 - //MP1_SMN_C2PMSG_97 467 - #define 
MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 468 - #define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL 469 - //MP1_SMN_C2PMSG_98 470 - #define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 471 - #define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL 472 - //MP1_SMN_C2PMSG_99 473 - #define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 474 - #define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL 475 - //MP1_SMN_C2PMSG_100 476 - #define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 477 - #define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL 478 - //MP1_SMN_C2PMSG_101 479 - #define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 480 - #define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL 481 - //MP1_SMN_C2PMSG_102 482 - #define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 483 - #define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL 484 - //MP1_SMN_C2PMSG_103 485 - #define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 486 - #define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL 487 - //MP1_SMN_IH_CREDIT 488 - #define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 489 - #define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 490 - #define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L 491 - #define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L 492 - //MP1_SMN_IH_SW_INT 493 - #define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0 494 - #define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8 495 - #define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL 496 - #define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L 497 - //MP1_SMN_IH_SW_INT_CTRL 498 - #define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 499 - #define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 500 - #define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L 501 - #define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L 502 - //MP1_SMN_FPS_CNT 503 - #define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0 504 - #define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL 505 - //MP1_SMN_EXT_SCRATCH0 506 - #define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0 507 - #define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL 508 - //MP1_SMN_EXT_SCRATCH1 509 - #define 
MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0 510 - #define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL 511 - //MP1_SMN_EXT_SCRATCH2 512 - #define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0 513 - #define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL 514 - //MP1_SMN_EXT_SCRATCH3 515 - #define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0 516 - #define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL 517 - //MP1_SMN_EXT_SCRATCH4 518 - #define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0 519 - #define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL 520 - //MP1_SMN_EXT_SCRATCH5 521 - #define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0 522 - #define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL 523 - //MP1_SMN_EXT_SCRATCH6 524 - #define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0 525 - #define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL 526 - //MP1_SMN_EXT_SCRATCH7 527 - #define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0 528 - #define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL 529 - 530 - 531 - #endif
+1
drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
··· 26 26 #include "amdgpu_smu.h" 27 27 28 28 #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF 29 + #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03 29 30 #define SMU13_DRIVER_IF_VERSION_ALDE 0x07 30 31 31 32 /* MP Apertures */
-57
drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h
··· 1 - /* 2 - * Copyright 2020 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 
21 - * 22 - */ 23 - #ifndef __SMU_V13_0_1_H__ 24 - #define __SMU_V13_0_1_H__ 25 - 26 - #include "amdgpu_smu.h" 27 - 28 - #define SMU13_0_1_DRIVER_IF_VERSION_INV 0xFFFFFFFF 29 - #define SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP 0x3 30 - 31 - /* MP Apertures */ 32 - #define MP0_Public 0x03800000 33 - #define MP0_SRAM 0x03900000 34 - #define MP1_Public 0x03b00000 35 - #define MP1_SRAM 0x03c00004 36 - 37 - /* address block */ 38 - #define smnMP1_FIRMWARE_FLAGS 0x3010024 39 - 40 - 41 - #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) 42 - 43 - int smu_v13_0_1_check_fw_status(struct smu_context *smu); 44 - 45 - int smu_v13_0_1_check_fw_version(struct smu_context *smu); 46 - 47 - int smu_v13_0_1_fini_smc_tables(struct smu_context *smu); 48 - 49 - int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu); 50 - 51 - int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu); 52 - 53 - int smu_v13_0_1_set_driver_table_location(struct smu_context *smu); 54 - 55 - int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable); 56 - #endif 57 - #endif
+1
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
··· 1528 1528 case CHIP_SIENNA_CICHLID: 1529 1529 case CHIP_NAVY_FLOUNDER: 1530 1530 case CHIP_DIMGREY_CAVEFISH: 1531 + case CHIP_BEIGE_GOBY: 1531 1532 if (amdgpu_runtime_pm == 2) 1532 1533 ret = smu_cmn_send_smc_msg_with_param(smu, 1533 1534 SMU_MSG_EnterBaco,
+1 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
··· 23 23 # Makefile for the 'smu manager' sub-component of powerplay. 24 24 # It provides the smu management services for the driver. 25 25 26 - SMU13_MGR = smu_v13_0.o aldebaran_ppt.o smu_v13_0_1.o yellow_carp_ppt.o 26 + SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o 27 27 28 28 AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR)) 29 29
+24
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
··· 210 210 case CHIP_ALDEBARAN: 211 211 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; 212 212 break; 213 + case CHIP_YELLOW_CARP: 214 + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP; 215 + break; 213 216 default: 214 217 dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); 215 218 smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV; ··· 694 691 695 692 failed: 696 693 mutex_unlock(&feature->mutex); 694 + return ret; 695 + } 696 + 697 + int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable) 698 + { 699 + int ret = 0; 700 + struct amdgpu_device *adev = smu->adev; 701 + 702 + switch (adev->asic_type) { 703 + case CHIP_YELLOW_CARP: 704 + if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 705 + return 0; 706 + if (enable) 707 + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); 708 + else 709 + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); 710 + break; 711 + default: 712 + break; 713 + } 714 + 697 715 return ret; 698 716 } 699 717
-311
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c
··· 1 - /* 2 - * Copyright 2020 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 21 - */ 22 - 23 - //#include <linux/reboot.h> 24 - 25 - #define SWSMU_CODE_LAYER_L3 26 - 27 - #include "amdgpu.h" 28 - #include "amdgpu_smu.h" 29 - #include "smu_v13_0_1.h" 30 - #include "soc15_common.h" 31 - #include "smu_cmn.h" 32 - #include "atomfirmware.h" 33 - #include "amdgpu_atomfirmware.h" 34 - #include "amdgpu_atombios.h" 35 - #include "atom.h" 36 - 37 - #include "asic_reg/mp/mp_13_0_1_offset.h" 38 - #include "asic_reg/mp/mp_13_0_1_sh_mask.h" 39 - 40 - /* 41 - * DO NOT use these for err/warn/info/debug messages. 42 - * Use dev_err, dev_warn, dev_info and dev_dbg instead. 43 - * They are more MGPU friendly. 
44 - */ 45 - #undef pr_err 46 - #undef pr_warn 47 - #undef pr_info 48 - #undef pr_debug 49 - 50 - int smu_v13_0_1_check_fw_status(struct smu_context *smu) 51 - { 52 - struct amdgpu_device *adev = smu->adev; 53 - uint32_t mp1_fw_flags; 54 - 55 - mp1_fw_flags = RREG32_PCIE(MP1_Public | 56 - (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); 57 - 58 - if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> 59 - MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) 60 - return 0; 61 - 62 - return -EIO; 63 - } 64 - 65 - int smu_v13_0_1_check_fw_version(struct smu_context *smu) 66 - { 67 - uint32_t if_version = 0xff, smu_version = 0xff; 68 - uint16_t smu_major; 69 - uint8_t smu_minor, smu_debug; 70 - int ret = 0; 71 - 72 - ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); 73 - if (ret) 74 - return ret; 75 - 76 - smu_major = (smu_version >> 16) & 0xffff; 77 - smu_minor = (smu_version >> 8) & 0xff; 78 - smu_debug = (smu_version >> 0) & 0xff; 79 - 80 - switch (smu->adev->asic_type) { 81 - case CHIP_YELLOW_CARP: 82 - smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP; 83 - break; 84 - 85 - default: 86 - dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); 87 - smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_INV; 88 - break; 89 - } 90 - 91 - dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n", 92 - smu_version, smu_major, smu_minor, smu_debug); 93 - 94 - /* 95 - * 1. if_version mismatch is not critical as our fw is designed 96 - * to be backward compatible. 97 - * 2. New fw usually brings some optimizations. But that's visible 98 - * only on the paired driver. 99 - * Considering above, we just leave user a warning message instead 100 - * of halt driver loading. 
101 - */ 102 - if (if_version != smu->smc_driver_if_version) { 103 - dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " 104 - "smu fw version = 0x%08x (%d.%d.%d)\n", 105 - smu->smc_driver_if_version, if_version, 106 - smu_version, smu_major, smu_minor, smu_debug); 107 - dev_warn(smu->adev->dev, "SMU driver if version not matched\n"); 108 - } 109 - 110 - return ret; 111 - } 112 - 113 - int smu_v13_0_1_fini_smc_tables(struct smu_context *smu) 114 - { 115 - struct smu_table_context *smu_table = &smu->smu_table; 116 - 117 - kfree(smu_table->clocks_table); 118 - smu_table->clocks_table = NULL; 119 - 120 - kfree(smu_table->metrics_table); 121 - smu_table->metrics_table = NULL; 122 - 123 - kfree(smu_table->watermarks_table); 124 - smu_table->watermarks_table = NULL; 125 - 126 - return 0; 127 - } 128 - 129 - static int smu_v13_0_1_atom_get_smu_clockinfo(struct amdgpu_device *adev, 130 - uint8_t clk_id, 131 - uint8_t syspll_id, 132 - uint32_t *clk_freq) 133 - { 134 - struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; 135 - struct atom_get_smu_clock_info_output_parameters_v3_1 *output; 136 - int ret, index; 137 - 138 - input.clk_id = clk_id; 139 - input.syspll_id = syspll_id; 140 - input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; 141 - index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, 142 - getsmuclockinfo); 143 - 144 - ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, 145 - (uint32_t *)&input); 146 - if (ret) 147 - return -EINVAL; 148 - 149 - output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; 150 - *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; 151 - 152 - return 0; 153 - } 154 - 155 - int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu) 156 - { 157 - int ret, index; 158 - uint16_t size; 159 - uint8_t frev, crev; 160 - struct atom_common_table_header *header; 161 - struct atom_firmware_info_v3_4 *v_3_4; 
162 - struct atom_firmware_info_v3_3 *v_3_3; 163 - struct atom_firmware_info_v3_1 *v_3_1; 164 - 165 - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, 166 - firmwareinfo); 167 - 168 - ret = amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, 169 - (uint8_t **)&header); 170 - if (ret) 171 - return ret; 172 - 173 - if (header->format_revision != 3) { 174 - dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n"); 175 - return -EINVAL; 176 - } 177 - 178 - switch (header->content_revision) { 179 - case 0: 180 - case 1: 181 - case 2: 182 - v_3_1 = (struct atom_firmware_info_v3_1 *)header; 183 - smu->smu_table.boot_values.revision = v_3_1->firmware_revision; 184 - smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; 185 - smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; 186 - smu->smu_table.boot_values.socclk = 0; 187 - smu->smu_table.boot_values.dcefclk = 0; 188 - smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; 189 - smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; 190 - smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; 191 - smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; 192 - smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; 193 - break; 194 - case 3: 195 - v_3_3 = (struct atom_firmware_info_v3_3 *)header; 196 - smu->smu_table.boot_values.revision = v_3_3->firmware_revision; 197 - smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; 198 - smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; 199 - smu->smu_table.boot_values.socclk = 0; 200 - smu->smu_table.boot_values.dcefclk = 0; 201 - smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; 202 - smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; 203 - smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; 204 - smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; 205 - smu->smu_table.boot_values.cooling_id = 
v_3_3->coolingsolution_id; 206 - break; 207 - case 4: 208 - default: 209 - v_3_4 = (struct atom_firmware_info_v3_4 *)header; 210 - smu->smu_table.boot_values.revision = v_3_4->firmware_revision; 211 - smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz; 212 - smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz; 213 - smu->smu_table.boot_values.socclk = 0; 214 - smu->smu_table.boot_values.dcefclk = 0; 215 - smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv; 216 - smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv; 217 - smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv; 218 - smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv; 219 - smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id; 220 - break; 221 - } 222 - 223 - smu->smu_table.boot_values.format_revision = header->format_revision; 224 - smu->smu_table.boot_values.content_revision = header->content_revision; 225 - 226 - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, 227 - (uint8_t)SMU11_SYSPLL0_SOCCLK_ID, 228 - (uint8_t)0, 229 - &smu->smu_table.boot_values.socclk); 230 - 231 - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, 232 - (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID, 233 - (uint8_t)0, 234 - &smu->smu_table.boot_values.dcefclk); 235 - 236 - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, 237 - (uint8_t)SMU11_SYSPLL0_ECLK_ID, 238 - (uint8_t)0, 239 - &smu->smu_table.boot_values.eclk); 240 - 241 - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, 242 - (uint8_t)SMU11_SYSPLL0_VCLK_ID, 243 - (uint8_t)0, 244 - &smu->smu_table.boot_values.vclk); 245 - 246 - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, 247 - (uint8_t)SMU11_SYSPLL0_DCLK_ID, 248 - (uint8_t)0, 249 - &smu->smu_table.boot_values.dclk); 250 - 251 - if ((smu->smu_table.boot_values.format_revision == 3) && 252 - (smu->smu_table.boot_values.content_revision >= 2)) 253 - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, 254 - (uint8_t)SMU11_SYSPLL1_0_FCLK_ID, 255 - (uint8_t)SMU11_SYSPLL1_2_ID, 256 - 
&smu->smu_table.boot_values.fclk); 257 - 258 - return 0; 259 - } 260 - 261 - int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu) 262 - { 263 - struct smu_table_context *smu_table = &smu->smu_table; 264 - 265 - return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 266 - } 267 - 268 - int smu_v13_0_1_set_driver_table_location(struct smu_context *smu) 269 - { 270 - struct smu_table *driver_table = &smu->smu_table.driver_table; 271 - int ret = 0; 272 - 273 - if (!driver_table->mc_address) 274 - return 0; 275 - 276 - ret = smu_cmn_send_smc_msg_with_param(smu, 277 - SMU_MSG_SetDriverDramAddrHigh, 278 - upper_32_bits(driver_table->mc_address), 279 - NULL); 280 - 281 - if (ret) 282 - return ret; 283 - 284 - ret = smu_cmn_send_smc_msg_with_param(smu, 285 - SMU_MSG_SetDriverDramAddrLow, 286 - lower_32_bits(driver_table->mc_address), 287 - NULL); 288 - 289 - return ret; 290 - } 291 - 292 - int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable) 293 - { 294 - int ret = 0; 295 - struct amdgpu_device *adev = smu->adev; 296 - 297 - switch (adev->asic_type) { 298 - case CHIP_YELLOW_CARP: 299 - if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) 300 - return 0; 301 - if (enable) 302 - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); 303 - else 304 - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); 305 - break; 306 - default: 307 - break; 308 - } 309 - 310 - return ret; 311 - }
+34 -15
drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
··· 25 25 26 26 #include "amdgpu.h" 27 27 #include "amdgpu_smu.h" 28 - #include "smu_v13_0_1.h" 28 + #include "smu_v13_0.h" 29 29 #include "smu13_driver_if_yellow_carp.h" 30 30 #include "yellow_carp_ppt.h" 31 31 #include "smu_v13_0_1_ppsmc.h" ··· 186 186 return -ENOMEM; 187 187 } 188 188 189 + static int yellow_carp_fini_smc_tables(struct smu_context *smu) 190 + { 191 + struct smu_table_context *smu_table = &smu->smu_table; 192 + 193 + kfree(smu_table->clocks_table); 194 + smu_table->clocks_table = NULL; 195 + 196 + kfree(smu_table->metrics_table); 197 + smu_table->metrics_table = NULL; 198 + 199 + kfree(smu_table->watermarks_table); 200 + smu_table->watermarks_table = NULL; 201 + 202 + return 0; 203 + } 204 + 189 205 static int yellow_carp_system_features_control(struct smu_context *smu, bool en) 190 206 { 191 207 struct smu_feature *feature = &smu->smu_feature; ··· 298 282 if (index < 0) 299 283 return index == -EACCES ? 0 : index; 300 284 301 - mutex_lock(&smu->message_lock); 302 - 303 - ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); 304 - 305 - mutex_unlock(&smu->message_lock); 306 - 307 - mdelay(10); 285 + ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); 286 + if (ret) 287 + dev_err(smu->adev->dev, "Failed to mode reset!\n"); 308 288 309 289 return ret; 310 290 } ··· 669 657 *table = (void *)gpu_metrics; 670 658 671 659 return sizeof(struct gpu_metrics_v2_1); 660 + } 661 + 662 + static int yellow_carp_set_default_dpm_tables(struct smu_context *smu) 663 + { 664 + struct smu_table_context *smu_table = &smu->smu_table; 665 + 666 + return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 672 667 } 673 668 674 669 static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, ··· 1222 1203 } 1223 1204 1224 1205 static const struct pptable_funcs yellow_carp_ppt_funcs = { 1225 - .check_fw_status = smu_v13_0_1_check_fw_status, 1226 - .check_fw_version = 
smu_v13_0_1_check_fw_version, 1206 + .check_fw_status = smu_v13_0_check_fw_status, 1207 + .check_fw_version = smu_v13_0_check_fw_version, 1227 1208 .init_smc_tables = yellow_carp_init_smc_tables, 1228 - .fini_smc_tables = smu_v13_0_1_fini_smc_tables, 1229 - .get_vbios_bootup_values = smu_v13_0_1_get_vbios_bootup_values, 1209 + .fini_smc_tables = yellow_carp_fini_smc_tables, 1210 + .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, 1230 1211 .system_features_control = yellow_carp_system_features_control, 1231 1212 .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, 1232 1213 .send_smc_msg = smu_cmn_send_smc_msg, 1233 1214 .dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable, 1234 1215 .dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable, 1235 - .set_default_dpm_table = smu_v13_0_1_set_default_dpm_tables, 1216 + .set_default_dpm_table = yellow_carp_set_default_dpm_tables, 1236 1217 .read_sensor = yellow_carp_read_sensor, 1237 1218 .is_dpm_running = yellow_carp_is_dpm_running, 1238 1219 .set_watermarks_table = yellow_carp_set_watermarks_table, ··· 1241 1222 .get_gpu_metrics = yellow_carp_get_gpu_metrics, 1242 1223 .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask, 1243 1224 .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, 1244 - .set_driver_table_location = smu_v13_0_1_set_driver_table_location, 1245 - .gfx_off_control = smu_v13_0_1_gfx_off_control, 1225 + .set_driver_table_location = smu_v13_0_set_driver_table_location, 1226 + .gfx_off_control = smu_v13_0_gfx_off_control, 1246 1227 .post_init = yellow_carp_post_smu_init, 1247 1228 .mode2_reset = yellow_carp_mode2_reset, 1248 1229 .get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
+1 -4
drivers/gpu/drm/i915/gt/gen8_ppgtt.c
··· 303 303 __i915_gem_object_pin_pages(pt->base); 304 304 i915_gem_object_make_unshrinkable(pt->base); 305 305 306 - if (lvl || 307 - gen8_pt_count(*start, end) < I915_PDES || 308 - intel_vgpu_active(vm->i915)) 309 - fill_px(pt, vm->scratch[lvl]->encode); 306 + fill_px(pt, vm->scratch[lvl]->encode); 310 307 311 308 spin_lock(&pd->lock); 312 309 if (likely(!pd->entry[idx])) {
+1 -1
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
··· 348 348 if (intel_has_pending_fb_unpin(ggtt->vm.i915)) 349 349 return ERR_PTR(-EAGAIN); 350 350 351 - return ERR_PTR(-EDEADLK); 351 + return ERR_PTR(-ENOBUFS); 352 352 } 353 353 354 354 int __i915_vma_pin_fence(struct i915_vma *vma)
+1 -3
drivers/gpu/drm/panel/panel-novatek-nt35510.c
··· 706 706 if (ret) 707 707 return ret; 708 708 709 - ret = nt35510_read_id(nt); 710 - if (ret) 711 - return ret; 709 + nt35510_read_id(nt); 712 710 713 711 /* Set up stuff in manufacturer control, page 1 */ 714 712 ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+1 -1
drivers/gpu/drm/qxl/qxl_ttm.c
··· 127 127 struct qxl_bo *qbo; 128 128 struct qxl_device *qdev; 129 129 130 - if (!qxl_ttm_bo_is_qxl_bo(bo)) 130 + if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource) 131 131 return; 132 132 qbo = to_qxl_bo(bo); 133 133 qdev = to_qxl(qbo->tbo.base.dev);
+3
drivers/gpu/drm/ttm/ttm_range_manager.c
··· 181 181 struct drm_mm *mm = &rman->mm; 182 182 int ret; 183 183 184 + if (!man) 185 + return 0; 186 + 184 187 ttm_resource_manager_set_used(man, false); 185 188 186 189 ret = ttm_resource_manager_evict_all(bdev, man);
+1
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 36 36 #include <drm/drm_ioctl.h> 37 37 #include <drm/drm_sysfs.h> 38 38 #include <drm/ttm/ttm_bo_driver.h> 39 + #include <drm/ttm/ttm_range_manager.h> 39 40 #include <drm/ttm/ttm_placement.h> 40 41 #include <generated/utsrelease.h> 41 42
-1
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
··· 354 354 ttm_bo_unpin(bo); 355 355 ttm_bo_unreserve(bo); 356 356 357 - ttm_bo_unpin(batch->otable_bo); 358 357 ttm_bo_put(batch->otable_bo); 359 358 batch->otable_bo = NULL; 360 359 }
+5 -7
drivers/video/fbdev/core/fbmem.c
··· 970 970 fb_var_to_videomode(&mode2, &info->var); 971 971 /* make sure we don't delete the videomode of current var */ 972 972 ret = fb_mode_is_equal(&mode1, &mode2); 973 - 974 - if (!ret) 975 - fbcon_mode_deleted(info, &mode1); 976 - 977 - if (!ret) 978 - fb_delete_videomode(&mode1, &info->modelist); 979 - 973 + if (!ret) { 974 + ret = fbcon_mode_deleted(info, &mode1); 975 + if (!ret) 976 + fb_delete_videomode(&mode1, &info->modelist); 977 + } 980 978 981 979 return ret ? -EINVAL : 0; 982 980 }