Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-next-2020-02-07' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Just some fixes for this merge window: the tegra changes fix some
regressions in the merge, nouveau has a few modesetting fixes.

The amdgpu fixes are a bit bigger, but they contain a couple of weeks
of fixes, and don't seem to contain anything that isn't really a fix.

Summary:

tegra:
- merge window regression fixes

nouveau:
- couple of volta/turing modesetting fixes

amdgpu:
- EDC fixes for Arcturus
- GDDR6 memory training fixes
- Fix for reading gfx clockgating registers while in GFXOFF state
- i2c freq fixes
- Misc display fixes
- TLB invalidation fix when using semaphores
- VCN 2.5 instancing fixes
- Switch raven1 gfxoff to a blacklist
- Coreboot workaround for KV/KB
- Root cause dongle fixes for display and revert workaround
- Enable GPU reset for renoir and navi
- Navi overclocking fixes
- Fix up confusing warnings in display clock validation on raven

amdkfd:
- SDMA fix

radeon:
- Misc LUT fixes"

* tag 'drm-next-2020-02-07' of git://anongit.freedesktop.org/drm/drm: (90 commits)
gpu: host1x: Set DMA direction only for DMA-mapped buffer objects
drm/tegra: Reuse IOVA mapping where possible
drm/tegra: Relax IOMMU usage criteria on old Tegra
drm/amd/dm/mst: Ignore payload update failures
drm/amdgpu: update default voltage for boot od table for navi1x
drm/amdgpu/smu10: fix smu10_get_clock_by_type_with_voltage
drm/amdgpu/smu10: fix smu10_get_clock_by_type_with_latency
drm/amdgpu/display: handle multiple numbers of fclks in dcn_calcs.c (v2)
drm/amdgpu: fetch default VDDC curve voltages (v2)
drm/amdgpu/smu_v11_0: Correct behavior of restoring default tables (v2)
drm/amdgpu/navi10: add OD_RANGE for navi overclocking
drm/amdgpu/navi: fix index for OD MCLK
drm/amd/display: Fix HW/SW state mismatch
drm/amd/display: Fix a typo when computing dsc configuration
drm/amd/powerplay: fix navi10 system intermittent reboot issue V2
drm/amdkfd: Fix a bug in SDMA RLC queue counting under HWS mode
drm/amd/display: Only enable cursor on pipes that need it
drm/nouveau/kms/gv100-: avoid sending a core update until the first modeset
drm/nouveau/kms/gv100-: move window ownership setup into modesetting path
drm/nouveau/disp/gv100-: halt NV_PDISP_FE_RM_INTR_STAT_CTRL_DISP_ERROR storms
...

+4522 -954
+1
drivers/gpu/drm/amd/amdgpu/Makefile
··· 120 120 amdgpu_rlc.o \ 121 121 gfx_v8_0.o \ 122 122 gfx_v9_0.o \ 123 + gfx_v9_4.o \ 123 124 gfx_v10_0.o 124 125 125 126 # add async DMA block
+4
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 1009 1009 1010 1010 #define AMDGPU_REGS_IDX (1<<0) 1011 1011 #define AMDGPU_REGS_NO_KIQ (1<<1) 1012 + #define AMDGPU_REGS_KIQ (1<<2) 1012 1013 1013 1014 #define RREG32_NO_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) 1014 1015 #define WREG32_NO_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) 1016 + 1017 + #define RREG32_KIQ(reg) amdgpu_mm_rreg(adev, (reg), AMDGPU_REGS_KIQ) 1018 + #define WREG32_KIQ(reg, v) amdgpu_mm_wreg(adev, (reg), (v), AMDGPU_REGS_KIQ) 1015 1019 1016 1020 #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) 1017 1021 #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v))
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
··· 527 527 enum amd_powergating_state state) 528 528 { 529 529 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 530 - bool enable = state == AMD_PG_STATE_GATE ? true : false; 530 + bool enable = (state == AMD_PG_STATE_GATE); 531 531 532 532 if (adev->powerplay.pp_funcs && 533 533 adev->powerplay.pp_funcs->set_powergating_by_smu)
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
··· 2129 2129 return -ENOMEM; 2130 2130 2131 2131 mutex_init(&(*mem)->lock); 2132 + INIT_LIST_HEAD(&(*mem)->bo_va_list); 2132 2133 (*mem)->bo = amdgpu_bo_ref(gws_bo); 2133 2134 (*mem)->domain = AMDGPU_GEM_DOMAIN_GWS; 2134 2135 (*mem)->process_info = process_info;
+123 -108
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
··· 42 42 [AMDGPU_HW_IP_VCN_JPEG] = 1, 43 43 }; 44 44 45 - static int amdgpu_ctx_total_num_entities(void) 46 - { 47 - unsigned i, num_entities = 0; 48 - 49 - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) 50 - num_entities += amdgpu_ctx_num_entities[i]; 51 - 52 - return num_entities; 53 - } 54 - 55 45 static int amdgpu_ctx_priority_permit(struct drm_file *filp, 56 46 enum drm_sched_priority priority) 57 47 { 48 + if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) 49 + return -EINVAL; 50 + 58 51 /* NORMAL and below are accessible by everyone */ 59 52 if (priority <= DRM_SCHED_PRIORITY_NORMAL) 60 53 return 0; ··· 61 68 return -EACCES; 62 69 } 63 70 64 - static int amdgpu_ctx_init(struct amdgpu_device *adev, 65 - enum drm_sched_priority priority, 66 - struct drm_file *filp, 67 - struct amdgpu_ctx *ctx) 71 + static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring) 68 72 { 69 - unsigned num_entities = amdgpu_ctx_total_num_entities(); 70 - unsigned i, j; 73 + struct amdgpu_device *adev = ctx->adev; 74 + struct amdgpu_ctx_entity *entity; 75 + struct drm_gpu_scheduler **scheds = NULL, *sched = NULL; 76 + unsigned num_scheds = 0; 77 + enum drm_sched_priority priority; 71 78 int r; 72 79 73 - if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) 74 - return -EINVAL; 80 + entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]), 81 + GFP_KERNEL); 82 + if (!entity) 83 + return -ENOMEM; 75 84 76 - r = amdgpu_ctx_priority_permit(filp, priority); 77 - if (r) 78 - return r; 79 - 80 - memset(ctx, 0, sizeof(*ctx)); 81 - ctx->adev = adev; 82 - 83 - ctx->fences = kcalloc(amdgpu_sched_jobs * num_entities, 84 - sizeof(struct dma_fence*), GFP_KERNEL); 85 - if (!ctx->fences) 86 - return -ENOMEM; 87 - 88 - ctx->entities[0] = kcalloc(num_entities, 89 - sizeof(struct amdgpu_ctx_entity), 90 - GFP_KERNEL); 91 - if (!ctx->entities[0]) { 92 - r = -ENOMEM; 93 - goto error_free_fences; 94 - } 95 - 96 - for (i = 0; i < num_entities; ++i) { 97 - 
struct amdgpu_ctx_entity *entity = &ctx->entities[0][i]; 98 - 99 - entity->sequence = 1; 100 - entity->fences = &ctx->fences[amdgpu_sched_jobs * i]; 101 - } 102 - for (i = 1; i < AMDGPU_HW_IP_NUM; ++i) 103 - ctx->entities[i] = ctx->entities[i - 1] + 104 - amdgpu_ctx_num_entities[i - 1]; 105 - 106 - kref_init(&ctx->refcount); 107 - spin_lock_init(&ctx->ring_lock); 108 - mutex_init(&ctx->lock); 109 - 110 - ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); 111 - ctx->reset_counter_query = ctx->reset_counter; 112 - ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); 113 - ctx->init_priority = priority; 114 - ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; 115 - 116 - for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { 117 - struct drm_gpu_scheduler **scheds; 118 - struct drm_gpu_scheduler *sched; 119 - unsigned num_scheds = 0; 120 - 121 - switch (i) { 85 + entity->sequence = 1; 86 + priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? 87 + ctx->init_priority : ctx->override_priority; 88 + switch (hw_ip) { 122 89 case AMDGPU_HW_IP_GFX: 123 90 sched = &adev->gfx.gfx_ring[0].sched; 124 91 scheds = &sched; ··· 119 166 scheds = adev->jpeg.jpeg_sched; 120 167 num_scheds = adev->jpeg.num_jpeg_sched; 121 168 break; 122 - } 123 - 124 - for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) 125 - r = drm_sched_entity_init(&ctx->entities[i][j].entity, 126 - priority, scheds, 127 - num_scheds, &ctx->guilty); 128 - if (r) 129 - goto error_cleanup_entities; 130 169 } 170 + 171 + r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds, 172 + &ctx->guilty); 173 + if (r) 174 + goto error_free_entity; 175 + 176 + ctx->entities[hw_ip][ring] = entity; 177 + return 0; 178 + 179 + error_free_entity: 180 + kfree(entity); 181 + 182 + return r; 183 + } 184 + 185 + static int amdgpu_ctx_init(struct amdgpu_device *adev, 186 + enum drm_sched_priority priority, 187 + struct drm_file *filp, 188 + struct amdgpu_ctx *ctx) 189 + { 190 + int r; 191 + 192 + r = 
amdgpu_ctx_priority_permit(filp, priority); 193 + if (r) 194 + return r; 195 + 196 + memset(ctx, 0, sizeof(*ctx)); 197 + 198 + ctx->adev = adev; 199 + 200 + kref_init(&ctx->refcount); 201 + spin_lock_init(&ctx->ring_lock); 202 + mutex_init(&ctx->lock); 203 + 204 + ctx->reset_counter = atomic_read(&adev->gpu_reset_counter); 205 + ctx->reset_counter_query = ctx->reset_counter; 206 + ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); 207 + ctx->init_priority = priority; 208 + ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; 131 209 132 210 return 0; 133 211 134 - error_cleanup_entities: 135 - for (i = 0; i < num_entities; ++i) 136 - drm_sched_entity_destroy(&ctx->entities[0][i].entity); 137 - kfree(ctx->entities[0]); 212 + } 138 213 139 - error_free_fences: 140 - kfree(ctx->fences); 141 - ctx->fences = NULL; 142 - return r; 214 + static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity) 215 + { 216 + 217 + int i; 218 + 219 + if (!entity) 220 + return; 221 + 222 + for (i = 0; i < amdgpu_sched_jobs; ++i) 223 + dma_fence_put(entity->fences[i]); 224 + 225 + kfree(entity); 143 226 } 144 227 145 228 static void amdgpu_ctx_fini(struct kref *ref) 146 229 { 147 230 struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount); 148 - unsigned num_entities = amdgpu_ctx_total_num_entities(); 149 231 struct amdgpu_device *adev = ctx->adev; 150 232 unsigned i, j; 151 233 152 234 if (!adev) 153 235 return; 154 236 155 - for (i = 0; i < num_entities; ++i) 156 - for (j = 0; j < amdgpu_sched_jobs; ++j) 157 - dma_fence_put(ctx->entities[0][i].fences[j]); 158 - kfree(ctx->fences); 159 - kfree(ctx->entities[0]); 237 + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { 238 + for (j = 0; j < AMDGPU_MAX_ENTITY_NUM; ++j) { 239 + amdgpu_ctx_fini_entity(ctx->entities[i][j]); 240 + ctx->entities[i][j] = NULL; 241 + } 242 + } 160 243 161 244 mutex_destroy(&ctx->lock); 162 - 163 245 kfree(ctx); 164 246 } 165 247 166 248 int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, 
u32 hw_ip, u32 instance, 167 249 u32 ring, struct drm_sched_entity **entity) 168 250 { 251 + int r; 252 + 169 253 if (hw_ip >= AMDGPU_HW_IP_NUM) { 170 254 DRM_ERROR("unknown HW IP type: %d\n", hw_ip); 171 255 return -EINVAL; ··· 219 229 return -EINVAL; 220 230 } 221 231 222 - *entity = &ctx->entities[hw_ip][ring].entity; 232 + if (ctx->entities[hw_ip][ring] == NULL) { 233 + r = amdgpu_ctx_init_entity(ctx, hw_ip, ring); 234 + if (r) 235 + return r; 236 + } 237 + 238 + *entity = &ctx->entities[hw_ip][ring]->entity; 223 239 return 0; 224 240 } 225 241 ··· 265 269 static void amdgpu_ctx_do_release(struct kref *ref) 266 270 { 267 271 struct amdgpu_ctx *ctx; 268 - unsigned num_entities; 269 - u32 i; 272 + u32 i, j; 270 273 271 274 ctx = container_of(ref, struct amdgpu_ctx, refcount); 275 + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { 276 + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { 277 + if (!ctx->entities[i][j]) 278 + continue; 272 279 273 - num_entities = amdgpu_ctx_total_num_entities(); 274 - for (i = 0; i < num_entities; i++) 275 - drm_sched_entity_destroy(&ctx->entities[0][i].entity); 280 + drm_sched_entity_destroy(&ctx->entities[i][j]->entity); 281 + } 282 + } 276 283 277 284 amdgpu_ctx_fini(ref); 278 285 } ··· 505 506 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, 506 507 enum drm_sched_priority priority) 507 508 { 508 - unsigned num_entities = amdgpu_ctx_total_num_entities(); 509 509 enum drm_sched_priority ctx_prio; 510 - unsigned i; 510 + unsigned i, j; 511 511 512 512 ctx->override_priority = priority; 513 513 514 514 ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ? 
515 515 ctx->init_priority : ctx->override_priority; 516 + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { 517 + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { 518 + struct drm_sched_entity *entity; 516 519 517 - for (i = 0; i < num_entities; i++) { 518 - struct drm_sched_entity *entity = &ctx->entities[0][i].entity; 520 + if (!ctx->entities[i][j]) 521 + continue; 519 522 520 - drm_sched_entity_set_priority(entity, ctx_prio); 523 + entity = &ctx->entities[i][j]->entity; 524 + drm_sched_entity_set_priority(entity, ctx_prio); 525 + } 521 526 } 522 527 } 523 528 ··· 557 554 558 555 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout) 559 556 { 560 - unsigned num_entities = amdgpu_ctx_total_num_entities(); 561 557 struct amdgpu_ctx *ctx; 562 558 struct idr *idp; 563 - uint32_t id, i; 559 + uint32_t id, i, j; 564 560 565 561 idp = &mgr->ctx_handles; 566 562 567 563 mutex_lock(&mgr->lock); 568 564 idr_for_each_entry(idp, ctx, id) { 569 - for (i = 0; i < num_entities; i++) { 570 - struct drm_sched_entity *entity; 565 + for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { 566 + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { 567 + struct drm_sched_entity *entity; 571 568 572 - entity = &ctx->entities[0][i].entity; 573 - timeout = drm_sched_entity_flush(entity, timeout); 569 + if (!ctx->entities[i][j]) 570 + continue; 571 + 572 + entity = &ctx->entities[i][j]->entity; 573 + timeout = drm_sched_entity_flush(entity, timeout); 574 + } 574 575 } 575 576 } 576 577 mutex_unlock(&mgr->lock); ··· 583 576 584 577 void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr) 585 578 { 586 - unsigned num_entities = amdgpu_ctx_total_num_entities(); 587 579 struct amdgpu_ctx *ctx; 588 580 struct idr *idp; 589 - uint32_t id, i; 581 + uint32_t id, i, j; 590 582 591 583 idp = &mgr->ctx_handles; 592 584 ··· 595 589 continue; 596 590 } 597 591 598 - for (i = 0; i < num_entities; i++) 599 - drm_sched_entity_fini(&ctx->entities[0][i].entity); 592 + for (i = 0; i < AMDGPU_HW_IP_NUM; 
++i) { 593 + for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) { 594 + struct drm_sched_entity *entity; 595 + 596 + if (!ctx->entities[i][j]) 597 + continue; 598 + 599 + entity = &ctx->entities[i][j]->entity; 600 + drm_sched_entity_fini(entity); 601 + } 602 + } 600 603 } 601 604 } 602 605
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
··· 29 29 struct drm_file; 30 30 struct amdgpu_fpriv; 31 31 32 + #define AMDGPU_MAX_ENTITY_NUM 4 33 + 32 34 struct amdgpu_ctx_entity { 33 35 uint64_t sequence; 34 - struct dma_fence **fences; 35 36 struct drm_sched_entity entity; 37 + struct dma_fence *fences[]; 36 38 }; 37 39 38 40 struct amdgpu_ctx { ··· 44 42 unsigned reset_counter_query; 45 43 uint32_t vram_lost_counter; 46 44 spinlock_t ring_lock; 47 - struct dma_fence **fences; 48 - struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM]; 45 + struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM]; 49 46 bool preamble_presented; 50 47 enum drm_sched_priority init_priority; 51 48 enum drm_sched_priority override_priority;
+9 -5
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 216 216 { 217 217 uint32_t ret; 218 218 219 - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) 220 - return amdgpu_virt_kiq_rreg(adev, reg); 219 + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) 220 + return amdgpu_kiq_rreg(adev, reg); 221 221 222 222 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 223 223 ret = readl(((void __iomem *)adev->rmmio) + (reg * 4)); ··· 294 294 adev->last_mm_index = v; 295 295 } 296 296 297 - if (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)) 298 - return amdgpu_virt_kiq_wreg(adev, reg, v); 297 + if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev))) 298 + return amdgpu_kiq_wreg(adev, reg, v); 299 299 300 300 if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX)) 301 301 writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); ··· 985 985 static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev) 986 986 { 987 987 struct sysinfo si; 988 - bool is_os_64 = (sizeof(void *) == 8) ? true : false; 988 + bool is_os_64 = (sizeof(void *) == 8); 989 989 uint64_t total_memory; 990 990 uint64_t dram_size_seven_GB = 0x1B8000000; 991 991 uint64_t dram_size_three_GB = 0xB8000000; ··· 3760 3760 case CHIP_VEGA12: 3761 3761 case CHIP_RAVEN: 3762 3762 case CHIP_ARCTURUS: 3763 + case CHIP_RENOIR: 3764 + case CHIP_NAVI10: 3765 + case CHIP_NAVI14: 3766 + case CHIP_NAVI12: 3763 3767 break; 3764 3768 default: 3765 3769 goto disabled;
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
··· 52 52 uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val); 53 53 void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val, 54 54 uint32_t ficadl_val, uint32_t ficadh_val); 55 + uint64_t (*get_dram_base_addr)(struct amdgpu_device *adev, 56 + uint32_t df_inst); 57 + uint32_t (*get_df_inst_id)(struct amdgpu_device *adev); 55 58 }; 56 59 57 60 struct amdgpu_df {
+94 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 296 296 297 297 spin_lock_init(&kiq->ring_lock); 298 298 299 - r = amdgpu_device_wb_get(adev, &adev->virt.reg_val_offs); 299 + r = amdgpu_device_wb_get(adev, &kiq->reg_val_offs); 300 300 if (r) 301 301 return r; 302 302 ··· 321 321 322 322 void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) 323 323 { 324 - amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); 324 + amdgpu_device_wb_free(ring->adev, ring->adev->gfx.kiq.reg_val_offs); 325 325 amdgpu_ring_fini(ring); 326 326 } 327 327 ··· 657 657 DRM_ERROR("CP ECC ERROR IRQ\n"); 658 658 amdgpu_ras_interrupt_dispatch(adev, &ih_data); 659 659 return 0; 660 + } 661 + 662 + uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) 663 + { 664 + signed long r, cnt = 0; 665 + unsigned long flags; 666 + uint32_t seq; 667 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 668 + struct amdgpu_ring *ring = &kiq->ring; 669 + 670 + BUG_ON(!ring->funcs->emit_rreg); 671 + 672 + spin_lock_irqsave(&kiq->ring_lock, flags); 673 + amdgpu_ring_alloc(ring, 32); 674 + amdgpu_ring_emit_rreg(ring, reg); 675 + amdgpu_fence_emit_polling(ring, &seq); 676 + amdgpu_ring_commit(ring); 677 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 678 + 679 + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 680 + 681 + /* don't wait anymore for gpu reset case because this way may 682 + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg 683 + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will 684 + * never return if we keep waiting in virt_kiq_rreg, which cause 685 + * gpu_recover() hang there. 
686 + * 687 + * also don't wait anymore for IRQ context 688 + * */ 689 + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) 690 + goto failed_kiq_read; 691 + 692 + might_sleep(); 693 + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { 694 + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); 695 + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 696 + } 697 + 698 + if (cnt > MAX_KIQ_REG_TRY) 699 + goto failed_kiq_read; 700 + 701 + return adev->wb.wb[kiq->reg_val_offs]; 702 + 703 + failed_kiq_read: 704 + pr_err("failed to read reg:%x\n", reg); 705 + return ~0; 706 + } 707 + 708 + void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) 709 + { 710 + signed long r, cnt = 0; 711 + unsigned long flags; 712 + uint32_t seq; 713 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 714 + struct amdgpu_ring *ring = &kiq->ring; 715 + 716 + BUG_ON(!ring->funcs->emit_wreg); 717 + 718 + spin_lock_irqsave(&kiq->ring_lock, flags); 719 + amdgpu_ring_alloc(ring, 32); 720 + amdgpu_ring_emit_wreg(ring, reg, v); 721 + amdgpu_fence_emit_polling(ring, &seq); 722 + amdgpu_ring_commit(ring); 723 + spin_unlock_irqrestore(&kiq->ring_lock, flags); 724 + 725 + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 726 + 727 + /* don't wait anymore for gpu reset case because this way may 728 + * block gpu_recover() routine forever, e.g. this virt_kiq_rreg 729 + * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will 730 + * never return if we keep waiting in virt_kiq_rreg, which cause 731 + * gpu_recover() hang there. 
732 + * 733 + * also don't wait anymore for IRQ context 734 + * */ 735 + if (r < 1 && (adev->in_gpu_reset || in_interrupt())) 736 + goto failed_kiq_write; 737 + 738 + might_sleep(); 739 + while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { 740 + 741 + msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); 742 + r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 743 + } 744 + 745 + if (cnt > MAX_KIQ_REG_TRY) 746 + goto failed_kiq_write; 747 + 748 + return; 749 + 750 + failed_kiq_write: 751 + pr_err("failed to write reg:%x\n", reg); 660 752 }
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
··· 94 94 struct amdgpu_ring ring; 95 95 struct amdgpu_irq_src irq; 96 96 const struct kiq_pm4_funcs *pmf; 97 + uint32_t reg_val_offs; 97 98 }; 98 99 99 100 /* ··· 376 375 int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, 377 376 struct amdgpu_irq_src *source, 378 377 struct amdgpu_iv_entry *entry); 378 + uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); 379 + void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); 379 380 #endif
-5
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 60 60 */ 61 61 #define AMDGPU_GMC_FAULT_TIMEOUT 5000ULL 62 62 63 - /* 64 - * Default stolen memory size, 1024 * 768 * 4 65 - */ 66 - #define AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE 0x300000ULL 67 - 68 63 struct firmware; 69 64 70 65 /*
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
··· 172 172 #define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 173 173 #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 174 174 #define GDDR6_MEM_TRAINING_OFFSET 0x8000 175 + /*Define the VRAM size that will be encroached by BIST training.*/ 176 + #define GDDR6_MEM_TRAINING_ENCROACHED_SIZE 0x2000000 175 177 176 178 enum psp_memory_training_init_flag { 177 179 PSP_MEM_TRAIN_NOT_SUPPORT = 0x0,
+20
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 742 742 return 0; 743 743 } 744 744 745 + uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr) 746 + { 747 + uint32_t df_inst_id; 748 + 749 + if ((!adev->df.funcs) || 750 + (!adev->df.funcs->get_df_inst_id) || 751 + (!adev->df.funcs->get_dram_base_addr)) 752 + return addr; 753 + 754 + df_inst_id = adev->df.funcs->get_df_inst_id(adev); 755 + 756 + return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id); 757 + } 758 + 745 759 /* wrapper of psp_ras_trigger_error */ 746 760 int amdgpu_ras_error_inject(struct amdgpu_device *adev, 747 761 struct ras_inject_if *info) ··· 772 758 773 759 if (!obj) 774 760 return -EINVAL; 761 + 762 + /* Calculate XGMI relative offset */ 763 + if (adev->gmc.xgmi.num_physical_nodes > 1) { 764 + block_info.address = get_xgmi_relative_phy_addr(adev, 765 + block_info.address); 766 + } 775 767 776 768 switch (info->head.block) { 777 769 case AMDGPU_RAS_BLOCK__GFX:
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 652 652 653 653 if ((addr + (uint64_t)size) > 654 654 (mapping->last + 1) * AMDGPU_GPU_PAGE_SIZE) { 655 - DRM_ERROR("BO to small for addr 0x%010Lx %d %d\n", 655 + DRM_ERROR("BO too small for addr 0x%010Lx %d %d\n", 656 656 addr, lo, hi); 657 657 return -EINVAL; 658 658 }
+13 -13
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
··· 65 65 /* 1 second timeout */ 66 66 #define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) 67 67 68 - #define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \ 69 - ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ 70 - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ 68 + #define RREG32_SOC15_DPG_MODE(ip, inst_idx, reg, mask, sram_sel) \ 69 + ({ WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \ 70 + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \ 71 71 UVD_DPG_LMA_CTL__MASK_EN_MASK | \ 72 - ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ 72 + ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \ 73 73 << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ 74 74 (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ 75 - RREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA); \ 75 + RREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA); \ 76 76 }) 77 77 78 - #define WREG32_SOC15_DPG_MODE(ip, inst, reg, value, mask, sram_sel) \ 78 + #define WREG32_SOC15_DPG_MODE(ip, inst_idx, reg, value, mask, sram_sel) \ 79 79 do { \ 80 - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_DATA, value); \ 81 - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ 82 - WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_CTL, \ 80 + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_DATA, value); \ 81 + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_MASK, mask); \ 82 + WREG32_SOC15(ip, inst_idx, mmUVD_DPG_LMA_CTL, \ 83 83 UVD_DPG_LMA_CTL__READ_WRITE_MASK | \ 84 - ((adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) \ 84 + ((adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg) \ 85 85 << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT) | \ 86 86 (sram_sel << UVD_DPG_LMA_CTL__SRAM_SEL__SHIFT)); \ 87 87 } while (0) 88 88 89 - #define SOC15_DPG_MODE_OFFSET_2_0(ip, inst, reg) \ 89 + #define SOC15_DPG_MODE_OFFSET_2_0(ip, inst_idx, reg) \ 90 90 ({ \ 91 91 uint32_t internal_reg_offset, addr; \ 92 92 bool video_range, aon_range; \ 93 93 \ 94 - addr = (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg); \ 94 + addr = 
(adev->reg_offset[ip##_HWIP][inst_idx][reg##_BASE_IDX] + reg); \ 95 95 addr <<= 2; \ 96 96 video_range = ((((0xFFFFF & addr) >= (VCN_VID_SOC_ADDRESS_2_0)) && \ 97 97 ((0xFFFFF & addr) < ((VCN_VID_SOC_ADDRESS_2_0 + 0x2600))))); \ ··· 111 111 112 112 #define RREG32_SOC15_DPG_MODE_2_0(inst_idx, offset, mask_en) \ 113 113 ({ \ 114 - WREG32_SOC15(VCN, inst, mmUVD_DPG_LMA_CTL, \ 114 + WREG32_SOC15(VCN, inst_idx, mmUVD_DPG_LMA_CTL, \ 115 115 (0x0 << UVD_DPG_LMA_CTL__READ_WRITE__SHIFT | \ 116 116 mask_en << UVD_DPG_LMA_CTL__MASK_EN__SHIFT | \ 117 117 offset << UVD_DPG_LMA_CTL__READ_WRITE_ADDR__SHIFT)); \
-92
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
··· 45 45 adev->pg_flags = 0; 46 46 } 47 47 48 - uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg) 49 - { 50 - signed long r, cnt = 0; 51 - unsigned long flags; 52 - uint32_t seq; 53 - struct amdgpu_kiq *kiq = &adev->gfx.kiq; 54 - struct amdgpu_ring *ring = &kiq->ring; 55 - 56 - BUG_ON(!ring->funcs->emit_rreg); 57 - 58 - spin_lock_irqsave(&kiq->ring_lock, flags); 59 - amdgpu_ring_alloc(ring, 32); 60 - amdgpu_ring_emit_rreg(ring, reg); 61 - amdgpu_fence_emit_polling(ring, &seq); 62 - amdgpu_ring_commit(ring); 63 - spin_unlock_irqrestore(&kiq->ring_lock, flags); 64 - 65 - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 66 - 67 - /* don't wait anymore for gpu reset case because this way may 68 - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg 69 - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will 70 - * never return if we keep waiting in virt_kiq_rreg, which cause 71 - * gpu_recover() hang there. 72 - * 73 - * also don't wait anymore for IRQ context 74 - * */ 75 - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) 76 - goto failed_kiq_read; 77 - 78 - might_sleep(); 79 - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { 80 - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); 81 - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 82 - } 83 - 84 - if (cnt > MAX_KIQ_REG_TRY) 85 - goto failed_kiq_read; 86 - 87 - return adev->wb.wb[adev->virt.reg_val_offs]; 88 - 89 - failed_kiq_read: 90 - pr_err("failed to read reg:%x\n", reg); 91 - return ~0; 92 - } 93 - 94 - void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v) 95 - { 96 - signed long r, cnt = 0; 97 - unsigned long flags; 98 - uint32_t seq; 99 - struct amdgpu_kiq *kiq = &adev->gfx.kiq; 100 - struct amdgpu_ring *ring = &kiq->ring; 101 - 102 - BUG_ON(!ring->funcs->emit_wreg); 103 - 104 - spin_lock_irqsave(&kiq->ring_lock, flags); 105 - amdgpu_ring_alloc(ring, 32); 106 - amdgpu_ring_emit_wreg(ring, reg, v); 107 - amdgpu_fence_emit_polling(ring, 
&seq); 108 - amdgpu_ring_commit(ring); 109 - spin_unlock_irqrestore(&kiq->ring_lock, flags); 110 - 111 - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 112 - 113 - /* don't wait anymore for gpu reset case because this way may 114 - * block gpu_recover() routine forever, e.g. this virt_kiq_rreg 115 - * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will 116 - * never return if we keep waiting in virt_kiq_rreg, which cause 117 - * gpu_recover() hang there. 118 - * 119 - * also don't wait anymore for IRQ context 120 - * */ 121 - if (r < 1 && (adev->in_gpu_reset || in_interrupt())) 122 - goto failed_kiq_write; 123 - 124 - might_sleep(); 125 - while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) { 126 - 127 - msleep(MAX_KIQ_REG_BAILOUT_INTERVAL); 128 - r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT); 129 - } 130 - 131 - if (cnt > MAX_KIQ_REG_TRY) 132 - goto failed_kiq_write; 133 - 134 - return; 135 - 136 - failed_kiq_write: 137 - pr_err("failed to write reg:%x\n", reg); 138 - } 139 - 140 48 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, 141 49 uint32_t reg0, uint32_t reg1, 142 50 uint32_t ref, uint32_t mask)
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
··· 287 287 288 288 bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); 289 289 void amdgpu_virt_init_setting(struct amdgpu_device *adev); 290 - uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); 291 - void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); 292 290 void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, 293 291 uint32_t reg0, uint32_t rreg1, 294 292 uint32_t ref, uint32_t mask);
+2 -2
drivers/gpu/drm/amd/amdgpu/athub_v1_0.c
··· 74 74 case CHIP_VEGA20: 75 75 case CHIP_RAVEN: 76 76 athub_update_medium_grain_clock_gating(adev, 77 - state == AMD_CG_STATE_GATE ? true : false); 77 + state == AMD_CG_STATE_GATE); 78 78 athub_update_medium_grain_light_sleep(adev, 79 - state == AMD_CG_STATE_GATE ? true : false); 79 + state == AMD_CG_STATE_GATE); 80 80 break; 81 81 default: 82 82 break;
+2 -2
drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
··· 77 77 case CHIP_NAVI14: 78 78 case CHIP_NAVI12: 79 79 athub_v2_0_update_medium_grain_clock_gating(adev, 80 - state == AMD_CG_STATE_GATE ? true : false); 80 + state == AMD_CG_STATE_GATE); 81 81 athub_v2_0_update_medium_grain_light_sleep(adev, 82 - state == AMD_CG_STATE_GATE ? true : false); 82 + state == AMD_CG_STATE_GATE); 83 83 break; 84 84 default: 85 85 break;
+58 -1
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
··· 27 27 #include "df/df_3_6_offset.h" 28 28 #include "df/df_3_6_sh_mask.h" 29 29 30 + #define DF_3_6_SMN_REG_INST_DIST 0x8 31 + #define DF_3_6_INST_CNT 8 32 + 30 33 static u32 df_v3_6_channel_number[] = {1, 2, 0, 4, 0, 8, 0, 31 34 16, 32, 0, 0, 0, 2, 4, 8}; 32 35 ··· 686 683 } 687 684 } 688 685 686 + static uint64_t df_v3_6_get_dram_base_addr(struct amdgpu_device *adev, 687 + uint32_t df_inst) 688 + { 689 + uint32_t base_addr_reg_val = 0; 690 + uint64_t base_addr = 0; 691 + 692 + base_addr_reg_val = RREG32_PCIE(smnDF_CS_UMC_AON0_DramBaseAddress0 + 693 + df_inst * DF_3_6_SMN_REG_INST_DIST); 694 + 695 + if (REG_GET_FIELD(base_addr_reg_val, 696 + DF_CS_UMC_AON0_DramBaseAddress0, 697 + AddrRngVal) == 0) { 698 + DRM_WARN("address range not valid"); 699 + return 0; 700 + } 701 + 702 + base_addr = REG_GET_FIELD(base_addr_reg_val, 703 + DF_CS_UMC_AON0_DramBaseAddress0, 704 + DramBaseAddr); 705 + 706 + return base_addr << 28; 707 + } 708 + 709 + static uint32_t df_v3_6_get_df_inst_id(struct amdgpu_device *adev) 710 + { 711 + uint32_t xgmi_node_id = 0; 712 + uint32_t df_inst_id = 0; 713 + 714 + /* Walk through DF dst nodes to find current XGMI node */ 715 + for (df_inst_id = 0; df_inst_id < DF_3_6_INST_CNT; df_inst_id++) { 716 + 717 + xgmi_node_id = RREG32_PCIE(smnDF_CS_UMC_AON0_DramLimitAddress0 + 718 + df_inst_id * DF_3_6_SMN_REG_INST_DIST); 719 + xgmi_node_id = REG_GET_FIELD(xgmi_node_id, 720 + DF_CS_UMC_AON0_DramLimitAddress0, 721 + DstFabricID); 722 + 723 + /* TODO: establish reason dest fabric id is offset by 7 */ 724 + xgmi_node_id = xgmi_node_id >> 7; 725 + 726 + if (adev->gmc.xgmi.physical_node_id == xgmi_node_id) 727 + break; 728 + } 729 + 730 + if (df_inst_id == DF_3_6_INST_CNT) { 731 + DRM_WARN("cant match df dst id with gpu node"); 732 + return 0; 733 + } 734 + 735 + return df_inst_id; 736 + } 737 + 689 738 const struct amdgpu_df_funcs df_v3_6_funcs = { 690 739 .sw_init = df_v3_6_sw_init, 691 740 .sw_fini = df_v3_6_sw_fini, ··· 751 696 .pmc_stop = 
df_v3_6_pmc_stop, 752 697 .pmc_get_count = df_v3_6_pmc_get_count, 753 698 .get_fica = df_v3_6_get_fica, 754 - .set_fica = df_v3_6_set_fica 699 + .set_fica = df_v3_6_set_fica, 700 + .get_dram_base_addr = df_v3_6_get_dram_base_addr, 701 + .get_df_inst_id = df_v3_6_get_df_inst_id 755 702 };
+6 -5
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 368 368 .map_queues_size = 7, 369 369 .unmap_queues_size = 6, 370 370 .query_status_size = 7, 371 - .invalidate_tlbs_size = 12, 371 + .invalidate_tlbs_size = 2, 372 372 }; 373 373 374 374 static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) ··· 4229 4229 enum amd_powergating_state state) 4230 4230 { 4231 4231 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4232 - bool enable = (state == AMD_PG_STATE_GATE) ? true : false; 4232 + bool enable = (state == AMD_PG_STATE_GATE); 4233 4233 switch (adev->asic_type) { 4234 4234 case CHIP_NAVI10: 4235 4235 case CHIP_NAVI14: ··· 4255 4255 case CHIP_NAVI14: 4256 4256 case CHIP_NAVI12: 4257 4257 gfx_v10_0_update_gfx_clock_gating(adev, 4258 - state == AMD_CG_STATE_GATE ? true : false); 4258 + state == AMD_CG_STATE_GATE); 4259 4259 break; 4260 4260 default: 4261 4261 break; ··· 4737 4737 static void gfx_v10_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 4738 4738 { 4739 4739 struct amdgpu_device *adev = ring->adev; 4740 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 4740 4741 4741 4742 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 4742 4743 amdgpu_ring_write(ring, 0 | /* src: register*/ ··· 4746 4745 amdgpu_ring_write(ring, reg); 4747 4746 amdgpu_ring_write(ring, 0); 4748 4747 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 4749 - adev->virt.reg_val_offs * 4)); 4748 + kiq->reg_val_offs * 4)); 4750 4749 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 4751 - adev->virt.reg_val_offs * 4)); 4750 + kiq->reg_val_offs * 4)); 4752 4751 } 4753 4752 4754 4753 static void gfx_v10_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+3 -2
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 6449 6449 static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 6450 6450 { 6451 6451 struct amdgpu_device *adev = ring->adev; 6452 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 6452 6453 6453 6454 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 6454 6455 amdgpu_ring_write(ring, 0 | /* src: register*/ ··· 6458 6457 amdgpu_ring_write(ring, reg); 6459 6458 amdgpu_ring_write(ring, 0); 6460 6459 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 6461 - adev->virt.reg_val_offs * 4)); 6460 + kiq->reg_val_offs * 4)); 6462 6461 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 6463 - adev->virt.reg_val_offs * 4)); 6462 + kiq->reg_val_offs * 4)); 6464 6463 } 6465 6464 6466 6465 static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg,
+170 -50
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 48 48 49 49 #include "amdgpu_ras.h" 50 50 51 + #include "gfx_v9_4.h" 52 + 51 53 #define GFX9_NUM_GFX_RINGS 1 52 54 #define GFX9_MEC_HPD_SIZE 4096 53 55 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L ··· 738 736 static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); 739 737 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, 740 738 void *ras_error_status); 739 + static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev); 741 740 static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, 742 741 void *inject_if); 743 742 ··· 862 859 .map_queues_size = 7, 863 860 .unmap_queues_size = 6, 864 861 .query_status_size = 7, 865 - .invalidate_tlbs_size = 12, 862 + .invalidate_tlbs_size = 2, 866 863 }; 867 864 868 865 static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev) ··· 1162 1159 } 1163 1160 } 1164 1161 1162 + struct amdgpu_gfxoff_quirk { 1163 + u16 chip_vendor; 1164 + u16 chip_device; 1165 + u16 subsys_vendor; 1166 + u16 subsys_device; 1167 + u8 revision; 1168 + }; 1169 + 1170 + static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = { 1171 + /* https://bugzilla.kernel.org/show_bug.cgi?id=204689 */ 1172 + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, 1173 + { 0, 0, 0, 0, 0 }, 1174 + }; 1175 + 1176 + static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev) 1177 + { 1178 + const struct amdgpu_gfxoff_quirk *p = amdgpu_gfxoff_quirk_list; 1179 + 1180 + while (p && p->chip_device != 0) { 1181 + if (pdev->vendor == p->chip_vendor && 1182 + pdev->device == p->chip_device && 1183 + pdev->subsystem_vendor == p->subsys_vendor && 1184 + pdev->subsystem_device == p->subsys_device && 1185 + pdev->revision == p->revision) { 1186 + return true; 1187 + } 1188 + ++p; 1189 + } 1190 + return false; 1191 + } 1192 + 1165 1193 static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev) 1166 1194 { 1195 + if (gfx_v9_0_should_disable_gfxoff(adev->pdev)) 1196 + adev->pm.pp_feature &= 
~PP_GFXOFF_MASK; 1197 + 1167 1198 switch (adev->asic_type) { 1168 1199 case CHIP_VEGA10: 1169 1200 case CHIP_VEGA12: 1170 1201 case CHIP_VEGA20: 1171 1202 break; 1172 1203 case CHIP_RAVEN: 1173 - if (!(adev->rev_id >= 0x8 || 1174 - adev->pdev->device == 0x15d8) && 1175 - (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */ 1176 - !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */ 1204 + if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) && 1205 + ((adev->gfx.rlc_fw_version != 106 && 1206 + adev->gfx.rlc_fw_version < 531) || 1207 + (adev->gfx.rlc_fw_version == 53815) || 1208 + (adev->gfx.rlc_feature_version < 1) || 1209 + !adev->gfx.rlc.is_rlc_v2_1)) 1177 1210 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 1178 1211 1179 1212 if (adev->pm.pp_feature & PP_GFXOFF_MASK) ··· 1988 1949 .query_ras_error_count = &gfx_v9_0_query_ras_error_count 1989 1950 }; 1990 1951 1952 + static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = { 1953 + .get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter, 1954 + .select_se_sh = &gfx_v9_0_select_se_sh, 1955 + .read_wave_data = &gfx_v9_0_read_wave_data, 1956 + .read_wave_sgprs = &gfx_v9_0_read_wave_sgprs, 1957 + .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs, 1958 + .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, 1959 + .ras_error_inject = &gfx_v9_4_ras_error_inject, 1960 + .query_ras_error_count = &gfx_v9_4_query_ras_error_count 1961 + }; 1962 + 1991 1963 static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) 1992 1964 { 1993 1965 u32 gb_addr_config; ··· 2050 2000 gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; 2051 2001 break; 2052 2002 case CHIP_ARCTURUS: 2003 + adev->gfx.funcs = &gfx_v9_4_gfx_funcs; 2053 2004 adev->gfx.config.max_hw_contexts = 8; 2054 2005 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; 2055 2006 adev->gfx.config.sc_prim_fifo_size_backend = 0x100; ··· 2441 2390 } 2442 2391 } 2443 2392 2393 + static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev) 2394 + { 2395 + uint32_t tmp; 
2396 + 2397 + switch (adev->asic_type) { 2398 + case CHIP_ARCTURUS: 2399 + tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG); 2400 + tmp = REG_SET_FIELD(tmp, SQ_CONFIG, 2401 + DISABLE_BARRIER_WAITCNT, 1); 2402 + WREG32_SOC15(GC, 0, mmSQ_CONFIG, tmp); 2403 + break; 2404 + default: 2405 + break; 2406 + }; 2407 + } 2408 + 2444 2409 static void gfx_v9_0_constants_init(struct amdgpu_device *adev) 2445 2410 { 2446 2411 u32 tmp; ··· 2502 2435 2503 2436 gfx_v9_0_init_compute_vmid(adev); 2504 2437 gfx_v9_0_init_gds_vmid(adev); 2438 + gfx_v9_0_init_sq_config(adev); 2505 2439 } 2506 2440 2507 2441 static void gfx_v9_0_wait_for_rlc_serdes(struct amdgpu_device *adev) ··· 4097 4029 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 }, 4098 4030 }; 4099 4031 4100 - static const struct soc15_reg_entry sec_ded_counter_registers[] = { 4032 + static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = { 4101 4033 { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1}, 4102 4034 { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1}, 4103 4035 { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1}, ··· 4186 4118 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; 4187 4119 struct amdgpu_ib ib; 4188 4120 struct dma_fence *f = NULL; 4189 - int r, i, j, k; 4121 + int r, i; 4190 4122 unsigned total_size, vgpr_offset, sgpr_offset; 4191 4123 u64 gpu_addr; 4192 4124 ··· 4332 4264 goto fail; 4333 4265 } 4334 4266 4335 - /* read back registers to clear the counters */ 4336 - mutex_lock(&adev->grbm_idx_mutex); 4337 - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { 4338 - for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { 4339 - for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { 4340 - gfx_v9_0_select_se_sh(adev, j, 0x0, k); 4341 - RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); 4342 - } 4343 - } 4267 + switch (adev->asic_type) 4268 + { 4269 + case CHIP_VEGA20: 4270 + gfx_v9_0_clear_ras_edc_counter(adev); 4271 + break; 4272 + 
case CHIP_ARCTURUS: 4273 + gfx_v9_4_clear_ras_edc_counter(adev); 4274 + break; 4275 + default: 4276 + break; 4344 4277 } 4345 - WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); 4346 - mutex_unlock(&adev->grbm_idx_mutex); 4347 4278 4348 4279 fail: 4349 4280 amdgpu_ib_free(adev, &ib, NULL); ··· 4705 4638 enum amd_powergating_state state) 4706 4639 { 4707 4640 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 4708 - bool enable = (state == AMD_PG_STATE_GATE) ? true : false; 4641 + bool enable = (state == AMD_PG_STATE_GATE); 4709 4642 4710 4643 switch (adev->asic_type) { 4711 4644 case CHIP_RAVEN: ··· 4767 4700 case CHIP_ARCTURUS: 4768 4701 case CHIP_RENOIR: 4769 4702 gfx_v9_0_update_gfx_clock_gating(adev, 4770 - state == AMD_CG_STATE_GATE ? true : false); 4703 + state == AMD_CG_STATE_GATE); 4771 4704 break; 4772 4705 default: 4773 4706 break; ··· 4784 4717 *flags = 0; 4785 4718 4786 4719 /* AMD_CG_SUPPORT_GFX_MGCG */ 4787 - data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE); 4720 + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE)); 4788 4721 if (!(data & RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK)) 4789 4722 *flags |= AMD_CG_SUPPORT_GFX_MGCG; 4790 4723 4791 4724 /* AMD_CG_SUPPORT_GFX_CGCG */ 4792 - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL); 4725 + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL)); 4793 4726 if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) 4794 4727 *flags |= AMD_CG_SUPPORT_GFX_CGCG; 4795 4728 ··· 4798 4731 *flags |= AMD_CG_SUPPORT_GFX_CGLS; 4799 4732 4800 4733 /* AMD_CG_SUPPORT_GFX_RLC_LS */ 4801 - data = RREG32_SOC15(GC, 0, mmRLC_MEM_SLP_CNTL); 4734 + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_MEM_SLP_CNTL)); 4802 4735 if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) 4803 4736 *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; 4804 4737 4805 4738 /* AMD_CG_SUPPORT_GFX_CP_LS */ 4806 - data = RREG32_SOC15(GC, 0, mmCP_MEM_SLP_CNTL); 4739 + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 
0, mmCP_MEM_SLP_CNTL)); 4807 4740 if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) 4808 4741 *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; 4809 4742 4810 4743 if (adev->asic_type != CHIP_ARCTURUS) { 4811 4744 /* AMD_CG_SUPPORT_GFX_3D_CGCG */ 4812 - data = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D); 4745 + data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D)); 4813 4746 if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK) 4814 4747 *flags |= AMD_CG_SUPPORT_GFX_3D_CGCG; 4815 4748 ··· 5280 5213 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) 5281 5214 { 5282 5215 struct amdgpu_device *adev = ring->adev; 5216 + struct amdgpu_kiq *kiq = &adev->gfx.kiq; 5283 5217 5284 5218 amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); 5285 5219 amdgpu_ring_write(ring, 0 | /* src: register*/ ··· 5289 5221 amdgpu_ring_write(ring, reg); 5290 5222 amdgpu_ring_write(ring, 0); 5291 5223 amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + 5292 - adev->virt.reg_val_offs * 4)); 5224 + kiq->reg_val_offs * 4)); 5293 5225 amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + 5294 - adev->virt.reg_val_offs * 4)); 5226 + kiq->reg_val_offs * 4)); 5295 5227 } 5296 5228 5297 5229 static void gfx_v9_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, ··· 5613 5545 } 5614 5546 5615 5547 5616 - static const struct soc15_ras_field_entry gc_ras_fields_vg20[] = { 5548 + static const struct soc15_ras_field_entry gfx_v9_0_ras_fields[] = { 5617 5549 { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 5618 5550 SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), 5619 5551 SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) ··· 6061 5993 int ret; 6062 5994 struct ta_ras_trigger_error_input block_info = { 0 }; 6063 5995 6064 - if (adev->asic_type != CHIP_VEGA20) 5996 + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) 6065 5997 return -EINVAL; 6066 5998 6067 5999 if (info->head.sub_block_index >= 
ARRAY_SIZE(ras_gfx_subblocks)) ··· 6186 6118 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); 6187 6119 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); 6188 6120 6189 - for (i = 0; i < 16; i++) { 6121 + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { 6190 6122 WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); 6191 6123 data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); 6192 6124 ··· 6205 6137 } 6206 6138 } 6207 6139 6208 - for (i = 0; i < 7; i++) { 6140 + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { 6209 6141 WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); 6210 6142 data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); 6211 6143 ··· 6226 6158 } 6227 6159 } 6228 6160 6229 - for (i = 0; i < 4; i++) { 6161 + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { 6230 6162 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); 6231 6163 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); 6232 6164 ··· 6238 6170 } 6239 6171 } 6240 6172 6241 - for (i = 0; i < 32; i++) { 6173 + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { 6242 6174 WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); 6243 6175 data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); 6244 6176 ··· 6265 6197 return 0; 6266 6198 } 6267 6199 6268 - static int __get_ras_error_count(const struct soc15_reg_entry *reg, 6200 + static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg, 6269 6201 uint32_t se_id, uint32_t inst_id, uint32_t value, 6270 6202 uint32_t *sec_count, uint32_t *ded_count) 6271 6203 { 6272 6204 uint32_t i; 6273 6205 uint32_t sec_cnt, ded_cnt; 6274 6206 6275 - for (i = 0; i < ARRAY_SIZE(gc_ras_fields_vg20); i++) { 6276 - if(gc_ras_fields_vg20[i].reg_offset != reg->reg_offset || 6277 - gc_ras_fields_vg20[i].seg != reg->seg || 6278 - gc_ras_fields_vg20[i].inst != reg->inst) 6207 + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_ras_fields); i++) { 6208 + if(gfx_v9_0_ras_fields[i].reg_offset != reg->reg_offset || 6209 + gfx_v9_0_ras_fields[i].seg != reg->seg || 6210 
+ gfx_v9_0_ras_fields[i].inst != reg->inst) 6279 6211 continue; 6280 6212 6281 6213 sec_cnt = (value & 6282 - gc_ras_fields_vg20[i].sec_count_mask) >> 6283 - gc_ras_fields_vg20[i].sec_count_shift; 6214 + gfx_v9_0_ras_fields[i].sec_count_mask) >> 6215 + gfx_v9_0_ras_fields[i].sec_count_shift; 6284 6216 if (sec_cnt) { 6285 6217 DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", 6286 - gc_ras_fields_vg20[i].name, 6218 + gfx_v9_0_ras_fields[i].name, 6287 6219 se_id, inst_id, 6288 6220 sec_cnt); 6289 6221 *sec_count += sec_cnt; 6290 6222 } 6291 6223 6292 6224 ded_cnt = (value & 6293 - gc_ras_fields_vg20[i].ded_count_mask) >> 6294 - gc_ras_fields_vg20[i].ded_count_shift; 6225 + gfx_v9_0_ras_fields[i].ded_count_mask) >> 6226 + gfx_v9_0_ras_fields[i].ded_count_shift; 6295 6227 if (ded_cnt) { 6296 6228 DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", 6297 - gc_ras_fields_vg20[i].name, 6229 + gfx_v9_0_ras_fields[i].name, 6298 6230 se_id, inst_id, 6299 6231 ded_cnt); 6300 6232 *ded_count += ded_cnt; ··· 6302 6234 } 6303 6235 6304 6236 return 0; 6237 + } 6238 + 6239 + static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev) 6240 + { 6241 + int i, j, k; 6242 + 6243 + /* read back registers to clear the counters */ 6244 + mutex_lock(&adev->grbm_idx_mutex); 6245 + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { 6246 + for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { 6247 + for (k = 0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { 6248 + gfx_v9_0_select_se_sh(adev, j, 0x0, k); 6249 + RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); 6250 + } 6251 + } 6252 + } 6253 + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); 6254 + mutex_unlock(&adev->grbm_idx_mutex); 6255 + 6256 + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); 6257 + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0); 6258 + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); 6259 + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0); 6260 + WREG32_SOC15(GC, 
0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); 6261 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0); 6262 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); 6263 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); 6264 + 6265 + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { 6266 + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); 6267 + RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); 6268 + } 6269 + 6270 + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { 6271 + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); 6272 + RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); 6273 + } 6274 + 6275 + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { 6276 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); 6277 + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); 6278 + } 6279 + 6280 + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { 6281 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); 6282 + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); 6283 + } 6284 + 6285 + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); 6286 + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); 6287 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); 6288 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); 6305 6289 } 6306 6290 6307 6291 static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, ··· 6364 6244 uint32_t i, j, k; 6365 6245 uint32_t reg_value; 6366 6246 6367 - if (adev->asic_type != CHIP_VEGA20) 6247 + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) 6368 6248 return -EINVAL; 6369 6249 6370 6250 err_data->ue_count = 0; ··· 6372 6252 6373 6253 mutex_lock(&adev->grbm_idx_mutex); 6374 6254 6375 - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { 6376 - for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { 6377 - for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { 6255 + for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) { 6256 + for (j = 0; j < gfx_v9_0_edc_counter_regs[i].se_num; j++) { 6257 + for (k = 
0; k < gfx_v9_0_edc_counter_regs[i].instance; k++) { 6378 6258 gfx_v9_0_select_se_sh(adev, j, 0, k); 6379 6259 reg_value = 6380 - RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); 6260 + RREG32(SOC15_REG_ENTRY_OFFSET(gfx_v9_0_edc_counter_regs[i])); 6381 6261 if (reg_value) 6382 - __get_ras_error_count(&sec_ded_counter_registers[i], 6262 + gfx_v9_0_ras_error_count(&gfx_v9_0_edc_counter_regs[i], 6383 6263 j, k, reg_value, 6384 6264 &sec_count, &ded_count); 6385 6265 }
+978
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
··· 1 + /* 2 + * Copyright 2020 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + * 22 + */ 23 + 24 + #include <linux/kernel.h> 25 + 26 + #include "amdgpu.h" 27 + #include "amdgpu_gfx.h" 28 + #include "soc15.h" 29 + #include "soc15d.h" 30 + #include "amdgpu_atomfirmware.h" 31 + #include "amdgpu_pm.h" 32 + 33 + #include "gc/gc_9_4_1_offset.h" 34 + #include "gc/gc_9_4_1_sh_mask.h" 35 + #include "soc15_common.h" 36 + 37 + #include "gfx_v9_4.h" 38 + #include "amdgpu_ras.h" 39 + 40 + static const struct soc15_reg_entry gfx_v9_4_edc_counter_regs[] = { 41 + /* CPC */ 42 + { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, 1 }, 43 + { SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, 1 }, 44 + /* DC */ 45 + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, 1 }, 46 + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, 1 }, 47 + { SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, 1 }, 48 + /* CPF */ 49 + { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, 1 }, 50 + { SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, 1 }, 51 + /* GDS */ 52 + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, 1 }, 53 + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 0, 1, 1 }, 54 + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_DED), 0, 1, 1 }, 55 + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, 1 }, 56 + { SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, 1 }, 57 + /* SPI */ 58 + { SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 0, 4, 1 }, 59 + /* SQ */ 60 + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16 }, 61 + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_DED_CNT), 0, 4, 16 }, 62 + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_INFO), 0, 4, 16 }, 63 + { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_SEC_CNT), 0, 4, 16 }, 64 + /* SQC */ 65 + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 0, 4, 6 }, 66 + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6 }, 67 + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6 }, 68 + { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 0, 4, 6 }, 69 + /* TA */ 70 + { SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 0, 4, 16 }, 71 + /* TCA */ 72 + { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2 
}, 73 + /* TCC */ 74 + { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16 }, 75 + { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16 }, 76 + /* TCI */ 77 + { SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 1, 72 }, 78 + /* TCP */ 79 + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16 }, 80 + { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16 }, 81 + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16 }, 82 + /* TD */ 83 + { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16 }, 84 + /* GCEA */ 85 + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 1, 32 }, 86 + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 1, 32 }, 87 + { SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 1, 32 }, 88 + /* RLC */ 89 + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 0, 1, 1 }, 90 + { SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 0, 1, 1 }, 91 + }; 92 + 93 + static void gfx_v9_4_select_se_sh(struct amdgpu_device *adev, u32 se_num, 94 + u32 sh_num, u32 instance) 95 + { 96 + u32 data; 97 + 98 + if (instance == 0xffffffff) 99 + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, 100 + INSTANCE_BROADCAST_WRITES, 1); 101 + else 102 + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, 103 + instance); 104 + 105 + if (se_num == 0xffffffff) 106 + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 107 + 1); 108 + else 109 + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 110 + 111 + if (sh_num == 0xffffffff) 112 + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 113 + 1); 114 + else 115 + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); 116 + 117 + WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data); 118 + } 119 + 120 + static const struct soc15_ras_field_entry gfx_v9_4_ras_fields[] = { 121 + /* CPC */ 122 + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 123 + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), 124 + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, 125 + { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 126 + 
SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT), 127 + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) }, 128 + { "CPC_DC_STATE_RAM_ME1", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 129 + SOC15_REG_FIELD(DC_EDC_STATE_CNT, SEC_COUNT_ME1), 130 + SOC15_REG_FIELD(DC_EDC_STATE_CNT, DED_COUNT_ME1) }, 131 + { "CPC_DC_CSINVOC_RAM_ME1", 132 + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 133 + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT_ME1), 134 + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT_ME1) }, 135 + { "CPC_DC_RESTORE_RAM_ME1", 136 + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 137 + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT_ME1), 138 + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT_ME1) }, 139 + { "CPC_DC_CSINVOC_RAM1_ME1", 140 + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 141 + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, SEC_COUNT1_ME1), 142 + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, DED_COUNT1_ME1) }, 143 + { "CPC_DC_RESTORE_RAM1_ME1", 144 + SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 145 + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, SEC_COUNT1_ME1), 146 + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, DED_COUNT1_ME1) }, 147 + 148 + /* CPF */ 149 + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 150 + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME2), 151 + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME2) }, 152 + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 153 + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, SEC_COUNT_ME1), 154 + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, DED_COUNT_ME1) }, 155 + { "CPF_TCIU_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 156 + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT), 157 + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) }, 158 + 159 + /* GDS */ 160 + { "GDS_GRBM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_GRBM_CNT), 161 + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, SEC), 162 + SOC15_REG_FIELD(GDS_EDC_GRBM_CNT, DED) }, 163 + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 164 + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC), 165 + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) 
}, 166 + { "GDS_PHY_CMD_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 167 + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), 168 + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, 169 + { "GDS_PHY_DATA_RAM_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 170 + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SEC), 171 + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_DED) }, 172 + { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 173 + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), 174 + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, 175 + { "GDS_ME1_PIPE0_PIPE_MEM", 176 + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 177 + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), 178 + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, 179 + { "GDS_ME1_PIPE1_PIPE_MEM", 180 + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 181 + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), 182 + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, 183 + { "GDS_ME1_PIPE2_PIPE_MEM", 184 + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 185 + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), 186 + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, 187 + { "GDS_ME1_PIPE3_PIPE_MEM", 188 + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 189 + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), 190 + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, 191 + 192 + /* SPI */ 193 + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 194 + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SEC_COUNT), 195 + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_DED_COUNT) }, 196 + { "SPI_GDS_EXPREQ", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 197 + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_SEC_COUNT), 198 + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_GDS_EXPREQ_DED_COUNT) }, 199 + { "SPI_WB_GRANT_30", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 200 + 
SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_SEC_COUNT), 201 + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_30_DED_COUNT) }, 202 + { "SPI_WB_GRANT_61", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 203 + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_SEC_COUNT), 204 + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_WB_GRANT_61_DED_COUNT) }, 205 + { "SPI_LIFE_CNT", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 206 + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_SEC_COUNT), 207 + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_LIFE_CNT_DED_COUNT) }, 208 + 209 + /* SQ */ 210 + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 211 + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT), 212 + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) }, 213 + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 214 + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT), 215 + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) }, 216 + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 217 + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT), 218 + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) }, 219 + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 220 + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT), 221 + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) }, 222 + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 223 + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT), 224 + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) }, 225 + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 226 + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT), 227 + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) }, 228 + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 229 + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT), 230 + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) }, 231 + 232 + /* SQC */ 233 + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 234 + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), 235 + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, 236 + { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 237 + 
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), 238 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, 239 + { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 240 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), 241 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, 242 + { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 243 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), 244 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, 245 + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 246 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), 247 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, 248 + { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 249 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), 250 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, 251 + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 252 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), 253 + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, 254 + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 255 + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), 256 + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, 257 + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", 258 + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 259 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, 260 + INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT), 261 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, 262 + INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT) }, 263 + { "SQC_INST_BANKA_MISS_FIFO", 264 + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 265 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKA_MISS_FIFO_SEC_COUNT), 266 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, 267 + INST_BANKA_MISS_FIFO_DED_COUNT) }, 268 + { "SQC_INST_BANKA_BANK_RAM", 
SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 269 + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), 270 + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, 271 + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 272 + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), 273 + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, 274 + { "SQC_DATA_BANKA_HIT_FIFO", 275 + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 276 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_SEC_COUNT), 277 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_HIT_FIFO_DED_COUNT) }, 278 + { "SQC_DATA_BANKA_MISS_FIFO", 279 + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 280 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKA_MISS_FIFO_SEC_COUNT), 281 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, 282 + DATA_BANKA_MISS_FIFO_DED_COUNT) }, 283 + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 284 + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), 285 + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, 286 + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 287 + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), 288 + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, 289 + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", 290 + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 291 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, 292 + INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT), 293 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, 294 + INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT) }, 295 + { "SQC_INST_BANKB_MISS_FIFO", 296 + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 297 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, INST_BANKB_MISS_FIFO_SEC_COUNT), 298 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, 299 + INST_BANKB_MISS_FIFO_DED_COUNT) }, 300 + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 301 + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), 302 + 
SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, 303 + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 304 + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), 305 + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) }, 306 + { "SQC_DATA_BANKB_HIT_FIFO", 307 + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 308 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_SEC_COUNT), 309 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_HIT_FIFO_DED_COUNT) }, 310 + { "SQC_DATA_BANKB_MISS_FIFO", 311 + SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_PARITY_CNT3), 312 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, DATA_BANKB_MISS_FIFO_SEC_COUNT), 313 + SOC15_REG_FIELD(SQC_EDC_PARITY_CNT3, 314 + DATA_BANKB_MISS_FIFO_DED_COUNT) }, 315 + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 316 + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), 317 + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, 318 + 319 + /* TA */ 320 + { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 321 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), 322 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, 323 + { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 324 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SEC_COUNT), 325 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_DED_COUNT) }, 326 + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 327 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SEC_COUNT), 328 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_DED_COUNT) }, 329 + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 330 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SEC_COUNT), 331 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_DED_COUNT) }, 332 + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 333 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SEC_COUNT), 334 + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_DED_COUNT) }, 335 + 336 + /* TCA */ 337 + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 338 + 
SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SEC_COUNT), 339 + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_DED_COUNT) }, 340 + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 341 + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SEC_COUNT), 342 + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_DED_COUNT) }, 343 + 344 + /* TCC */ 345 + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 346 + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), 347 + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, 348 + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 349 + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), 350 + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, 351 + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 352 + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), 353 + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, 354 + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 355 + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), 356 + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, 357 + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 358 + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_SEC_COUNT), 359 + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_DEC_DED_COUNT) }, 360 + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 361 + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_SEC_COUNT), 362 + SOC15_REG_FIELD(TCC_EDC_CNT2, IN_USE_TRANSFER_DED_COUNT) }, 363 + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 364 + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_SEC_COUNT), 365 + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_DATA_DED_COUNT) }, 366 + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 367 + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_SEC_COUNT), 368 + SOC15_REG_FIELD(TCC_EDC_CNT2, RETURN_CONTROL_DED_COUNT) }, 369 + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 370 + SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_SEC_COUNT), 371 + 
SOC15_REG_FIELD(TCC_EDC_CNT2, UC_ATOMIC_FIFO_DED_COUNT) }, 372 + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 373 + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SEC_COUNT), 374 + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_DED_COUNT) }, 375 + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 376 + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SEC_COUNT), 377 + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_DED_COUNT) }, 378 + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 379 + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), 380 + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, 381 + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 382 + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SEC_COUNT), 383 + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_DED_COUNT) }, 384 + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 385 + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SEC_COUNT), 386 + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_DED_COUNT) }, 387 + { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 388 + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_SEC_COUNT), 389 + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_NEXT_RAM_DED_COUNT) }, 390 + 391 + /* TCI */ 392 + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 393 + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SEC_COUNT), 394 + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_DED_COUNT) }, 395 + 396 + /* TCP */ 397 + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 398 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), 399 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, 400 + { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 401 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), 402 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, 403 + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 404 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SEC_COUNT), 405 + 
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_DED_COUNT) }, 406 + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 407 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 408 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_DED_COUNT) }, 409 + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 410 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0, 0 }, 411 + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 412 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), 413 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, 414 + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 415 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), 416 + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, 417 + 418 + /* TD */ 419 + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 420 + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), 421 + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, 422 + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 423 + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), 424 + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, 425 + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 426 + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SEC_COUNT), 427 + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_DED_COUNT) }, 428 + 429 + /* EA */ 430 + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 431 + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), 432 + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, 433 + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 434 + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), 435 + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, 436 + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 437 + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), 438 + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, 439 + { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 440 
+ SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), 441 + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, 442 + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 443 + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), 444 + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, 445 + { "EA_GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 446 + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), 447 + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, 448 + { "EA_GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 449 + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), 450 + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, 451 + { "EA_GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 452 + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), 453 + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, 454 + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 455 + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0, 0 }, 456 + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, 457 + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT) }, 458 + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 459 + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0, 0 }, 460 + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, 461 + SOC15_REG_FIELD(GCEA_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT) }, 462 + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 463 + SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0, 0 }, 464 + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, 465 + SOC15_REG_FIELD(GCEA_EDC_CNT3, IORD_CMDMEM_DED_COUNT) }, 466 + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 467 + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0, 0 }, 468 + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, 469 + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_CMDMEM_DED_COUNT) }, 470 
+ { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 471 + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0, 0 }, 472 + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, 473 + SOC15_REG_FIELD(GCEA_EDC_CNT3, IOWR_DATAMEM_DED_COUNT) }, 474 + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 475 + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0, 0 }, 476 + { "EA_GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, 477 + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT) }, 478 + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 479 + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0, 0 }, 480 + { "EA_GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, 481 + SOC15_REG_FIELD(GCEA_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT) }, 482 + { "EA_MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 483 + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 484 + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_DED_COUNT) }, 485 + { "EA_MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 486 + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 487 + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_DED_COUNT) }, 488 + { "EA_MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 489 + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 490 + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_DED_COUNT) }, 491 + { "EA_MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 492 + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 493 + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_DED_COUNT) }, 494 + { "EA_MAM_A0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 495 + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_SEC_COUNT), 496 + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A0MEM_DED_COUNT) }, 497 + { "EA_MAM_A1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 498 + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_SEC_COUNT), 499 + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A1MEM_DED_COUNT) }, 500 + { "EA_MAM_A2MEM", SOC15_REG_ENTRY(GC, 0, 
mmGCEA_EDC_CNT3), 501 + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_SEC_COUNT), 502 + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A2MEM_DED_COUNT) }, 503 + { "EA_MAM_A3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 504 + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_SEC_COUNT), 505 + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_A3MEM_DED_COUNT) }, 506 + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 507 + SOC15_REG_FIELD(GCEA_EDC_CNT, MAM_AFMEM_SEC_COUNT), 0, 0 }, 508 + { "EA_MAM_AFMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT3), 0, 0, 509 + SOC15_REG_FIELD(GCEA_EDC_CNT3, MAM_AFMEM_DED_COUNT) }, 510 + 511 + /* RLC */ 512 + { "RLCG_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 513 + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_SEC_COUNT), 514 + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_INSTR_RAM_DED_COUNT) }, 515 + { "RLCG_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 516 + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_SEC_COUNT), 517 + SOC15_REG_FIELD(RLC_EDC_CNT, RLCG_SCRATCH_RAM_DED_COUNT) }, 518 + { "RLCV_INSTR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 519 + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_SEC_COUNT), 520 + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_INSTR_RAM_DED_COUNT) }, 521 + { "RLCV_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 522 + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_SEC_COUNT), 523 + SOC15_REG_FIELD(RLC_EDC_CNT, RLCV_SCRATCH_RAM_DED_COUNT) }, 524 + { "RLC_TCTAG_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 525 + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_SEC_COUNT), 526 + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_TCTAG_RAM_DED_COUNT) }, 527 + { "RLC_SPM_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 528 + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_SEC_COUNT), 529 + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SPM_SCRATCH_RAM_DED_COUNT) }, 530 + { "RLC_SRM_DATA_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 531 + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_SEC_COUNT), 532 + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_DATA_RAM_DED_COUNT) }, 533 + { 
"RLC_SRM_ADDR_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT), 534 + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_SEC_COUNT), 535 + SOC15_REG_FIELD(RLC_EDC_CNT, RLC_SRM_ADDR_RAM_DED_COUNT) }, 536 + { "RLC_SPM_SE0_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 537 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT), 538 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT) }, 539 + { "RLC_SPM_SE1_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 540 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT), 541 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT) }, 542 + { "RLC_SPM_SE2_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 543 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT), 544 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT) }, 545 + { "RLC_SPM_SE3_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 546 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT), 547 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT) }, 548 + { "RLC_SPM_SE4_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 549 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT), 550 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT) }, 551 + { "RLC_SPM_SE5_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 552 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT), 553 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT) }, 554 + { "RLC_SPM_SE6_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 555 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT), 556 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT) }, 557 + { "RLC_SPM_SE7_SCRATCH_RAM", SOC15_REG_ENTRY(GC, 0, mmRLC_EDC_CNT2), 558 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT), 559 + SOC15_REG_FIELD(RLC_EDC_CNT2, RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT) }, 560 + }; 561 + 562 + static const char * 
const vml2_mems[] = { 563 + "UTC_VML2_BANK_CACHE_0_BIGK_MEM0", 564 + "UTC_VML2_BANK_CACHE_0_BIGK_MEM1", 565 + "UTC_VML2_BANK_CACHE_0_4K_MEM0", 566 + "UTC_VML2_BANK_CACHE_0_4K_MEM1", 567 + "UTC_VML2_BANK_CACHE_1_BIGK_MEM0", 568 + "UTC_VML2_BANK_CACHE_1_BIGK_MEM1", 569 + "UTC_VML2_BANK_CACHE_1_4K_MEM0", 570 + "UTC_VML2_BANK_CACHE_1_4K_MEM1", 571 + "UTC_VML2_BANK_CACHE_2_BIGK_MEM0", 572 + "UTC_VML2_BANK_CACHE_2_BIGK_MEM1", 573 + "UTC_VML2_BANK_CACHE_2_4K_MEM0", 574 + "UTC_VML2_BANK_CACHE_2_4K_MEM1", 575 + "UTC_VML2_BANK_CACHE_3_BIGK_MEM0", 576 + "UTC_VML2_BANK_CACHE_3_BIGK_MEM1", 577 + "UTC_VML2_BANK_CACHE_3_4K_MEM0", 578 + "UTC_VML2_BANK_CACHE_3_4K_MEM1", 579 + "UTC_VML2_IFIFO_GROUP0", 580 + "UTC_VML2_IFIFO_GROUP1", 581 + "UTC_VML2_IFIFO_GROUP2", 582 + "UTC_VML2_IFIFO_GROUP3", 583 + "UTC_VML2_IFIFO_GROUP4", 584 + "UTC_VML2_IFIFO_GROUP5", 585 + "UTC_VML2_IFIFO_GROUP6", 586 + "UTC_VML2_IFIFO_GROUP7", 587 + "UTC_VML2_IFIFO_GROUP8", 588 + "UTC_VML2_IFIFO_GROUP9", 589 + "UTC_VML2_IFIFO_GROUP10", 590 + "UTC_VML2_IFIFO_GROUP11", 591 + "UTC_VML2_IFIFO_GROUP12", 592 + "UTC_VML2_IFIFO_GROUP13", 593 + "UTC_VML2_IFIFO_GROUP14", 594 + "UTC_VML2_IFIFO_GROUP15", 595 + "UTC_VML2_IFIFO_GROUP16", 596 + "UTC_VML2_IFIFO_GROUP17", 597 + "UTC_VML2_IFIFO_GROUP18", 598 + "UTC_VML2_IFIFO_GROUP19", 599 + "UTC_VML2_IFIFO_GROUP20", 600 + "UTC_VML2_IFIFO_GROUP21", 601 + "UTC_VML2_IFIFO_GROUP22", 602 + "UTC_VML2_IFIFO_GROUP23", 603 + "UTC_VML2_IFIFO_GROUP24", 604 + }; 605 + 606 + static const char * const vml2_walker_mems[] = { 607 + "UTC_VML2_CACHE_PDE0_MEM0", 608 + "UTC_VML2_CACHE_PDE0_MEM1", 609 + "UTC_VML2_CACHE_PDE1_MEM0", 610 + "UTC_VML2_CACHE_PDE1_MEM1", 611 + "UTC_VML2_CACHE_PDE2_MEM0", 612 + "UTC_VML2_CACHE_PDE2_MEM1", 613 + "UTC_VML2_RDIF_ARADDRS", 614 + "UTC_VML2_RDIF_LOG_FIFO", 615 + "UTC_VML2_QUEUE_REQ", 616 + "UTC_VML2_QUEUE_RET", 617 + }; 618 + 619 + static const char * const utcl2_router_mems[] = { 620 + "UTCL2_ROUTER_GROUP0_VML2_REQ_FIFO0", 621 + 
"UTCL2_ROUTER_GROUP1_VML2_REQ_FIFO1", 622 + "UTCL2_ROUTER_GROUP2_VML2_REQ_FIFO2", 623 + "UTCL2_ROUTER_GROUP3_VML2_REQ_FIFO3", 624 + "UTCL2_ROUTER_GROUP4_VML2_REQ_FIFO4", 625 + "UTCL2_ROUTER_GROUP5_VML2_REQ_FIFO5", 626 + "UTCL2_ROUTER_GROUP6_VML2_REQ_FIFO6", 627 + "UTCL2_ROUTER_GROUP7_VML2_REQ_FIFO7", 628 + "UTCL2_ROUTER_GROUP8_VML2_REQ_FIFO8", 629 + "UTCL2_ROUTER_GROUP9_VML2_REQ_FIFO9", 630 + "UTCL2_ROUTER_GROUP10_VML2_REQ_FIFO10", 631 + "UTCL2_ROUTER_GROUP11_VML2_REQ_FIFO11", 632 + "UTCL2_ROUTER_GROUP12_VML2_REQ_FIFO12", 633 + "UTCL2_ROUTER_GROUP13_VML2_REQ_FIFO13", 634 + "UTCL2_ROUTER_GROUP14_VML2_REQ_FIFO14", 635 + "UTCL2_ROUTER_GROUP15_VML2_REQ_FIFO15", 636 + "UTCL2_ROUTER_GROUP16_VML2_REQ_FIFO16", 637 + "UTCL2_ROUTER_GROUP17_VML2_REQ_FIFO17", 638 + "UTCL2_ROUTER_GROUP18_VML2_REQ_FIFO18", 639 + "UTCL2_ROUTER_GROUP19_VML2_REQ_FIFO19", 640 + "UTCL2_ROUTER_GROUP20_VML2_REQ_FIFO20", 641 + "UTCL2_ROUTER_GROUP21_VML2_REQ_FIFO21", 642 + "UTCL2_ROUTER_GROUP22_VML2_REQ_FIFO22", 643 + "UTCL2_ROUTER_GROUP23_VML2_REQ_FIFO23", 644 + "UTCL2_ROUTER_GROUP24_VML2_REQ_FIFO24", 645 + }; 646 + 647 + static const char * const atc_l2_cache_2m_mems[] = { 648 + "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM", 649 + "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM", 650 + "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM", 651 + "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM", 652 + }; 653 + 654 + static const char * const atc_l2_cache_4k_mems[] = { 655 + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0", 656 + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1", 657 + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2", 658 + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3", 659 + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4", 660 + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5", 661 + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6", 662 + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7", 663 + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0", 664 + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1", 665 + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2", 666 + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3", 667 + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4", 668 + 
"UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5", 669 + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6", 670 + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7", 671 + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0", 672 + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1", 673 + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2", 674 + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3", 675 + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4", 676 + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5", 677 + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6", 678 + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7", 679 + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0", 680 + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1", 681 + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2", 682 + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3", 683 + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4", 684 + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5", 685 + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6", 686 + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7", 687 + }; 688 + 689 + static int gfx_v9_4_query_utc_edc_status(struct amdgpu_device *adev, 690 + struct ras_err_data *err_data) 691 + { 692 + uint32_t i, data; 693 + uint32_t sec_count, ded_count; 694 + 695 + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); 696 + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0); 697 + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); 698 + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0); 699 + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); 700 + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0); 701 + 702 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); 703 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); 704 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); 705 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); 706 + 707 + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { 708 + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i); 709 + data = RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL); 710 + 711 + sec_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, SEC_COUNT); 712 + if (sec_count) { 713 + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, 714 + vml2_mems[i], sec_count); 715 + err_data->ce_count += sec_count; 
716 + } 717 + 718 + ded_count = REG_GET_FIELD(data, VML2_MEM_ECC_CNTL, DED_COUNT); 719 + if (ded_count) { 720 + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, 721 + vml2_mems[i], ded_count); 722 + err_data->ue_count += ded_count; 723 + } 724 + } 725 + 726 + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { 727 + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i); 728 + data = RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL); 729 + 730 + sec_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, 731 + SEC_COUNT); 732 + if (sec_count) { 733 + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, 734 + vml2_walker_mems[i], sec_count); 735 + err_data->ce_count += sec_count; 736 + } 737 + 738 + ded_count = REG_GET_FIELD(data, VML2_WALKER_MEM_ECC_CNTL, 739 + DED_COUNT); 740 + if (ded_count) { 741 + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, 742 + vml2_walker_mems[i], ded_count); 743 + err_data->ue_count += ded_count; 744 + } 745 + } 746 + 747 + for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) { 748 + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i); 749 + data = RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL); 750 + 751 + sec_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, SEC_COUNT); 752 + if (sec_count) { 753 + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, 754 + utcl2_router_mems[i], sec_count); 755 + err_data->ce_count += sec_count; 756 + } 757 + 758 + ded_count = REG_GET_FIELD(data, UTCL2_MEM_ECC_CNTL, DED_COUNT); 759 + if (ded_count) { 760 + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, 761 + utcl2_router_mems[i], ded_count); 762 + err_data->ue_count += ded_count; 763 + } 764 + } 765 + 766 + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { 767 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i); 768 + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL); 769 + 770 + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, 771 + SEC_COUNT); 772 + if (sec_count) { 773 + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, 774 + 
atc_l2_cache_2m_mems[i], sec_count); 775 + err_data->ce_count += sec_count; 776 + } 777 + 778 + ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_2M_DSM_CNTL, 779 + DED_COUNT); 780 + if (ded_count) { 781 + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, 782 + atc_l2_cache_2m_mems[i], ded_count); 783 + err_data->ue_count += ded_count; 784 + } 785 + } 786 + 787 + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { 788 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i); 789 + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL); 790 + 791 + sec_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, 792 + SEC_COUNT); 793 + if (sec_count) { 794 + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, 795 + atc_l2_cache_4k_mems[i], sec_count); 796 + err_data->ce_count += sec_count; 797 + } 798 + 799 + ded_count = REG_GET_FIELD(data, ATC_L2_CACHE_4K_DSM_CNTL, 800 + DED_COUNT); 801 + if (ded_count) { 802 + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, 803 + atc_l2_cache_4k_mems[i], ded_count); 804 + err_data->ue_count += ded_count; 805 + } 806 + } 807 + 808 + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); 809 + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); 810 + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); 811 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); 812 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); 813 + 814 + return 0; 815 + } 816 + 817 + static int gfx_v9_4_ras_error_count(const struct soc15_reg_entry *reg, 818 + uint32_t se_id, uint32_t inst_id, 819 + uint32_t value, uint32_t *sec_count, 820 + uint32_t *ded_count) 821 + { 822 + uint32_t i; 823 + uint32_t sec_cnt, ded_cnt; 824 + 825 + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_ras_fields); i++) { 826 + if (gfx_v9_4_ras_fields[i].reg_offset != reg->reg_offset || 827 + gfx_v9_4_ras_fields[i].seg != reg->seg || 828 + gfx_v9_4_ras_fields[i].inst != reg->inst) 829 + continue; 830 + 831 + sec_cnt = (value & gfx_v9_4_ras_fields[i].sec_count_mask) >> 832 + 
gfx_v9_4_ras_fields[i].sec_count_shift; 833 + if (sec_cnt) { 834 + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", 835 + gfx_v9_4_ras_fields[i].name, se_id, inst_id, 836 + sec_cnt); 837 + *sec_count += sec_cnt; 838 + } 839 + 840 + ded_cnt = (value & gfx_v9_4_ras_fields[i].ded_count_mask) >> 841 + gfx_v9_4_ras_fields[i].ded_count_shift; 842 + if (ded_cnt) { 843 + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", 844 + gfx_v9_4_ras_fields[i].name, se_id, inst_id, 845 + ded_cnt); 846 + *ded_count += ded_cnt; 847 + } 848 + } 849 + 850 + return 0; 851 + } 852 + 853 + int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, 854 + void *ras_error_status) 855 + { 856 + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; 857 + uint32_t sec_count = 0, ded_count = 0; 858 + uint32_t i, j, k; 859 + uint32_t reg_value; 860 + 861 + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) 862 + return -EINVAL; 863 + 864 + err_data->ue_count = 0; 865 + err_data->ce_count = 0; 866 + 867 + mutex_lock(&adev->grbm_idx_mutex); 868 + 869 + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { 870 + for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { 871 + for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance; 872 + k++) { 873 + gfx_v9_4_select_se_sh(adev, j, 0, k); 874 + reg_value = RREG32(SOC15_REG_ENTRY_OFFSET( 875 + gfx_v9_4_edc_counter_regs[i])); 876 + if (reg_value) 877 + gfx_v9_4_ras_error_count( 878 + &gfx_v9_4_edc_counter_regs[i], 879 + j, k, reg_value, &sec_count, 880 + &ded_count); 881 + } 882 + } 883 + } 884 + 885 + err_data->ce_count += sec_count; 886 + err_data->ue_count += ded_count; 887 + 888 + gfx_v9_4_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 889 + mutex_unlock(&adev->grbm_idx_mutex); 890 + 891 + gfx_v9_4_query_utc_edc_status(adev, err_data); 892 + 893 + return 0; 894 + } 895 + 896 + void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev) 897 + { 898 + int i, j, k; 899 + 900 + 
mutex_lock(&adev->grbm_idx_mutex); 901 + for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) { 902 + for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) { 903 + for (k = 0; k < gfx_v9_4_edc_counter_regs[i].instance; 904 + k++) { 905 + gfx_v9_4_select_se_sh(adev, j, 0x0, k); 906 + RREG32(SOC15_REG_ENTRY_OFFSET( 907 + gfx_v9_4_edc_counter_regs[i])); 908 + } 909 + } 910 + } 911 + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 0xe0000000); 912 + mutex_unlock(&adev->grbm_idx_mutex); 913 + 914 + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); 915 + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL, 0); 916 + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); 917 + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL, 0); 918 + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); 919 + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL, 0); 920 + 921 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); 922 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); 923 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); 924 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL, 0); 925 + 926 + for (i = 0; i < ARRAY_SIZE(vml2_mems); i++) { 927 + WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, i); 928 + RREG32_SOC15(GC, 0, mmVML2_MEM_ECC_CNTL); 929 + } 930 + 931 + for (i = 0; i < ARRAY_SIZE(vml2_walker_mems); i++) { 932 + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, i); 933 + RREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_CNTL); 934 + } 935 + 936 + for (i = 0; i < ARRAY_SIZE(utcl2_router_mems); i++) { 937 + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, i); 938 + RREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_CNTL); 939 + } 940 + 941 + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_2m_mems); i++) { 942 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, i); 943 + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_CNTL); 944 + } 945 + 946 + for (i = 0; i < ARRAY_SIZE(atc_l2_cache_4k_mems); i++) { 947 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, i); 948 + RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_CNTL); 949 + } 950 + 951 + 
WREG32_SOC15(GC, 0, mmVML2_MEM_ECC_INDEX, 255); 952 + WREG32_SOC15(GC, 0, mmVML2_WALKER_MEM_ECC_INDEX, 255); 953 + WREG32_SOC15(GC, 0, mmUTCL2_MEM_ECC_INDEX, 255); 954 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_DSM_INDEX, 255); 955 + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_DSM_INDEX, 255); 956 + } 957 + 958 + int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, void *inject_if) 959 + { 960 + struct ras_inject_if *info = (struct ras_inject_if *)inject_if; 961 + int ret; 962 + struct ta_ras_trigger_error_input block_info = { 0 }; 963 + 964 + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) 965 + return -EINVAL; 966 + 967 + block_info.block_id = amdgpu_ras_block_to_ta(info->head.block); 968 + block_info.sub_block_index = info->head.sub_block_index; 969 + block_info.inject_error_type = amdgpu_ras_error_to_ta(info->head.type); 970 + block_info.address = info->address; 971 + block_info.value = info->value; 972 + 973 + mutex_lock(&adev->grbm_idx_mutex); 974 + ret = psp_ras_trigger_error(&adev->psp, &block_info); 975 + mutex_unlock(&adev->grbm_idx_mutex); 976 + 977 + return ret; 978 + }
+35
drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
··· 1 + /* 2 + * Copyright 2020 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + 24 + #ifndef __GFX_V9_4_H__ 25 + #define __GFX_V9_4_H__ 26 + 27 + void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev); 28 + 29 + int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, 30 + void *ras_error_status); 31 + 32 + int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev, 33 + void *inject_if); 34 + 35 + #endif /* __GFX_V9_4_H__ */
+8 -31
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 262 262 { 263 263 bool use_semaphore = gmc_v10_0_use_invalidate_semaphore(adev, vmhub); 264 264 struct amdgpu_vmhub *hub = &adev->vmhub[vmhub]; 265 - u32 tmp = gmc_v10_0_get_invalidate_req(vmid, flush_type); 265 + u32 inv_req = gmc_v10_0_get_invalidate_req(vmid, flush_type); 266 + u32 tmp; 266 267 /* Use register 17 for GART */ 267 268 const unsigned eng = 17; 268 269 unsigned int i; ··· 290 289 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); 291 290 } 292 291 293 - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); 292 + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); 294 293 295 294 /* 296 295 * Issue a dummy read to wait for the ACK register to be cleared ··· 419 418 420 419 if (amdgpu_emu_mode == 0 && ring->sched.ready) { 421 420 spin_lock(&adev->gfx.kiq.ring_lock); 422 - amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size); 421 + /* 2 dwords flush + 8 dwords fence */ 422 + amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); 423 423 kiq->pmf->kiq_invalidate_tlbs(ring, 424 424 pasid, flush_type, all_hub); 425 425 amdgpu_fence_emit_polling(ring, &seq); ··· 443 441 if (all_hub) { 444 442 for (i = 0; i < adev->num_vmhubs; i++) 445 443 gmc_v10_0_flush_gpu_tlb(adev, vmid, 446 - i, 0); 444 + i, flush_type); 447 445 } else { 448 446 gmc_v10_0_flush_gpu_tlb(adev, vmid, 449 - AMDGPU_GFXHUB_0, 0); 447 + AMDGPU_GFXHUB_0, flush_type); 450 448 } 451 449 break; 452 450 } ··· 642 640 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 643 641 int r; 644 642 645 - /* 646 - * Can't free the stolen VGA memory when it might be used for memory 647 - * training again. 
648 - */ 649 - if (!adev->fw_vram_usage.mem_train_support) 650 - amdgpu_bo_late_init(adev); 643 + amdgpu_bo_late_init(adev); 651 644 652 645 r = amdgpu_gmc_allocate_vm_inv_eng(adev); 653 646 if (r) ··· 826 829 827 830 adev->gmc.stolen_size = gmc_v10_0_get_vbios_fb_size(adev); 828 831 829 - /* 830 - * In dual GPUs scenario, stolen_size is assigned to zero on the 831 - * secondary GPU, since there is no pre-OS console using that memory. 832 - * Then the bottom region of VRAM was allocated as GTT, unfortunately a 833 - * small region of bottom VRAM was encroached by UMC firmware during 834 - * GDDR6 BIST training, this cause page fault. 835 - * The page fault can be fixed by forcing stolen_size to 3MB, then the 836 - * bottom region of VRAM was allocated as stolen memory, GTT corruption 837 - * avoid. 838 - */ 839 - adev->gmc.stolen_size = max(adev->gmc.stolen_size, 840 - AMDGPU_STOLEN_BIST_TRAINING_DEFAULT_SIZE); 841 - 842 832 /* Memory manager */ 843 833 r = amdgpu_bo_init(adev); 844 834 if (r) ··· 865 881 static int gmc_v10_0_sw_fini(void *handle) 866 882 { 867 883 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 868 - void *stolen_vga_buf; 869 - 870 - /* 871 - * Free the stolen memory if it wasn't already freed in late_init 872 - * because of memory training. 873 - */ 874 - amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, &stolen_vga_buf); 875 884 876 885 amdgpu_vm_manager_fini(adev); 877 886 gmc_v10_0_gart_fini(adev);
+2 -1
drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
··· 381 381 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); 382 382 383 383 #ifdef CONFIG_X86_64 384 - if (adev->flags & AMD_IS_APU) { 384 + if (adev->flags & AMD_IS_APU && 385 + adev->gmc.real_vram_size > adev->gmc.aper_size) { 385 386 adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; 386 387 adev->gmc.aper_size = adev->gmc.real_vram_size; 387 388 }
+8 -7
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 476 476 { 477 477 bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub); 478 478 const unsigned eng = 17; 479 - u32 j, tmp; 479 + u32 j, inv_req, tmp; 480 480 struct amdgpu_vmhub *hub; 481 481 482 482 BUG_ON(vmhub >= adev->num_vmhubs); 483 483 484 484 hub = &adev->vmhub[vmhub]; 485 - tmp = gmc_v9_0_get_invalidate_req(vmid, flush_type); 485 + inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type); 486 486 487 487 /* This is necessary for a HW workaround under SRIOV as well 488 488 * as GFXOFF under bare metal ··· 493 493 uint32_t req = hub->vm_inv_eng0_req + eng; 494 494 uint32_t ack = hub->vm_inv_eng0_ack + eng; 495 495 496 - amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, 496 + amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, inv_req, 497 497 1 << vmid); 498 498 return; 499 499 } ··· 521 521 DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); 522 522 } 523 523 524 - WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); 524 + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req); 525 525 526 526 /* 527 527 * Issue a dummy read to wait for the ACK register to be cleared ··· 578 578 579 579 if (ring->sched.ready) { 580 580 spin_lock(&adev->gfx.kiq.ring_lock); 581 - amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size); 581 + /* 2 dwords flush + 8 dwords fence */ 582 + amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8); 582 583 kiq->pmf->kiq_invalidate_tlbs(ring, 583 584 pasid, flush_type, all_hub); 584 585 amdgpu_fence_emit_polling(ring, &seq); ··· 602 601 if (all_hub) { 603 602 for (i = 0; i < adev->num_vmhubs; i++) 604 603 gmc_v9_0_flush_gpu_tlb(adev, vmid, 605 - i, 0); 604 + i, flush_type); 606 605 } else { 607 606 gmc_v9_0_flush_gpu_tlb(adev, vmid, 608 - AMDGPU_GFXHUB_0, 0); 607 + AMDGPU_GFXHUB_0, flush_type); 609 608 } 610 609 break; 611 610 }
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
··· 690 690 enum amd_clockgating_state state) 691 691 { 692 692 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 693 - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 693 + bool enable = (state == AMD_CG_STATE_GATE); 694 694 695 695 if (enable) { 696 696 if (jpeg_v2_0_is_idle(handle))
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
··· 469 469 enum amd_clockgating_state state) 470 470 { 471 471 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 472 - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 472 + bool enable = (state == AMD_CG_STATE_GATE); 473 473 int i; 474 474 475 475 for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+2 -2
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
··· 523 523 case CHIP_RAVEN: 524 524 case CHIP_RENOIR: 525 525 mmhub_v1_0_update_medium_grain_clock_gating(adev, 526 - state == AMD_CG_STATE_GATE ? true : false); 526 + state == AMD_CG_STATE_GATE); 527 527 mmhub_v1_0_update_medium_grain_light_sleep(adev, 528 - state == AMD_CG_STATE_GATE ? true : false); 528 + state == AMD_CG_STATE_GATE); 529 529 break; 530 530 default: 531 531 break;
+2 -2
drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
··· 427 427 case CHIP_NAVI14: 428 428 case CHIP_NAVI12: 429 429 mmhub_v2_0_update_medium_grain_clock_gating(adev, 430 - state == AMD_CG_STATE_GATE ? true : false); 430 + state == AMD_CG_STATE_GATE); 431 431 mmhub_v2_0_update_medium_grain_light_sleep(adev, 432 - state == AMD_CG_STATE_GATE ? true : false); 432 + state == AMD_CG_STATE_GATE); 433 433 break; 434 434 default: 435 435 break;
+697 -8
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
··· 625 625 switch (adev->asic_type) { 626 626 case CHIP_ARCTURUS: 627 627 mmhub_v9_4_update_medium_grain_clock_gating(adev, 628 - state == AMD_CG_STATE_GATE ? true : false); 628 + state == AMD_CG_STATE_GATE); 629 629 mmhub_v9_4_update_medium_grain_light_sleep(adev, 630 - state == AMD_CG_STATE_GATE ? true : false); 630 + state == AMD_CG_STATE_GATE); 631 631 break; 632 632 default: 633 633 break; ··· 663 663 } 664 664 665 665 static const struct soc15_ras_field_entry mmhub_v9_4_ras_fields[] = { 666 + /* MMHUB Range 0 */ 666 667 { "MMEA0_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 667 668 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), 668 669 SOC15_REG_FIELD(MMEA0_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), ··· 752 751 0, 0, 753 752 SOC15_REG_FIELD(MMEA0_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), 754 753 }, 754 + { "MMEA0_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 755 + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_SED_COUNT), 756 + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D0MEM_DED_COUNT), 757 + }, 758 + { "MMEA0_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 759 + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_SED_COUNT), 760 + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D1MEM_DED_COUNT), 761 + }, 762 + { "MMEA0_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 763 + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_SED_COUNT), 764 + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D2MEM_DED_COUNT), 765 + }, 766 + { "MMEA0_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 767 + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_SED_COUNT), 768 + SOC15_REG_FIELD(MMEA0_EDC_CNT2, MAM_D3MEM_DED_COUNT), 769 + }, 770 + 771 + /* MMHUB Range 1 */ 755 772 { "MMEA1_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 756 773 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), 757 774 SOC15_REG_FIELD(MMEA1_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), ··· 857 838 { "MMEA1_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 858 839 0, 0, 859 840 
SOC15_REG_FIELD(MMEA1_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), 841 + }, 842 + { "MMEA1_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 843 + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_SED_COUNT), 844 + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D0MEM_DED_COUNT), 845 + }, 846 + { "MMEA1_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 847 + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_SED_COUNT), 848 + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D1MEM_DED_COUNT), 849 + }, 850 + { "MMEA1_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 851 + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_SED_COUNT), 852 + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D2MEM_DED_COUNT), 853 + }, 854 + { "MMEA1_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 855 + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_SED_COUNT), 856 + SOC15_REG_FIELD(MMEA1_EDC_CNT2, MAM_D3MEM_DED_COUNT), 857 + }, 858 + 859 + /* MMHAB Range 2*/ 860 + { "MMEA2_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 861 + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), 862 + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), 863 + }, 864 + { "MMEA2_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 865 + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), 866 + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), 867 + }, 868 + { "MMEA2_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 869 + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), 870 + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), 871 + }, 872 + { "MMEA2_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 873 + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_SEC_COUNT), 874 + SOC15_REG_FIELD(MMEA2_EDC_CNT, RRET_TAGMEM_DED_COUNT), 875 + }, 876 + { "MMEA2_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 877 + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_SEC_COUNT), 878 + SOC15_REG_FIELD(MMEA2_EDC_CNT, WRET_TAGMEM_DED_COUNT), 879 + }, 880 + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 
0, mmMMEA2_EDC_CNT), 881 + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 882 + 0, 0, 883 + }, 884 + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 885 + SOC15_REG_FIELD(MMEA2_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 886 + 0, 0, 887 + }, 888 + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 889 + SOC15_REG_FIELD(MMEA2_EDC_CNT, IORD_CMDMEM_SED_COUNT), 890 + 0, 0, 891 + }, 892 + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 893 + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 894 + 0, 0, 895 + }, 896 + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 897 + SOC15_REG_FIELD(MMEA2_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 898 + 0, 0, 899 + }, 900 + { "MMEA2_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 901 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), 902 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), 903 + }, 904 + { "MMEA2_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 905 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), 906 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), 907 + }, 908 + { "MMEA2_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 909 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), 910 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), 911 + }, 912 + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 913 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 914 + 0, 0, 915 + }, 916 + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 917 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 918 + 0, 0, 919 + }, 920 + { "MMEA2_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 921 + 0, 0, 922 + SOC15_REG_FIELD(MMEA2_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), 923 + }, 924 + { "MMEA2_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 925 + 0, 0, 926 + SOC15_REG_FIELD(MMEA2_EDC_CNT3, 
DRAMWR_PAGEMEM_DED_COUNT), 927 + }, 928 + { "MMEA2_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 929 + 0, 0, 930 + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IORD_CMDMEM_DED_COUNT), 931 + }, 932 + { "MMEA2_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 933 + 0, 0, 934 + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), 935 + }, 936 + { "MMEA2_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 937 + 0, 0, 938 + SOC15_REG_FIELD(MMEA2_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), 939 + }, 940 + { "MMEA2_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 941 + 0, 0, 942 + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), 943 + }, 944 + { "MMEA2_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 945 + 0, 0, 946 + SOC15_REG_FIELD(MMEA2_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), 947 + }, 948 + { "MMEA2_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 949 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_SED_COUNT), 950 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D0MEM_DED_COUNT), 951 + }, 952 + { "MMEA2_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 953 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_SED_COUNT), 954 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D1MEM_DED_COUNT), 955 + }, 956 + { "MMEA2_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 957 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_SED_COUNT), 958 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D2MEM_DED_COUNT), 959 + }, 960 + { "MMEA2_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 961 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_SED_COUNT), 962 + SOC15_REG_FIELD(MMEA2_EDC_CNT2, MAM_D3MEM_DED_COUNT), 963 + }, 964 + 965 + /* MMHUB Rang 3 */ 966 + { "MMEA3_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 967 + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), 968 + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), 969 + }, 970 + { "MMEA3_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 971 + SOC15_REG_FIELD(MMEA3_EDC_CNT, 
DRAMWR_CMDMEM_SEC_COUNT), 972 + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), 973 + }, 974 + { "MMEA3_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 975 + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), 976 + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), 977 + }, 978 + { "MMEA3_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 979 + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_SEC_COUNT), 980 + SOC15_REG_FIELD(MMEA3_EDC_CNT, RRET_TAGMEM_DED_COUNT), 981 + }, 982 + { "MMEA3_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 983 + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_SEC_COUNT), 984 + SOC15_REG_FIELD(MMEA3_EDC_CNT, WRET_TAGMEM_DED_COUNT), 985 + }, 986 + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 987 + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 988 + 0, 0, 989 + }, 990 + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 991 + SOC15_REG_FIELD(MMEA3_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 992 + 0, 0, 993 + }, 994 + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 995 + SOC15_REG_FIELD(MMEA3_EDC_CNT, IORD_CMDMEM_SED_COUNT), 996 + 0, 0, 997 + }, 998 + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 999 + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 1000 + 0, 0, 1001 + }, 1002 + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 1003 + SOC15_REG_FIELD(MMEA3_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 1004 + 0, 0, 1005 + }, 1006 + { "MMEA3_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 1007 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), 1008 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), 1009 + }, 1010 + { "MMEA3_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 1011 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), 1012 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), 1013 + }, 1014 + { "MMEA3_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA3_EDC_CNT2), 1015 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), 1016 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), 1017 + }, 1018 + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 1019 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 1020 + 0, 0, 1021 + }, 1022 + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 1023 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 1024 + 0, 0, 1025 + }, 1026 + { "MMEA3_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 1027 + 0, 0, 1028 + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), 1029 + }, 1030 + { "MMEA3_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 1031 + 0, 0, 1032 + SOC15_REG_FIELD(MMEA3_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), 1033 + }, 1034 + { "MMEA3_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 1035 + 0, 0, 1036 + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IORD_CMDMEM_DED_COUNT), 1037 + }, 1038 + { "MMEA3_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 1039 + 0, 0, 1040 + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), 1041 + }, 1042 + { "MMEA3_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 1043 + 0, 0, 1044 + SOC15_REG_FIELD(MMEA3_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), 1045 + }, 1046 + { "MMEA3_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 1047 + 0, 0, 1048 + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), 1049 + }, 1050 + { "MMEA3_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 1051 + 0, 0, 1052 + SOC15_REG_FIELD(MMEA3_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), 1053 + }, 1054 + { "MMEA3_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 1055 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_SED_COUNT), 1056 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D0MEM_DED_COUNT), 1057 + }, 1058 + { "MMEA3_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 1059 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_SED_COUNT), 1060 + 
SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D1MEM_DED_COUNT), 1061 + }, 1062 + { "MMEA3_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 1063 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_SED_COUNT), 1064 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D2MEM_DED_COUNT), 1065 + }, 1066 + { "MMEA3_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 1067 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_SED_COUNT), 1068 + SOC15_REG_FIELD(MMEA3_EDC_CNT2, MAM_D3MEM_DED_COUNT), 1069 + }, 1070 + 1071 + /* MMHUB Range 4 */ 1072 + { "MMEA4_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1073 + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), 1074 + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), 1075 + }, 1076 + { "MMEA4_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1077 + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), 1078 + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), 1079 + }, 1080 + { "MMEA4_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1081 + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), 1082 + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), 1083 + }, 1084 + { "MMEA4_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1085 + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_SEC_COUNT), 1086 + SOC15_REG_FIELD(MMEA4_EDC_CNT, RRET_TAGMEM_DED_COUNT), 1087 + }, 1088 + { "MMEA4_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1089 + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_SEC_COUNT), 1090 + SOC15_REG_FIELD(MMEA4_EDC_CNT, WRET_TAGMEM_DED_COUNT), 1091 + }, 1092 + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1093 + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 1094 + 0, 0, 1095 + }, 1096 + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1097 + SOC15_REG_FIELD(MMEA4_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 1098 + 0, 0, 1099 + }, 1100 + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1101 + 
SOC15_REG_FIELD(MMEA4_EDC_CNT, IORD_CMDMEM_SED_COUNT), 1102 + 0, 0, 1103 + }, 1104 + { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1105 + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 1106 + 0, 0, 1107 + }, 1108 + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 1109 + SOC15_REG_FIELD(MMEA4_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 1110 + 0, 0, 1111 + }, 1112 + { "MMEA4_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 1113 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), 1114 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), 1115 + }, 1116 + { "MMEA4_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 1117 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), 1118 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), 1119 + }, 1120 + { "MMEA4_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 1121 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), 1122 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), 1123 + }, 1124 + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 1125 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 1126 + 0, 0, 1127 + }, 1128 + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 1129 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 1130 + 0, 0, 1131 + }, 1132 + { "MMEA4_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 1133 + 0, 0, 1134 + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), 1135 + }, 1136 + { "MMEA4_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 1137 + 0, 0, 1138 + SOC15_REG_FIELD(MMEA4_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), 1139 + }, 1140 + { "MMEA4_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 1141 + 0, 0, 1142 + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IORD_CMDMEM_DED_COUNT), 1143 + }, 1144 + { "MMEA4_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 1145 + 0, 0, 1146 + SOC15_REG_FIELD(MMEA4_EDC_CNT3, 
IOWR_CMDMEM_DED_COUNT), 1147 + }, 1148 + { "MMEA4_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 1149 + 0, 0, 1150 + SOC15_REG_FIELD(MMEA4_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), 1151 + }, 1152 + { "MMEA4_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 1153 + 0, 0, 1154 + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), 1155 + }, 1156 + { "MMEA4_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 1157 + 0, 0, 1158 + SOC15_REG_FIELD(MMEA4_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), 1159 + }, 1160 + { "MMEA4_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 1161 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_SED_COUNT), 1162 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D0MEM_DED_COUNT), 1163 + }, 1164 + { "MMEA4_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 1165 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_SED_COUNT), 1166 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D1MEM_DED_COUNT), 1167 + }, 1168 + { "MMEA4_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 1169 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_SED_COUNT), 1170 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D2MEM_DED_COUNT), 1171 + }, 1172 + { "MMEA4_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 1173 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_SED_COUNT), 1174 + SOC15_REG_FIELD(MMEA4_EDC_CNT2, MAM_D3MEM_DED_COUNT), 1175 + }, 1176 + 1177 + /* MMHUAB Range 5 */ 1178 + { "MMEA5_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1179 + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), 1180 + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), 1181 + }, 1182 + { "MMEA5_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1183 + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), 1184 + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), 1185 + }, 1186 + { "MMEA5_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1187 + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), 1188 + SOC15_REG_FIELD(MMEA5_EDC_CNT, 
DRAMWR_DATAMEM_DED_COUNT), 1189 + }, 1190 + { "MMEA5_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1191 + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_SEC_COUNT), 1192 + SOC15_REG_FIELD(MMEA5_EDC_CNT, RRET_TAGMEM_DED_COUNT), 1193 + }, 1194 + { "MMEA5_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1195 + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_SEC_COUNT), 1196 + SOC15_REG_FIELD(MMEA5_EDC_CNT, WRET_TAGMEM_DED_COUNT), 1197 + }, 1198 + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1199 + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 1200 + 0, 0, 1201 + }, 1202 + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1203 + SOC15_REG_FIELD(MMEA5_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 1204 + 0, 0, 1205 + }, 1206 + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1207 + SOC15_REG_FIELD(MMEA5_EDC_CNT, IORD_CMDMEM_SED_COUNT), 1208 + 0, 0, 1209 + }, 1210 + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1211 + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 1212 + 0, 0, 1213 + }, 1214 + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 1215 + SOC15_REG_FIELD(MMEA5_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 1216 + 0, 0, 1217 + }, 1218 + { "MMEA5_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 1219 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), 1220 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), 1221 + }, 1222 + { "MMEA5_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 1223 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), 1224 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), 1225 + }, 1226 + { "MMEA5_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 1227 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), 1228 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), 1229 + }, 1230 + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 1231 + 
SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 1232 + 0, 0, 1233 + }, 1234 + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 1235 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 1236 + 0, 0, 1237 + }, 1238 + { "MMEA5_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 1239 + 0, 0, 1240 + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), 1241 + }, 1242 + { "MMEA5_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 1243 + 0, 0, 1244 + SOC15_REG_FIELD(MMEA5_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), 1245 + }, 1246 + { "MMEA5_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 1247 + 0, 0, 1248 + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IORD_CMDMEM_DED_COUNT), 1249 + }, 1250 + { "MMEA5_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 1251 + 0, 0, 1252 + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), 1253 + }, 1254 + { "MMEA5_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 1255 + 0, 0, 1256 + SOC15_REG_FIELD(MMEA5_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), 1257 + }, 1258 + { "MMEA5_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 1259 + 0, 0, 1260 + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), 1261 + }, 1262 + { "MMEA5_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 1263 + 0, 0, 1264 + SOC15_REG_FIELD(MMEA5_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), 1265 + }, 1266 + { "MMEA5_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 1267 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_SED_COUNT), 1268 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D0MEM_DED_COUNT), 1269 + }, 1270 + { "MMEA5_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 1271 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_SED_COUNT), 1272 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D1MEM_DED_COUNT), 1273 + }, 1274 + { "MMEA5_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 1275 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_SED_COUNT), 1276 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D2MEM_DED_COUNT), 
1277 + }, 1278 + { "MMEA5_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 1279 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_SED_COUNT), 1280 + SOC15_REG_FIELD(MMEA5_EDC_CNT2, MAM_D3MEM_DED_COUNT), 1281 + }, 1282 + 1283 + /* MMHUB Range 6 */ 1284 + { "MMEA6_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1285 + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), 1286 + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), 1287 + }, 1288 + { "MMEA6_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1289 + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), 1290 + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), 1291 + }, 1292 + { "MMEA6_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1293 + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), 1294 + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), 1295 + }, 1296 + { "MMEA6_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1297 + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_SEC_COUNT), 1298 + SOC15_REG_FIELD(MMEA6_EDC_CNT, RRET_TAGMEM_DED_COUNT), 1299 + }, 1300 + { "MMEA6_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1301 + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_SEC_COUNT), 1302 + SOC15_REG_FIELD(MMEA6_EDC_CNT, WRET_TAGMEM_DED_COUNT), 1303 + }, 1304 + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1305 + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 1306 + 0, 0, 1307 + }, 1308 + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1309 + SOC15_REG_FIELD(MMEA6_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 1310 + 0, 0, 1311 + }, 1312 + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1313 + SOC15_REG_FIELD(MMEA6_EDC_CNT, IORD_CMDMEM_SED_COUNT), 1314 + 0, 0, 1315 + }, 1316 + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1317 + SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 1318 + 0, 0, 1319 + }, 1320 + { "MMEA6_IOWR_DATAMEM", 
SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 1321 + SOC15_REG_FIELD(MMEA6_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 1322 + 0, 0, 1323 + }, 1324 + { "MMEA6_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 1325 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), 1326 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), 1327 + }, 1328 + { "MMEA6_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 1329 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), 1330 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), 1331 + }, 1332 + { "MMEA6_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 1333 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), 1334 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), 1335 + }, 1336 + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 1337 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 1338 + 0, 0, 1339 + }, 1340 + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 1341 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 1342 + 0, 0, 1343 + }, 1344 + { "MMEA6_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 1345 + 0, 0, 1346 + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), 1347 + }, 1348 + { "MMEA6_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 1349 + 0, 0, 1350 + SOC15_REG_FIELD(MMEA6_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), 1351 + }, 1352 + { "MMEA6_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 1353 + 0, 0, 1354 + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IORD_CMDMEM_DED_COUNT), 1355 + }, 1356 + { "MMEA6_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 1357 + 0, 0, 1358 + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), 1359 + }, 1360 + { "MMEA6_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 1361 + 0, 0, 1362 + SOC15_REG_FIELD(MMEA6_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), 1363 + }, 1364 + { "MMEA6_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, 
mmMMEA6_EDC_CNT3), 1365 + 0, 0, 1366 + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), 1367 + }, 1368 + { "MMEA6_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 1369 + 0, 0, 1370 + SOC15_REG_FIELD(MMEA6_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), 1371 + }, 1372 + { "MMEA6_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 1373 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_SED_COUNT), 1374 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D0MEM_DED_COUNT), 1375 + }, 1376 + { "MMEA6_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 1377 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_SED_COUNT), 1378 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D1MEM_DED_COUNT), 1379 + }, 1380 + { "MMEA6_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 1381 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_SED_COUNT), 1382 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D2MEM_DED_COUNT), 1383 + }, 1384 + { "MMEA6_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 1385 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_SED_COUNT), 1386 + SOC15_REG_FIELD(MMEA6_EDC_CNT2, MAM_D3MEM_DED_COUNT), 1387 + }, 1388 + 1389 + /* MMHUB Range 7*/ 1390 + { "MMEA7_DRAMRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1391 + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), 1392 + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT), 1393 + }, 1394 + { "MMEA7_DRAMWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1395 + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), 1396 + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT), 1397 + }, 1398 + { "MMEA7_DRAMWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1399 + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), 1400 + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT), 1401 + }, 1402 + { "MMEA7_RRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1403 + SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_SEC_COUNT), 1404 + SOC15_REG_FIELD(MMEA7_EDC_CNT, RRET_TAGMEM_DED_COUNT), 1405 + }, 1406 + { 
"MMEA7_WRET_TAGMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1407 + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_SEC_COUNT), 1408 + SOC15_REG_FIELD(MMEA7_EDC_CNT, WRET_TAGMEM_DED_COUNT), 1409 + }, 1410 + { "MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1411 + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 1412 + 0, 0, 1413 + }, 1414 + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1415 + SOC15_REG_FIELD(MMEA7_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 1416 + 0, 0, 1417 + }, 1418 + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1419 + SOC15_REG_FIELD(MMEA7_EDC_CNT, IORD_CMDMEM_SED_COUNT), 1420 + 0, 0, 1421 + }, 1422 + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1423 + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 1424 + 0, 0, 1425 + }, 1426 + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 1427 + SOC15_REG_FIELD(MMEA7_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 1428 + 0, 0, 1429 + }, 1430 + { "MMEA7_GMIRD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 1431 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), 1432 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT), 1433 + }, 1434 + { "MMEA7_GMIWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 1435 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), 1436 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT), 1437 + }, 1438 + { "MMEA7_GMIWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 1439 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), 1440 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT), 1441 + }, 1442 + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 1443 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 1444 + 0, 0, 1445 + }, 1446 + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 1447 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 1448 + 0, 0, 1449 + }, 1450 + { 
"MMEA7_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 1451 + 0, 0, 1452 + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMRD_PAGEMEM_DED_COUNT), 1453 + }, 1454 + { "MMEA7_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 1455 + 0, 0, 1456 + SOC15_REG_FIELD(MMEA7_EDC_CNT3, DRAMWR_PAGEMEM_DED_COUNT), 1457 + }, 1458 + { "MMEA7_IORD_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 1459 + 0, 0, 1460 + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IORD_CMDMEM_DED_COUNT), 1461 + }, 1462 + { "MMEA7_IOWR_CMDMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 1463 + 0, 0, 1464 + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_CMDMEM_DED_COUNT), 1465 + }, 1466 + { "MMEA7_IOWR_DATAMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 1467 + 0, 0, 1468 + SOC15_REG_FIELD(MMEA7_EDC_CNT3, IOWR_DATAMEM_DED_COUNT), 1469 + }, 1470 + { "MMEA7_GMIRD_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 1471 + 0, 0, 1472 + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIRD_PAGEMEM_DED_COUNT), 1473 + }, 1474 + { "MMEA7_GMIWR_PAGEMEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 1475 + 0, 0, 1476 + SOC15_REG_FIELD(MMEA7_EDC_CNT3, GMIWR_PAGEMEM_DED_COUNT), 1477 + }, 1478 + { "MMEA7_MAM_D0MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 1479 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_SED_COUNT), 1480 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D0MEM_DED_COUNT), 1481 + }, 1482 + { "MMEA7_MAM_D1MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 1483 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_SED_COUNT), 1484 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D1MEM_DED_COUNT), 1485 + }, 1486 + { "MMEA7_MAM_D2MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 1487 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_SED_COUNT), 1488 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D2MEM_DED_COUNT), 1489 + }, 1490 + { "MMEA7_MAM_D3MEM", SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 1491 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_SED_COUNT), 1492 + SOC15_REG_FIELD(MMEA7_EDC_CNT2, MAM_D3MEM_DED_COUNT), 860 1493 } 861 1494 }; 862 1495 863 1496 static 
const struct soc15_reg_entry mmhub_v9_4_edc_cnt_regs[] = { 864 - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0}, 865 - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0}, 866 - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0}, 867 - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0}, 868 - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0}, 869 - { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0}, 1497 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT), 0, 0, 0 }, 1498 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT2), 0, 0, 0 }, 1499 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA0_EDC_CNT3), 0, 0, 0 }, 1500 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT), 0, 0, 0 }, 1501 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT2), 0, 0, 0 }, 1502 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA1_EDC_CNT3), 0, 0, 0 }, 1503 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT), 0, 0, 0 }, 1504 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT2), 0, 0, 0 }, 1505 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA2_EDC_CNT3), 0, 0, 0 }, 1506 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT), 0, 0, 0 }, 1507 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT2), 0, 0, 0 }, 1508 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA3_EDC_CNT3), 0, 0, 0 }, 1509 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT), 0, 0, 0 }, 1510 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT2), 0, 0, 0 }, 1511 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA4_EDC_CNT3), 0, 0, 0 }, 1512 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT), 0, 0, 0 }, 1513 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT2), 0, 0, 0 }, 1514 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA5_EDC_CNT3), 0, 0, 0 }, 1515 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT), 0, 0, 0 }, 1516 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT2), 0, 0, 0 }, 1517 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA6_EDC_CNT3), 0, 0, 0 }, 1518 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT), 0, 0, 0 }, 1519 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT2), 0, 0, 0 }, 1520 + { SOC15_REG_ENTRY(MMHUB, 0, mmMMEA7_EDC_CNT3), 0, 0, 0 }, 870 1521 }; 
871 1522 872 1523 static int mmhub_v9_4_get_ras_error_count(const struct soc15_reg_entry *reg,
+1 -1
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
··· 426 426 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 427 427 428 428 navi10_ih_update_clockgating_state(adev, 429 - state == AMD_CG_STATE_GATE ? true : false); 429 + state == AMD_CG_STATE_GATE); 430 430 return 0; 431 431 } 432 432
+4 -4
drivers/gpu/drm/amd/amdgpu/nv.c
··· 950 950 case CHIP_NAVI14: 951 951 case CHIP_NAVI12: 952 952 adev->nbio.funcs->update_medium_grain_clock_gating(adev, 953 - state == AMD_CG_STATE_GATE ? true : false); 953 + state == AMD_CG_STATE_GATE); 954 954 adev->nbio.funcs->update_medium_grain_light_sleep(adev, 955 - state == AMD_CG_STATE_GATE ? true : false); 955 + state == AMD_CG_STATE_GATE); 956 956 nv_update_hdp_mem_power_gating(adev, 957 - state == AMD_CG_STATE_GATE ? true : false); 957 + state == AMD_CG_STATE_GATE); 958 958 nv_update_hdp_clock_gating(adev, 959 - state == AMD_CG_STATE_GATE ? true : false); 959 + state == AMD_CG_STATE_GATE); 960 960 break; 961 961 default: 962 962 break;
+34 -3
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 22 22 23 23 #include <linux/firmware.h> 24 24 #include <linux/module.h> 25 + #include <linux/vmalloc.h> 25 26 26 27 #include "amdgpu.h" 27 28 #include "amdgpu_psp.h" ··· 972 971 */ 973 972 static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) 974 973 { 975 - int ret; 976 - uint32_t p2c_header[4]; 977 974 struct psp_memory_training_context *ctx = &psp->mem_train_ctx; 978 975 uint32_t *pcache = (uint32_t*)ctx->sys_cache; 976 + struct amdgpu_device *adev = psp->adev; 977 + uint32_t p2c_header[4]; 978 + uint32_t sz; 979 + void *buf; 980 + int ret; 979 981 980 982 if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { 981 983 DRM_DEBUG("Memory training is not supported.\n"); ··· 993 989 return 0; 994 990 } 995 991 996 - amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); 992 + amdgpu_device_vram_access(adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); 997 993 DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n", 998 994 pcache[0], pcache[1], pcache[2], pcache[3], 999 995 p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]); ··· 1030 1026 DRM_DEBUG("Memory training ops:%x.\n", ops); 1031 1027 1032 1028 if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { 1029 + /* 1030 + * Long traing will encroach certain mount of bottom VRAM, 1031 + * saving the content of this bottom VRAM to system memory 1032 + * before training, and restoring it after training to avoid 1033 + * VRAM corruption. 
1034 + */ 1035 + sz = GDDR6_MEM_TRAINING_ENCROACHED_SIZE; 1036 + 1037 + if (adev->gmc.visible_vram_size < sz || !adev->mman.aper_base_kaddr) { 1038 + DRM_ERROR("visible_vram_size %llx or aper_base_kaddr %p is not initialized.\n", 1039 + adev->gmc.visible_vram_size, 1040 + adev->mman.aper_base_kaddr); 1041 + return -EINVAL; 1042 + } 1043 + 1044 + buf = vmalloc(sz); 1045 + if (!buf) { 1046 + DRM_ERROR("failed to allocate system memory.\n"); 1047 + return -ENOMEM; 1048 + } 1049 + 1050 + memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz); 1033 1051 ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); 1034 1052 if (ret) { 1035 1053 DRM_ERROR("Send long training msg failed.\n"); 1054 + vfree(buf); 1036 1055 return ret; 1037 1056 } 1057 + 1058 + memcpy_toio(adev->mman.aper_base_kaddr, buf, sz); 1059 + adev->nbio.funcs->hdp_flush(adev, NULL); 1060 + vfree(buf); 1038 1061 } 1039 1062 1040 1063 if (ops & PSP_MEM_TRAIN_SAVE) {
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 2176 2176 case CHIP_ARCTURUS: 2177 2177 case CHIP_RENOIR: 2178 2178 sdma_v4_0_update_medium_grain_clock_gating(adev, 2179 - state == AMD_CG_STATE_GATE ? true : false); 2179 + state == AMD_CG_STATE_GATE); 2180 2180 sdma_v4_0_update_medium_grain_light_sleep(adev, 2181 - state == AMD_CG_STATE_GATE ? true : false); 2181 + state == AMD_CG_STATE_GATE); 2182 2182 break; 2183 2183 default: 2184 2184 break;
+2 -2
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 1525 1525 case CHIP_NAVI14: 1526 1526 case CHIP_NAVI12: 1527 1527 sdma_v5_0_update_medium_grain_clock_gating(adev, 1528 - state == AMD_CG_STATE_GATE ? true : false); 1528 + state == AMD_CG_STATE_GATE); 1529 1529 sdma_v5_0_update_medium_grain_light_sleep(adev, 1530 - state == AMD_CG_STATE_GATE ? true : false); 1530 + state == AMD_CG_STATE_GATE); 1531 1531 break; 1532 1532 default: 1533 1533 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/si_dma.c
··· 648 648 bool enable; 649 649 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 650 650 651 - enable = (state == AMD_CG_STATE_GATE) ? true : false; 651 + enable = (state == AMD_CG_STATE_GATE); 652 652 653 653 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) { 654 654 for (i = 0; i < adev->sdma.num_instances; i++) {
+18 -14
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 537 537 538 538 static int soc15_asic_reset(struct amdgpu_device *adev) 539 539 { 540 + /* original raven doesn't have full asic reset */ 541 + if (adev->pdev->device == 0x15dd && adev->rev_id < 0x8) 542 + return 0; 543 + 540 544 switch (soc15_asic_reset_method(adev)) { 541 545 case AMD_RESET_METHOD_BACO: 542 546 if (!adev->in_suspend) ··· 1471 1467 case CHIP_VEGA12: 1472 1468 case CHIP_VEGA20: 1473 1469 adev->nbio.funcs->update_medium_grain_clock_gating(adev, 1474 - state == AMD_CG_STATE_GATE ? true : false); 1470 + state == AMD_CG_STATE_GATE); 1475 1471 adev->nbio.funcs->update_medium_grain_light_sleep(adev, 1476 - state == AMD_CG_STATE_GATE ? true : false); 1472 + state == AMD_CG_STATE_GATE); 1477 1473 soc15_update_hdp_light_sleep(adev, 1478 - state == AMD_CG_STATE_GATE ? true : false); 1474 + state == AMD_CG_STATE_GATE); 1479 1475 soc15_update_drm_clock_gating(adev, 1480 - state == AMD_CG_STATE_GATE ? true : false); 1476 + state == AMD_CG_STATE_GATE); 1481 1477 soc15_update_drm_light_sleep(adev, 1482 - state == AMD_CG_STATE_GATE ? true : false); 1478 + state == AMD_CG_STATE_GATE); 1483 1479 soc15_update_rom_medium_grain_clock_gating(adev, 1484 - state == AMD_CG_STATE_GATE ? true : false); 1480 + state == AMD_CG_STATE_GATE); 1485 1481 adev->df.funcs->update_medium_grain_clock_gating(adev, 1486 - state == AMD_CG_STATE_GATE ? true : false); 1482 + state == AMD_CG_STATE_GATE); 1487 1483 break; 1488 1484 case CHIP_RAVEN: 1489 1485 case CHIP_RENOIR: 1490 1486 adev->nbio.funcs->update_medium_grain_clock_gating(adev, 1491 - state == AMD_CG_STATE_GATE ? true : false); 1487 + state == AMD_CG_STATE_GATE); 1492 1488 adev->nbio.funcs->update_medium_grain_light_sleep(adev, 1493 - state == AMD_CG_STATE_GATE ? true : false); 1489 + state == AMD_CG_STATE_GATE); 1494 1490 soc15_update_hdp_light_sleep(adev, 1495 - state == AMD_CG_STATE_GATE ? true : false); 1491 + state == AMD_CG_STATE_GATE); 1496 1492 soc15_update_drm_clock_gating(adev, 1497 - state == AMD_CG_STATE_GATE ? 
true : false); 1493 + state == AMD_CG_STATE_GATE); 1498 1494 soc15_update_drm_light_sleep(adev, 1499 - state == AMD_CG_STATE_GATE ? true : false); 1495 + state == AMD_CG_STATE_GATE); 1500 1496 soc15_update_rom_medium_grain_clock_gating(adev, 1501 - state == AMD_CG_STATE_GATE ? true : false); 1497 + state == AMD_CG_STATE_GATE); 1502 1498 break; 1503 1499 case CHIP_ARCTURUS: 1504 1500 soc15_update_hdp_light_sleep(adev, 1505 - state == AMD_CG_STATE_GATE ? true : false); 1501 + state == AMD_CG_STATE_GATE); 1506 1502 break; 1507 1503 default: 1508 1504 break;
+1 -1
drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
··· 763 763 enum amd_clockgating_state state) 764 764 { 765 765 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 766 - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 766 + bool enable = (state == AMD_CG_STATE_GATE); 767 767 768 768 if (enable) { 769 769 /* wait for STATUS to clear */
+1 -1
drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
··· 1421 1421 enum amd_clockgating_state state) 1422 1422 { 1423 1423 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1424 - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 1424 + bool enable = (state == AMD_CG_STATE_GATE); 1425 1425 1426 1426 if (enable) { 1427 1427 /* wait for STATUS to clear */
+1 -1
drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
··· 739 739 enum amd_clockgating_state state) 740 740 { 741 741 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 742 - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 742 + bool enable = (state == AMD_CG_STATE_GATE); 743 743 int i; 744 744 745 745 if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
+1 -1
drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
··· 887 887 enum amd_clockgating_state state) 888 888 { 889 889 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 890 - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 890 + bool enable = (state == AMD_CG_STATE_GATE); 891 891 int i; 892 892 893 893 if ((adev->asic_type == CHIP_POLARIS10) ||
+1 -1
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
··· 1346 1346 enum amd_clockgating_state state) 1347 1347 { 1348 1348 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1349 - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 1349 + bool enable = (state == AMD_CG_STATE_GATE); 1350 1350 1351 1351 if (enable) { 1352 1352 /* wait for STATUS to clear */
+2 -2
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
··· 1213 1213 enum amd_clockgating_state state) 1214 1214 { 1215 1215 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1216 - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 1216 + bool enable = (state == AMD_CG_STATE_GATE); 1217 1217 1218 1218 if (enable) { 1219 1219 /* wait for STATUS to clear */ ··· 1624 1624 return 0; 1625 1625 } 1626 1626 1627 - static int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) 1627 + int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring) 1628 1628 { 1629 1629 struct amdgpu_device *adev = ring->adev; 1630 1630 uint32_t tmp = 0;
+1
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.h
··· 37 37 unsigned vmid, uint64_t pd_addr); 38 38 extern void vcn_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, 39 39 uint32_t reg, uint32_t val); 40 + extern int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring); 40 41 41 42 extern void vcn_v2_0_enc_ring_insert_end(struct amdgpu_ring *ring); 42 43 extern void vcn_v2_0_enc_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
+54 -51
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
··· 435 435 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 436 436 if (!indirect) { 437 437 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 438 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 438 + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 439 439 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_lo), 0, indirect); 440 440 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 441 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 441 + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 442 442 (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst_idx].tmr_mc_addr_hi), 0, indirect); 443 443 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 444 - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); 444 + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); 445 445 } else { 446 446 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 447 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); 447 + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); 448 448 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 449 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); 449 + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); 450 450 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 451 - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); 451 + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); 452 452 } 453 453 offset = 0; 454 454 } else { 455 455 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 456 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 456 + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 457 457 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); 458 458 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 459 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 459 + UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 460 460 
upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); 461 461 offset = size; 462 462 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 463 - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET0), 463 + UVD, 0, mmUVD_VCPU_CACHE_OFFSET0), 464 464 AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); 465 465 } 466 466 467 467 if (!indirect) 468 468 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 469 - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); 469 + UVD, 0, mmUVD_VCPU_CACHE_SIZE0), size, 0, indirect); 470 470 else 471 471 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 472 - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); 472 + UVD, 0, mmUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); 473 473 474 474 /* cache window 1: stack */ 475 475 if (!indirect) { 476 476 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 477 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 477 + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 478 478 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); 479 479 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 480 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 480 + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 481 481 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); 482 482 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 483 - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); 483 + UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); 484 484 } else { 485 485 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 486 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); 486 + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); 487 487 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 488 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); 488 + UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); 489 489 
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 490 - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); 490 + UVD, 0, mmUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); 491 491 } 492 492 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 493 - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); 493 + UVD, 0, mmUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); 494 494 495 495 /* cache window 2: context */ 496 496 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 497 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), 497 + UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), 498 498 lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); 499 499 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 500 - UVD, inst_idx, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), 500 + UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), 501 501 upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE), 0, indirect); 502 502 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 503 - UVD, inst_idx, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); 503 + UVD, 0, mmUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); 504 504 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 505 - UVD, inst_idx, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); 505 + UVD, 0, mmUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); 506 506 507 507 /* non-cache window */ 508 508 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 509 - UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); 509 + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), 0, 0, indirect); 510 510 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 511 - UVD, inst_idx, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); 511 + UVD, 0, mmUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), 0, 0, indirect); 512 512 WREG32_SOC15_DPG_MODE_2_0(inst_idx, 
SOC15_DPG_MODE_OFFSET_2_0( 513 - UVD, inst_idx, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); 513 + UVD, 0, mmUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); 514 514 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 515 - UVD, inst_idx, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); 515 + UVD, 0, mmUVD_VCPU_NONCACHE_SIZE0), 0, 0, indirect); 516 516 517 517 /* VCN global tiling registers */ 518 518 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 519 - UVD, inst_idx, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); 519 + UVD, 0, mmUVD_GFX8_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); 520 520 } 521 521 522 522 /** ··· 670 670 UVD_CGC_CTRL__VCPU_MODE_MASK | 671 671 UVD_CGC_CTRL__MMSCH_MODE_MASK); 672 672 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 673 - UVD, inst_idx, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); 673 + UVD, 0, mmUVD_CGC_CTRL), reg_data, sram_sel, indirect); 674 674 675 675 /* turn off clock gating */ 676 676 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 677 - UVD, inst_idx, mmUVD_CGC_GATE), 0, sram_sel, indirect); 677 + UVD, 0, mmUVD_CGC_GATE), 0, sram_sel, indirect); 678 678 679 679 /* turn on SUVD clock gating */ 680 680 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 681 - UVD, inst_idx, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); 681 + UVD, 0, mmUVD_SUVD_CGC_GATE), 1, sram_sel, indirect); 682 682 683 683 /* turn on sw mode in UVD_SUVD_CGC_CTRL */ 684 684 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 685 - UVD, inst_idx, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); 685 + UVD, 0, mmUVD_SUVD_CGC_CTRL), 0, sram_sel, indirect); 686 686 } 687 687 688 688 /** ··· 772 772 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; 773 773 tmp |= UVD_VCPU_CNTL__BLK_RST_MASK; 774 774 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 775 - UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); 775 + UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, 
indirect); 776 776 777 777 /* disable master interupt */ 778 778 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 779 - UVD, inst_idx, mmUVD_MASTINT_EN), 0, 0, indirect); 779 + UVD, 0, mmUVD_MASTINT_EN), 0, 0, indirect); 780 780 781 781 /* setup mmUVD_LMI_CTRL */ 782 782 tmp = (0x8 | UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | ··· 788 788 (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | 789 789 0x00100000L); 790 790 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 791 - UVD, inst_idx, mmUVD_LMI_CTRL), tmp, 0, indirect); 791 + UVD, 0, mmUVD_LMI_CTRL), tmp, 0, indirect); 792 792 793 793 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 794 - UVD, inst_idx, mmUVD_MPC_CNTL), 794 + UVD, 0, mmUVD_MPC_CNTL), 795 795 0x2 << UVD_MPC_CNTL__REPLACEMENT_MODE__SHIFT, 0, indirect); 796 796 797 797 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 798 - UVD, inst_idx, mmUVD_MPC_SET_MUXA0), 798 + UVD, 0, mmUVD_MPC_SET_MUXA0), 799 799 ((0x1 << UVD_MPC_SET_MUXA0__VARA_1__SHIFT) | 800 800 (0x2 << UVD_MPC_SET_MUXA0__VARA_2__SHIFT) | 801 801 (0x3 << UVD_MPC_SET_MUXA0__VARA_3__SHIFT) | 802 802 (0x4 << UVD_MPC_SET_MUXA0__VARA_4__SHIFT)), 0, indirect); 803 803 804 804 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 805 - UVD, inst_idx, mmUVD_MPC_SET_MUXB0), 805 + UVD, 0, mmUVD_MPC_SET_MUXB0), 806 806 ((0x1 << UVD_MPC_SET_MUXB0__VARB_1__SHIFT) | 807 807 (0x2 << UVD_MPC_SET_MUXB0__VARB_2__SHIFT) | 808 808 (0x3 << UVD_MPC_SET_MUXB0__VARB_3__SHIFT) | 809 809 (0x4 << UVD_MPC_SET_MUXB0__VARB_4__SHIFT)), 0, indirect); 810 810 811 811 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 812 - UVD, inst_idx, mmUVD_MPC_SET_MUX), 812 + UVD, 0, mmUVD_MPC_SET_MUX), 813 813 ((0x0 << UVD_MPC_SET_MUX__SET_0__SHIFT) | 814 814 (0x1 << UVD_MPC_SET_MUX__SET_1__SHIFT) | 815 815 (0x2 << UVD_MPC_SET_MUX__SET_2__SHIFT)), 0, indirect); ··· 817 817 vcn_v2_5_mc_resume_dpg_mode(adev, inst_idx, indirect); 818 818 819 819 
WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 820 - UVD, inst_idx, mmUVD_REG_XX_MASK), 0x10, 0, indirect); 820 + UVD, 0, mmUVD_REG_XX_MASK), 0x10, 0, indirect); 821 821 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 822 - UVD, inst_idx, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); 822 + UVD, 0, mmUVD_RBC_XX_IB_REG_CHECK), 0x3, 0, indirect); 823 823 824 824 /* enable LMI MC and UMC channels */ 825 825 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 826 - UVD, inst_idx, mmUVD_LMI_CTRL2), 0, 0, indirect); 826 + UVD, 0, mmUVD_LMI_CTRL2), 0, 0, indirect); 827 827 828 828 /* unblock VCPU register access */ 829 829 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 830 - UVD, inst_idx, mmUVD_RB_ARB_CTRL), 0, 0, indirect); 830 + UVD, 0, mmUVD_RB_ARB_CTRL), 0, 0, indirect); 831 831 832 832 tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); 833 833 tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; 834 834 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 835 - UVD, inst_idx, mmUVD_VCPU_CNTL), tmp, 0, indirect); 835 + UVD, 0, mmUVD_VCPU_CNTL), tmp, 0, indirect); 836 836 837 837 /* enable master interrupt */ 838 838 WREG32_SOC15_DPG_MODE_2_0(inst_idx, SOC15_DPG_MODE_OFFSET_2_0( 839 - UVD, inst_idx, mmUVD_MASTINT_EN), 839 + UVD, 0, mmUVD_MASTINT_EN), 840 840 UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); 841 841 842 842 if (indirect) ··· 891 891 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 892 892 if (adev->vcn.harvest_config & (1 << i)) 893 893 continue; 894 - if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) 895 - return vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); 894 + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 895 + r = vcn_v2_5_start_dpg_mode(adev, i, adev->vcn.indirect_sram); 896 + continue; 897 + } 896 898 897 899 /* disable register anti-hang mechanism */ 898 900 WREG32_P(SOC15_REG_OFFSET(UVD, i, mmUVD_POWER_STATUS), 0, ··· 904 902 tmp = RREG32_SOC15(UVD, i, mmUVD_STATUS) | 
UVD_STATUS__UVD_BUSY; 905 903 WREG32_SOC15(UVD, i, mmUVD_STATUS, tmp); 906 904 } 905 + 906 + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) 907 + return 0; 907 908 908 909 /*SW clock gating */ 909 910 vcn_v2_5_disable_clock_gating(adev); ··· 1299 1294 for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 1300 1295 if (adev->vcn.harvest_config & (1 << i)) 1301 1296 continue; 1302 - 1303 1297 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 1304 1298 r = vcn_v2_5_stop_dpg_mode(adev, i); 1305 - goto power_off; 1299 + continue; 1306 1300 } 1307 1301 1308 1302 /* wait for vcn idle */ ··· 1353 1349 ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1354 1350 } 1355 1351 1356 - power_off: 1357 1352 if (adev->pm.dpm_enabled) 1358 1353 amdgpu_dpm_enable_uvd(adev, false); 1359 1354 ··· 1491 1488 .emit_ib = vcn_v2_0_dec_ring_emit_ib, 1492 1489 .emit_fence = vcn_v2_0_dec_ring_emit_fence, 1493 1490 .emit_vm_flush = vcn_v2_0_dec_ring_emit_vm_flush, 1494 - .test_ring = amdgpu_vcn_dec_ring_test_ring, 1491 + .test_ring = vcn_v2_0_dec_ring_test_ring, 1495 1492 .test_ib = amdgpu_vcn_dec_ring_test_ib, 1496 1493 .insert_nop = vcn_v2_0_dec_ring_insert_nop, 1497 1494 .insert_start = vcn_v2_0_dec_ring_insert_start, ··· 1666 1663 enum amd_clockgating_state state) 1667 1664 { 1668 1665 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1669 - bool enable = (state == AMD_CG_STATE_GATE) ? true : false; 1666 + bool enable = (state == AMD_CG_STATE_GATE); 1670 1667 1671 1668 if (amdgpu_sriov_vf(adev)) 1672 1669 return 0;
+1 -1
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
··· 717 717 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 718 718 719 719 vega10_ih_update_clockgating_state(adev, 720 - state == AMD_CG_STATE_GATE ? true : false); 720 + state == AMD_CG_STATE_GATE); 721 721 return 0; 722 722 723 723 }
+6 -4
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 1237 1237 1238 1238 list_add(&q->list, &qpd->queues_list); 1239 1239 qpd->queue_count++; 1240 + 1241 + if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 1242 + dqm->sdma_queue_count++; 1243 + else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) 1244 + dqm->xgmi_sdma_queue_count++; 1245 + 1240 1246 if (q->properties.is_active) { 1241 1247 dqm->queue_count++; 1242 1248 retval = execute_queues_cpsch(dqm, 1243 1249 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); 1244 1250 } 1245 1251 1246 - if (q->properties.type == KFD_QUEUE_TYPE_SDMA) 1247 - dqm->sdma_queue_count++; 1248 - else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) 1249 - dqm->xgmi_sdma_queue_count++; 1250 1252 /* 1251 1253 * Unconditionally increment this counter, regardless of the queue's 1252 1254 * type or whether the queue is active.
+46 -36
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 940 940 goto error; 941 941 } 942 942 943 - dc_hardware_init(adev->dm.dc); 944 - 945 943 r = dm_dmub_hw_init(adev); 946 944 if (r) { 947 945 DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 948 946 goto error; 949 947 } 948 + 949 + dc_hardware_init(adev->dm.dc); 950 950 951 951 adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); 952 952 if (!adev->dm.freesync_module) { ··· 7759 7759 struct drm_crtc_state *new_crtc_state, *old_crtc_state; 7760 7760 struct dm_crtc_state *new_dm_crtc_state, *old_dm_crtc_state; 7761 7761 struct dc_stream_status *status = NULL; 7762 - 7763 - struct dc_surface_update *updates; 7764 7762 enum surface_update_type update_type = UPDATE_TYPE_FAST; 7763 + struct surface_info_bundle { 7764 + struct dc_surface_update surface_updates[MAX_SURFACES]; 7765 + struct dc_plane_info plane_infos[MAX_SURFACES]; 7766 + struct dc_scaling_info scaling_infos[MAX_SURFACES]; 7767 + struct dc_flip_addrs flip_addrs[MAX_SURFACES]; 7768 + struct dc_stream_update stream_update; 7769 + } *bundle; 7765 7770 7766 - updates = kcalloc(MAX_SURFACES, sizeof(*updates), GFP_KERNEL); 7771 + bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); 7767 7772 7768 - if (!updates) { 7769 - DRM_ERROR("Failed to allocate plane updates\n"); 7773 + if (!bundle) { 7774 + DRM_ERROR("Failed to allocate update bundle\n"); 7770 7775 /* Set type to FULL to avoid crashing in DC*/ 7771 7776 update_type = UPDATE_TYPE_FULL; 7772 7777 goto cleanup; 7773 7778 } 7774 7779 7775 7780 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 7776 - struct dc_scaling_info scaling_info; 7777 - struct dc_stream_update stream_update; 7778 7781 7779 - memset(&stream_update, 0, sizeof(stream_update)); 7782 + memset(bundle, 0, sizeof(struct surface_info_bundle)); 7780 7783 7781 7784 new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); 7782 7785 old_dm_crtc_state = to_dm_crtc_state(old_crtc_state); ··· 7796 7793 for_each_oldnew_plane_in_state(state, plane, 
old_plane_state, new_plane_state, j) { 7797 7794 const struct amdgpu_framebuffer *amdgpu_fb = 7798 7795 to_amdgpu_framebuffer(new_plane_state->fb); 7799 - struct dc_plane_info plane_info; 7800 - struct dc_flip_addrs flip_addr; 7796 + struct dc_plane_info *plane_info = &bundle->plane_infos[num_plane]; 7797 + struct dc_flip_addrs *flip_addr = &bundle->flip_addrs[num_plane]; 7798 + struct dc_scaling_info *scaling_info = &bundle->scaling_infos[num_plane]; 7801 7799 uint64_t tiling_flags; 7802 7800 7803 7801 new_plane_crtc = new_plane_state->crtc; ··· 7816 7812 if (crtc != new_plane_crtc) 7817 7813 continue; 7818 7814 7819 - updates[num_plane].surface = new_dm_plane_state->dc_state; 7815 + bundle->surface_updates[num_plane].surface = 7816 + new_dm_plane_state->dc_state; 7820 7817 7821 7818 if (new_crtc_state->mode_changed) { 7822 - stream_update.dst = new_dm_crtc_state->stream->dst; 7823 - stream_update.src = new_dm_crtc_state->stream->src; 7819 + bundle->stream_update.dst = new_dm_crtc_state->stream->dst; 7820 + bundle->stream_update.src = new_dm_crtc_state->stream->src; 7824 7821 } 7825 7822 7826 7823 if (new_crtc_state->color_mgmt_changed) { 7827 - updates[num_plane].gamma = 7824 + bundle->surface_updates[num_plane].gamma = 7828 7825 new_dm_plane_state->dc_state->gamma_correction; 7829 - updates[num_plane].in_transfer_func = 7826 + bundle->surface_updates[num_plane].in_transfer_func = 7830 7827 new_dm_plane_state->dc_state->in_transfer_func; 7831 - stream_update.gamut_remap = 7828 + bundle->stream_update.gamut_remap = 7832 7829 &new_dm_crtc_state->stream->gamut_remap_matrix; 7833 - stream_update.output_csc_transform = 7830 + bundle->stream_update.output_csc_transform = 7834 7831 &new_dm_crtc_state->stream->csc_color_matrix; 7835 - stream_update.out_transfer_func = 7832 + bundle->stream_update.out_transfer_func = 7836 7833 new_dm_crtc_state->stream->out_transfer_func; 7837 7834 } 7838 7835 7839 7836 ret = fill_dc_scaling_info(new_plane_state, 7840 - &scaling_info); 
7837 + scaling_info); 7841 7838 if (ret) 7842 7839 goto cleanup; 7843 7840 7844 - updates[num_plane].scaling_info = &scaling_info; 7841 + bundle->surface_updates[num_plane].scaling_info = scaling_info; 7845 7842 7846 7843 if (amdgpu_fb) { 7847 7844 ret = get_fb_info(amdgpu_fb, &tiling_flags); 7848 7845 if (ret) 7849 7846 goto cleanup; 7850 7847 7851 - memset(&flip_addr, 0, sizeof(flip_addr)); 7852 - 7853 7848 ret = fill_dc_plane_info_and_addr( 7854 7849 dm->adev, new_plane_state, tiling_flags, 7855 - &plane_info, 7856 - &flip_addr.address); 7850 + plane_info, 7851 + &flip_addr->address); 7857 7852 if (ret) 7858 7853 goto cleanup; 7859 7854 7860 - updates[num_plane].plane_info = &plane_info; 7861 - updates[num_plane].flip_addr = &flip_addr; 7855 + bundle->surface_updates[num_plane].plane_info = plane_info; 7856 + bundle->surface_updates[num_plane].flip_addr = flip_addr; 7862 7857 } 7863 7858 7864 7859 num_plane++; ··· 7878 7875 7879 7876 status = dc_stream_get_status_from_state(old_dm_state->context, 7880 7877 new_dm_crtc_state->stream); 7881 - stream_update.stream = new_dm_crtc_state->stream; 7878 + bundle->stream_update.stream = new_dm_crtc_state->stream; 7882 7879 /* 7883 7880 * TODO: DC modifies the surface during this call so we need 7884 7881 * to lock here - find a way to do this without locking. 
7885 7882 */ 7886 7883 mutex_lock(&dm->dc_lock); 7887 - update_type = dc_check_update_surfaces_for_stream(dc, updates, num_plane, 7888 - &stream_update, status); 7884 + update_type = dc_check_update_surfaces_for_stream( 7885 + dc, bundle->surface_updates, num_plane, 7886 + &bundle->stream_update, status); 7889 7887 mutex_unlock(&dm->dc_lock); 7890 7888 7891 7889 if (update_type > UPDATE_TYPE_MED) { ··· 7896 7892 } 7897 7893 7898 7894 cleanup: 7899 - kfree(updates); 7895 + kfree(bundle); 7900 7896 7901 7897 *out_type = update_type; 7902 7898 return ret; ··· 8167 8163 goto fail; 8168 8164 #endif 8169 8165 8166 + /* 8167 + * Perform validation of MST topology in the state: 8168 + * We need to perform MST atomic check before calling 8169 + * dc_validate_global_state(), or there is a chance 8170 + * to get stuck in an infinite loop and hang eventually. 8171 + */ 8172 + ret = drm_dp_mst_atomic_check(state); 8173 + if (ret) 8174 + goto fail; 8175 + 8170 8176 if (dc_validate_global_state(dc, dm_state->context, false) != DC_OK) { 8171 8177 ret = -EINVAL; 8172 8178 goto fail; ··· 8205 8191 dc_retain_state(old_dm_state->context); 8206 8192 } 8207 8193 } 8208 - /* Perform validation of MST topology in the state*/ 8209 - ret = drm_dp_mst_atomic_check(state); 8210 - if (ret) 8211 - goto fail; 8212 8194 8213 8195 /* Store the overall update type for use later in atomic check. */ 8214 8196 for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
+19
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
··· 135 135 mutex_unlock(&hdcp_w->mutex); 136 136 } 137 137 138 + static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, 139 + unsigned int link_index, 140 + struct amdgpu_dm_connector *aconnector) 141 + { 142 + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; 143 + 144 + mutex_lock(&hdcp_w->mutex); 145 + hdcp_w->aconnector = aconnector; 146 + 147 + mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); 148 + 149 + process_output(hdcp_w); 150 + mutex_unlock(&hdcp_w->mutex); 151 + } 138 152 void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) 139 153 { 140 154 struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; ··· 317 303 memset(link, 0, sizeof(*link)); 318 304 319 305 display->index = aconnector->base.index; 306 + 307 + if (config->dpms_off) { 308 + hdcp_remove_display(hdcp_work, link_index, aconnector); 309 + return; 310 + } 320 311 display->state = MOD_HDCP_DISPLAY_ACTIVE; 321 312 322 313 if (aconnector->dc_sink != NULL)
+4 -9
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
··· 216 216 drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); 217 217 } 218 218 219 - ret = drm_dp_update_payload_part1(mst_mgr); 219 + /* It's OK for this to fail */ 220 + drm_dp_update_payload_part1(mst_mgr); 220 221 221 222 /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or 222 223 * AUX message. The sequence is slot 1-63 allocated sequence for each ··· 225 224 * sequence. copy DRM MST allocation to dc */ 226 225 227 226 get_payload_table(aconnector, proposed_table); 228 - 229 - if (ret) 230 - return false; 231 227 232 228 return true; 233 229 } ··· 283 285 struct amdgpu_dm_connector *aconnector; 284 286 struct drm_dp_mst_topology_mgr *mst_mgr; 285 287 struct drm_dp_mst_port *mst_port; 286 - int ret; 287 288 288 289 aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; 289 290 ··· 296 299 if (!mst_mgr->mst_state) 297 300 return false; 298 301 299 - ret = drm_dp_update_payload_part2(mst_mgr); 300 - 301 - if (ret) 302 - return false; 302 + /* It's OK for this to fail */ 303 + drm_dp_update_payload_part2(mst_mgr); 303 304 304 305 if (!enable) 305 306 drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
··· 632 632 if (drm_dp_atomic_find_vcpi_slots(state, 633 633 params[next_index].port->mgr, 634 634 params[next_index].port, 635 - vars[next_index].pbn,\ 635 + vars[next_index].pbn, 636 636 dm_mst_get_pbn_divider(dc_link)) < 0) 637 637 return; 638 638 if (!drm_dp_mst_atomic_check(state)) {
+74 -4
drivers/gpu/drm/amd/display/dc/bios/command_table2.c
··· 89 89 struct bios_parser *bp, 90 90 struct bp_encoder_control *cntl); 91 91 92 + static enum bp_result encoder_control_fallback( 93 + struct bios_parser *bp, 94 + struct bp_encoder_control *cntl); 95 + 92 96 static void init_dig_encoder_control(struct bios_parser *bp) 93 97 { 94 98 uint32_t version = ··· 104 100 break; 105 101 default: 106 102 dm_output_to_console("Don't have dig_encoder_control for v%d\n", version); 107 - bp->cmd_tbl.dig_encoder_control = NULL; 103 + bp->cmd_tbl.dig_encoder_control = encoder_control_fallback; 108 104 break; 109 105 } 110 106 } ··· 188 184 return result; 189 185 } 190 186 187 + static enum bp_result encoder_control_fallback( 188 + struct bios_parser *bp, 189 + struct bp_encoder_control *cntl) 190 + { 191 + if (bp->base.ctx->dc->ctx->dmub_srv && 192 + bp->base.ctx->dc->debug.dmub_command_table) { 193 + return encoder_control_digx_v1_5(bp, cntl); 194 + } 195 + 196 + return BP_RESULT_FAILURE; 197 + } 198 + 191 199 /***************************************************************************** 192 200 ****************************************************************************** 193 201 ** ··· 209 193 *****************************************************************************/ 210 194 211 195 static enum bp_result transmitter_control_v1_6( 196 + struct bios_parser *bp, 197 + struct bp_transmitter_control *cntl); 198 + 199 + static enum bp_result transmitter_control_fallback( 212 200 struct bios_parser *bp, 213 201 struct bp_transmitter_control *cntl); 214 202 ··· 229 209 break; 230 210 default: 231 211 dm_output_to_console("Don't have transmitter_control for v%d\n", crev); 232 - bp->cmd_tbl.transmitter_control = NULL; 212 + bp->cmd_tbl.transmitter_control = transmitter_control_fallback; 233 213 break; 234 214 } 235 215 } ··· 293 273 return result; 294 274 } 295 275 276 + static enum bp_result transmitter_control_fallback( 277 + struct bios_parser *bp, 278 + struct bp_transmitter_control *cntl) 279 + { 280 + if 
(bp->base.ctx->dc->ctx->dmub_srv && 281 + bp->base.ctx->dc->debug.dmub_command_table) { 282 + return transmitter_control_v1_6(bp, cntl); 283 + } 284 + 285 + return BP_RESULT_FAILURE; 286 + } 287 + 296 288 /****************************************************************************** 297 289 ****************************************************************************** 298 290 ** ··· 317 285 struct bios_parser *bp, 318 286 struct bp_pixel_clock_parameters *bp_params); 319 287 288 + static enum bp_result set_pixel_clock_fallback( 289 + struct bios_parser *bp, 290 + struct bp_pixel_clock_parameters *bp_params); 291 + 320 292 static void init_set_pixel_clock(struct bios_parser *bp) 321 293 { 322 294 switch (BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)) { ··· 330 294 default: 331 295 dm_output_to_console("Don't have set_pixel_clock for v%d\n", 332 296 BIOS_CMD_TABLE_PARA_REVISION(setpixelclock)); 333 - bp->cmd_tbl.set_pixel_clock = NULL; 297 + bp->cmd_tbl.set_pixel_clock = set_pixel_clock_fallback; 334 298 break; 335 299 } 336 300 } ··· 434 398 result = BP_RESULT_OK; 435 399 } 436 400 return result; 401 + } 402 + 403 + static enum bp_result set_pixel_clock_fallback( 404 + struct bios_parser *bp, 405 + struct bp_pixel_clock_parameters *bp_params) 406 + { 407 + if (bp->base.ctx->dc->ctx->dmub_srv && 408 + bp->base.ctx->dc->debug.dmub_command_table) { 409 + return set_pixel_clock_v7(bp, bp_params); 410 + } 411 + 412 + return BP_RESULT_FAILURE; 437 413 } 438 414 439 415 /****************************************************************************** ··· 680 632 enum controller_id crtc_id, 681 633 enum bp_pipe_control_action action); 682 634 635 + static enum bp_result enable_disp_power_gating_fallback( 636 + struct bios_parser *bp, 637 + enum controller_id crtc_id, 638 + enum bp_pipe_control_action action); 639 + 683 640 static void init_enable_disp_power_gating( 684 641 struct bios_parser *bp) 685 642 { ··· 696 643 default: 697 644 dm_output_to_console("Don't 
enable_disp_power_gating enable_crtc for v%d\n", 698 645 BIOS_CMD_TABLE_PARA_REVISION(enabledisppowergating)); 699 - bp->cmd_tbl.enable_disp_power_gating = NULL; 646 + bp->cmd_tbl.enable_disp_power_gating = enable_disp_power_gating_fallback; 700 647 break; 701 648 } 702 649 } ··· 710 657 power_gating.header.type = DMUB_CMD__VBIOS; 711 658 power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING; 712 659 power_gating.power_gating.pwr = *pwr; 660 + 661 + /* ATOM_ENABLE is old API in DMUB */ 662 + if (power_gating.power_gating.pwr.enable == ATOM_ENABLE) 663 + power_gating.power_gating.pwr.enable = ATOM_INIT; 713 664 714 665 dc_dmub_srv_cmd_queue(dmcub, &power_gating.header); 715 666 dc_dmub_srv_cmd_execute(dmcub); ··· 750 693 result = BP_RESULT_OK; 751 694 752 695 return result; 696 + } 697 + 698 + static enum bp_result enable_disp_power_gating_fallback( 699 + struct bios_parser *bp, 700 + enum controller_id crtc_id, 701 + enum bp_pipe_control_action action) 702 + { 703 + if (bp->base.ctx->dc->ctx->dmub_srv && 704 + bp->base.ctx->dc->debug.dmub_command_table) { 705 + return enable_disp_power_gating_v2_1(bp, crtc_id, action); 706 + } 707 + 708 + return BP_RESULT_FAILURE; 753 709 } 754 710 755 711 /******************************************************************************
+23 -11
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
··· 1435 1435 struct dc_context *ctx = dc->ctx; 1436 1436 struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; 1437 1437 bool res; 1438 + unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx; 1438 1439 1439 1440 /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ 1440 1441 res = dm_pp_get_clock_levels_by_type_with_voltage( ··· 1447 1446 res = verify_clock_values(&fclks); 1448 1447 1449 1448 if (res) { 1450 - ASSERT(fclks.num_levels >= 3); 1451 - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 32 * (fclks.data[0].clocks_in_khz / 1000.0) / 1000.0; 1452 - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->number_of_channels * 1453 - (fclks.data[fclks.num_levels - (fclks.num_levels > 2 ? 3 : 2)].clocks_in_khz / 1000.0) 1454 - * ddr4_dram_factor_single_Channel / 1000.0; 1455 - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->number_of_channels * 1456 - (fclks.data[fclks.num_levels - 2].clocks_in_khz / 1000.0) 1457 - * ddr4_dram_factor_single_Channel / 1000.0; 1458 - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->number_of_channels * 1459 - (fclks.data[fclks.num_levels - 1].clocks_in_khz / 1000.0) 1460 - * ddr4_dram_factor_single_Channel / 1000.0; 1449 + ASSERT(fclks.num_levels); 1450 + 1451 + vmin0p65_idx = 0; 1452 + vmid0p72_idx = fclks.num_levels - 1453 + (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1)); 1454 + vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 
2 : 1); 1455 + vmax0p9_idx = fclks.num_levels - 1; 1456 + 1457 + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = 1458 + 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; 1459 + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = 1460 + dc->dcn_soc->number_of_channels * 1461 + (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0) 1462 + * ddr4_dram_factor_single_Channel / 1000.0; 1463 + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = 1464 + dc->dcn_soc->number_of_channels * 1465 + (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0) 1466 + * ddr4_dram_factor_single_Channel / 1000.0; 1467 + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = 1468 + dc->dcn_soc->number_of_channels * 1469 + (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0) 1470 + * ddr4_dram_factor_single_Channel / 1000.0; 1461 1471 } else 1462 1472 BREAK_TO_DEBUGGER(); 1463 1473
+9 -8
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 2462 2462 enum dc_acpi_cm_power_state power_state) 2463 2463 { 2464 2464 struct kref refcount; 2465 - struct display_mode_lib *dml = kzalloc(sizeof(struct display_mode_lib), 2466 - GFP_KERNEL); 2467 - 2468 - ASSERT(dml); 2469 - if (!dml) 2470 - return; 2465 + struct display_mode_lib *dml; 2471 2466 2472 2467 switch (power_state) { 2473 2468 case DC_ACPI_CM_POWER_STATE_D0: ··· 2485 2490 * clean state, and dc hw programming optimizations will not 2486 2491 * cause any trouble. 2487 2492 */ 2493 + dml = kzalloc(sizeof(struct display_mode_lib), 2494 + GFP_KERNEL); 2495 + 2496 + ASSERT(dml); 2497 + if (!dml) 2498 + return; 2488 2499 2489 2500 /* Preserve refcount */ 2490 2501 refcount = dc->current_state->refcount; ··· 2504 2503 dc->current_state->refcount = refcount; 2505 2504 dc->current_state->bw_ctx.dml = *dml; 2506 2505 2506 + kfree(dml); 2507 + 2507 2508 break; 2508 2509 } 2509 - 2510 - kfree(dml); 2511 2510 } 2512 2511 2513 2512 void dc_resume(struct dc *dc)
+18 -12
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 851 851 if (memcmp(&link->dpcd_caps, &prev_dpcd_caps, sizeof(struct dpcd_caps))) 852 852 same_dpcd = false; 853 853 } 854 - /* Active dongle plug in without display or downstream unplug*/ 854 + /* Active dongle downstream unplug*/ 855 855 if (link->type == dc_connection_active_dongle && 856 856 link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { 857 - if (prev_sink != NULL) { 857 + if (prev_sink != NULL) 858 858 /* Downstream unplug */ 859 859 dc_sink_release(prev_sink); 860 - } else { 861 - /* Empty dongle plug in */ 862 - dp_verify_link_cap_with_retries(link, 863 - &link->reported_link_cap, 864 - LINK_TRAINING_MAX_VERIFY_RETRY); 865 - } 866 860 return true; 867 861 } 868 862 ··· 963 969 same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid); 964 970 965 971 if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT && 966 - sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX && 967 - reason != DETECT_REASON_HPDRX) { 972 + sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) { 968 973 /* 969 974 * TODO debug why Dell 2413 doesn't like 970 975 * two link trainings ··· 2875 2882 // Clear all of MST payload then reallocate 2876 2883 for (i = 0; i < MAX_PIPES; i++) { 2877 2884 pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; 2878 - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && 2885 + 2886 + /* driver enable split pipe for external monitors 2887 + * we have to check pipe_ctx is split pipe or not 2888 + * If it's split pipe, driver using top pipe to 2889 + * reaallocate. 
2890 + */ 2891 + if (!pipe_ctx || pipe_ctx->top_pipe) 2892 + continue; 2893 + 2894 + if (pipe_ctx->stream && pipe_ctx->stream->link == link && 2879 2895 pipe_ctx->stream->dpms_off == false && 2880 2896 pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 2881 2897 deallocate_mst_payload(pipe_ctx); ··· 2893 2891 2894 2892 for (i = 0; i < MAX_PIPES; i++) { 2895 2893 pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; 2896 - if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && 2894 + 2895 + if (!pipe_ctx || pipe_ctx->top_pipe) 2896 + continue; 2897 + 2898 + if (pipe_ctx->stream && pipe_ctx->stream->link == link && 2897 2899 pipe_ctx->stream->dpms_off == false && 2898 2900 pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { 2899 2901 /* enable/disable PHY will clear connection between BE and FE
+2 -1
drivers/gpu/drm/amd/display/dc/dc.h
··· 39 39 #include "inc/hw/dmcu.h" 40 40 #include "dml/display_mode_lib.h" 41 41 42 - #define DC_VER "3.2.68" 42 + #define DC_VER "3.2.69" 43 43 44 44 #define MAX_SURFACES 3 45 45 #define MAX_PLANES 6 ··· 425 425 bool validate_dml_output; 426 426 bool enable_dmcub_surface_flip; 427 427 bool usbc_combo_phy_reset_wa; 428 + bool disable_dsc; 428 429 }; 429 430 430 431 struct dc_debug_data {
+18 -55
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
··· 100 100 dce_i2c_hw->buffer_used_bytes; 101 101 } 102 102 103 - static uint32_t get_speed( 104 - const struct dce_i2c_hw *dce_i2c_hw) 105 - { 106 - uint32_t pre_scale = 0; 107 - 108 - REG_GET(SPEED, DC_I2C_DDC1_PRESCALE, &pre_scale); 109 - 110 - /* [anaumov] it seems following is unnecessary */ 111 - /*ASSERT(value.bits.DC_I2C_DDC1_PRESCALE);*/ 112 - return pre_scale ? 113 - dce_i2c_hw->reference_frequency / pre_scale : 114 - dce_i2c_hw->default_speed; 115 - } 116 - 117 103 static void process_channel_reply( 118 104 struct dce_i2c_hw *dce_i2c_hw, 119 105 struct i2c_payload *reply) ··· 264 278 struct dce_i2c_hw *dce_i2c_hw, 265 279 uint32_t speed) 266 280 { 281 + uint32_t xtal_ref_div = 0; 282 + uint32_t prescale = 0; 283 + 284 + REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); 285 + 286 + if (xtal_ref_div == 0) 287 + xtal_ref_div = 2; 288 + 289 + prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed; 267 290 268 291 if (speed) { 269 292 if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL) 270 293 REG_UPDATE_N(SPEED, 3, 271 - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, 294 + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, 272 295 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2, 273 296 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 
2:1); 274 297 else 275 298 REG_UPDATE_N(SPEED, 2, 276 - FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), dce_i2c_hw->reference_frequency / speed, 299 + FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale, 277 300 FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2); 278 301 } 279 302 } ··· 339 344 bool safe_to_reset; 340 345 341 346 /* Restore original HW engine speed */ 342 - 343 - set_speed(dce_i2c_hw, dce_i2c_hw->original_speed); 344 - 347 + set_speed(dce_i2c_hw, dce_i2c_hw->default_speed); 345 348 346 349 /* Reset HW engine */ 347 350 { ··· 371 378 { 372 379 uint32_t counter = 0; 373 380 enum gpio_result result; 374 - uint32_t current_speed; 375 381 struct dce_i2c_hw *dce_i2c_hw = NULL; 376 382 377 383 if (!ddc) ··· 407 415 return NULL; 408 416 409 417 dce_i2c_hw->ddc = ddc; 410 - 411 - current_speed = get_speed(dce_i2c_hw); 412 - 413 - if (current_speed) 414 - dce_i2c_hw->original_speed = current_speed; 415 418 416 419 if (!setup_engine(dce_i2c_hw)) { 417 420 release_engine(dce_i2c_hw); ··· 465 478 466 479 static uint32_t get_transaction_timeout_hw( 467 480 const struct dce_i2c_hw *dce_i2c_hw, 468 - uint32_t length) 481 + uint32_t length, 482 + uint32_t speed) 469 483 { 470 - 471 - uint32_t speed = get_speed(dce_i2c_hw); 472 - 473 - 474 - 475 484 uint32_t period_timeout; 476 485 uint32_t num_of_clock_stretches; 477 486 ··· 487 504 bool dce_i2c_hw_engine_submit_payload( 488 505 struct dce_i2c_hw *dce_i2c_hw, 489 506 struct i2c_payload *payload, 490 - bool middle_of_transaction) 507 + bool middle_of_transaction, 508 + uint32_t speed) 491 509 { 492 510 493 511 struct i2c_request_transaction_data request; ··· 526 542 /* obtain timeout value before submitting request */ 527 543 528 544 transaction_timeout = get_transaction_timeout_hw( 529 - dce_i2c_hw, payload->length + 1); 545 + dce_i2c_hw, payload->length + 1, speed); 530 546 531 547 submit_channel_request_hw( 532 548 dce_i2c_hw, &request); ··· 572 588 struct i2c_payload *payload = cmd->payloads + index_of_payload; 573 
589 574 590 if (!dce_i2c_hw_engine_submit_payload( 575 - dce_i2c_hw, payload, mot)) { 591 + dce_i2c_hw, payload, mot, cmd->speed)) { 576 592 result = false; 577 593 break; 578 594 } 579 - 580 - 581 595 582 596 ++index_of_payload; 583 597 } ··· 607 625 dce_i2c_hw->buffer_used_bytes = 0; 608 626 dce_i2c_hw->transaction_count = 0; 609 627 dce_i2c_hw->engine_keep_power_up_count = 1; 610 - dce_i2c_hw->original_speed = DEFAULT_I2C_HW_SPEED; 611 628 dce_i2c_hw->default_speed = DEFAULT_I2C_HW_SPEED; 612 629 dce_i2c_hw->send_reset_length = 0; 613 630 dce_i2c_hw->setup_limit = I2C_SETUP_TIME_LIMIT_DCE; ··· 621 640 const struct dce_i2c_shift *shifts, 622 641 const struct dce_i2c_mask *masks) 623 642 { 624 - 625 - uint32_t xtal_ref_div = 0; 626 - 627 643 dce_i2c_hw_construct(dce_i2c_hw, 628 644 ctx, 629 645 engine_id, ··· 628 650 shifts, 629 651 masks); 630 652 dce_i2c_hw->buffer_size = I2C_HW_BUFFER_SIZE_DCE100; 631 - 632 - REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div); 633 - 634 - if (xtal_ref_div == 0) 635 - xtal_ref_div = 2; 636 - 637 - /*Calculating Reference Clock by divding original frequency by 638 - * XTAL_REF_DIV. 639 - * At upper level, uint32_t reference_frequency = 640 - * dal_dce_i2c_get_reference_clock(as) >> 1 641 - * which already divided by 2. So we need x2 to get original 642 - * reference clock from ppll_info 643 - */ 644 - dce_i2c_hw->reference_frequency = 645 - (dce_i2c_hw->reference_frequency * 2) / xtal_ref_div; 646 653 } 647 654 648 655 void dce112_i2c_hw_construct(
-1
drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.h
··· 256 256 257 257 struct dce_i2c_hw { 258 258 struct ddc *ddc; 259 - uint32_t original_speed; 260 259 uint32_t engine_keep_power_up_count; 261 260 uint32_t transaction_count; 262 261 uint32_t buffer_used_bytes;
+30
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 2911 2911 hubbub->funcs->update_dchub(hubbub, dh_data); 2912 2912 } 2913 2913 2914 + static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx) 2915 + { 2916 + struct pipe_ctx *test_pipe; 2917 + const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2; 2918 + int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b; 2919 + 2920 + /** 2921 + * Disable the cursor if there's another pipe above this with a 2922 + * plane that contains this pipe's viewport to prevent double cursor 2923 + * and incorrect scaling artifacts. 2924 + */ 2925 + for (test_pipe = pipe_ctx->top_pipe; test_pipe; 2926 + test_pipe = test_pipe->top_pipe) { 2927 + if (!test_pipe->plane_state->visible) 2928 + continue; 2929 + 2930 + r2 = &test_pipe->plane_res.scl_data.recout; 2931 + r2_r = r2->x + r2->width; 2932 + r2_b = r2->y + r2->height; 2933 + 2934 + if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b) 2935 + return true; 2936 + } 2937 + 2938 + return false; 2939 + } 2940 + 2914 2941 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx) 2915 2942 { 2916 2943 struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position; ··· 2981 2954 2982 2955 if (pipe_ctx->plane_state->address.type 2983 2956 == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE) 2957 + pos_cpy.enable = false; 2958 + 2959 + if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx)) 2984 2960 pos_cpy.enable = false; 2985 2961 2986 2962 // Swap axis and mirror horizontally
+6 -6
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
··· 261 261 address->video_progressive.luma_addr.high_part == 0xf4) 262 262 return; 263 263 264 - if ((rotation_angle == 0 || rotation_angle == 180) 264 + if ((rotation_angle == ROTATION_ANGLE_0 || rotation_angle == ROTATION_ANGLE_180) 265 265 && viewport_c_height <= 512) 266 266 return; 267 267 268 - if ((rotation_angle == 90 || rotation_angle == 270) 268 + if ((rotation_angle == ROTATION_ANGLE_90 || rotation_angle == ROTATION_ANGLE_270) 269 269 && viewport_c_width <= 512) 270 270 return; 271 271 272 272 switch (rotation_angle) { 273 - case 0: /* 0 degree rotation */ 273 + case ROTATION_ANGLE_0: /* 0 degree rotation */ 274 274 row_height = 128; 275 275 patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1; 276 276 patched_viewport_width = viewport_c_width; 277 277 hubp21->PLAT_54186_wa_chroma_addr_offset = 0; 278 278 break; 279 - case 2: /* 180 degree rotation */ 279 + case ROTATION_ANGLE_180: /* 180 degree rotation */ 280 280 row_height = 128; 281 281 patched_viewport_height = viewport_c_height + row_height; 282 282 patched_viewport_width = viewport_c_width; 283 283 hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe; 284 284 break; 285 - case 1: /* 90 degree rotation */ 285 + case ROTATION_ANGLE_90: /* 90 degree rotation */ 286 286 row_height = 256; 287 287 if (h_mirror_en) { 288 288 patched_viewport_height = viewport_c_height; ··· 294 294 hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; 295 295 } 296 296 break; 297 - case 3: /* 270 degree rotation */ 297 + case ROTATION_ANGLE_270: /* 270 degree rotation */ 298 298 row_height = 256; 299 299 if (h_mirror_en) { 300 300 patched_viewport_height = viewport_c_height;
+1 -1
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
··· 830 830 .disable_dcc = DCC_ENABLE, 831 831 .vsr_support = true, 832 832 .performance_trace = false, 833 - .max_downscale_src_width = 3840, 833 + .max_downscale_src_width = 4096, 834 834 .disable_pplib_wm_range = false, 835 835 .scl_reset_length10 = true, 836 836 .sanity_checks = true,
+13 -6
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20.c
··· 38 38 39 39 #define BPP_INVALID 0 40 40 #define BPP_BLENDED_PIPE 0xffffffff 41 + #define DCN20_MAX_420_IMAGE_WIDTH 4096 41 42 42 43 static double adjust_ReturnBW( 43 44 struct display_mode_lib *mode_lib, ··· 3895 3894 && i == mode_lib->vba.soc.num_states) 3896 3895 mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 3897 3896 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); 3898 - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { 3899 - locals->ODMCombineEnablePerState[i][k] = false; 3900 - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; 3901 - } else { 3902 - locals->ODMCombineEnablePerState[i][k] = true; 3903 - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3897 + 3898 + locals->ODMCombineEnablePerState[i][k] = false; 3899 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; 3900 + if (mode_lib->vba.ODMCapability) { 3901 + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { 3902 + locals->ODMCombineEnablePerState[i][k] = true; 3903 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3904 + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { 3905 + locals->ODMCombineEnablePerState[i][k] = true; 3906 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3907 + } 3904 3908 } 3909 + 3905 3910 if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity 3906 3911 && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] 3907 3912 && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
+16 -8
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
··· 39 39 #define BPP_INVALID 0 40 40 #define BPP_BLENDED_PIPE 0xffffffff 41 41 #define DCN20_MAX_DSC_IMAGE_WIDTH 5184 42 + #define DCN20_MAX_420_IMAGE_WIDTH 4096 42 43 43 44 static double adjust_ReturnBW( 44 45 struct display_mode_lib *mode_lib, ··· 3936 3935 && i == mode_lib->vba.soc.num_states) 3937 3936 mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 3938 3937 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); 3939 - if (mode_lib->vba.ODMCapability == false || 3940 - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown 3941 - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) { 3942 - locals->ODMCombineEnablePerState[i][k] = false; 3943 - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; 3944 - } else { 3945 - locals->ODMCombineEnablePerState[i][k] = true; 3946 - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3938 + 3939 + locals->ODMCombineEnablePerState[i][k] = false; 3940 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; 3941 + if (mode_lib->vba.ODMCapability) { 3942 + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { 3943 + locals->ODMCombineEnablePerState[i][k] = true; 3944 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3945 + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN20_MAX_DSC_IMAGE_WIDTH)) { 3946 + locals->ODMCombineEnablePerState[i][k] = true; 3947 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3948 + } else if (locals->HActive[k] > DCN20_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { 3949 + locals->ODMCombineEnablePerState[i][k] = true; 3950 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3951 + } 3947 3952 } 3953 + 3948 3954 if 
(locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity 3949 3955 && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] 3950 3956 && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
+16 -8
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
··· 65 65 #define BPP_INVALID 0 66 66 #define BPP_BLENDED_PIPE 0xffffffff 67 67 #define DCN21_MAX_DSC_IMAGE_WIDTH 5184 68 + #define DCN21_MAX_420_IMAGE_WIDTH 4096 68 69 69 70 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); 70 71 static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( ··· 3972 3971 && i == mode_lib->vba.soc.num_states) 3973 3972 mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 3974 3973 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); 3975 - if (mode_lib->vba.ODMCapability == false || 3976 - (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown 3977 - && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) { 3978 - locals->ODMCombineEnablePerState[i][k] = false; 3979 - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; 3980 - } else { 3981 - locals->ODMCombineEnablePerState[i][k] = true; 3982 - mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3974 + 3975 + locals->ODMCombineEnablePerState[i][k] = false; 3976 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; 3977 + if (mode_lib->vba.ODMCapability) { 3978 + if (locals->PlaneRequiredDISPCLKWithoutODMCombine > MaxMaxDispclkRoundedDown) { 3979 + locals->ODMCombineEnablePerState[i][k] = true; 3980 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3981 + } else if (locals->DSCEnabled[k] && (locals->HActive[k] > DCN21_MAX_DSC_IMAGE_WIDTH)) { 3982 + locals->ODMCombineEnablePerState[i][k] = true; 3983 + mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3984 + } else if (locals->HActive[k] > DCN21_MAX_420_IMAGE_WIDTH && locals->OutputFormat[k] == dm_420) { 3985 + locals->ODMCombineEnablePerState[i][k] = true; 3986 + mode_lib->vba.PlaneRequiredDISPCLK = 
mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine; 3987 + } 3983 3988 } 3989 + 3984 3990 if (locals->MinDPPCLKUsingSingleDPP[k] * (1.0 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0) <= mode_lib->vba.MaxDppclkRoundedDownToDFSGranularity 3985 3991 && locals->SwathWidthYSingleDPP[k] <= locals->MaximumSwathWidth[k] 3986 3992 && locals->ODMCombineEnablePerState[i][k] == dm_odm_combine_mode_disabled) {
+2 -2
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
··· 763 763 double SwathWidthC[DC__NUM_DPP__MAX]; 764 764 unsigned int BytePerPixelY[DC__NUM_DPP__MAX]; 765 765 unsigned int BytePerPixelC[DC__NUM_DPP__MAX]; 766 - long dummyinteger1; 767 - long dummyinteger2; 766 + unsigned int dummyinteger1; 767 + unsigned int dummyinteger2; 768 768 double FinalDRAMClockChangeLatency; 769 769 double Tdmdl_vm[DC__NUM_DPP__MAX]; 770 770 double Tdmdl[DC__NUM_DPP__MAX];
+2 -1
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
··· 224 224 225 225 memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); 226 226 if (dsc) { 227 - dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); 227 + if (!dsc->ctx->dc->debug.disable_dsc) 228 + dsc->funcs->dsc_get_enc_caps(dsc_enc_caps, pixel_clock_100Hz); 228 229 if (dsc->ctx->dc->debug.native422_support) 229 230 dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 1; 230 231 }
+17
drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
··· 231 231 struct dmub_srv_hw_funcs { 232 232 /* private: internal use only */ 233 233 234 + void (*init)(struct dmub_srv *dmub); 235 + 234 236 void (*reset)(struct dmub_srv *dmub); 235 237 236 238 void (*reset_release)(struct dmub_srv *dmub); ··· 417 415 */ 418 416 enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub, 419 417 const struct dmub_srv_hw_params *params); 418 + 419 + /** 420 + * dmub_srv_hw_reset() - puts the DMUB hardware in reset state if initialized 421 + * @dmub: the dmub service 422 + * 423 + * Before destroying the DMUB service or releasing the backing framebuffer 424 + * memory we'll need to put the DMCUB into reset first. 425 + * 426 + * A subsequent call to dmub_srv_hw_init() will re-enable the DMCUB. 427 + * 428 + * Return: 429 + * DMUB_STATUS_OK - success 430 + * DMUB_STATUS_INVALID - unspecified error 431 + */ 432 + enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub); 420 433 421 434 /** 422 435 * dmub_srv_cmd_queue() - queues a command to the DMUB
+21 -4
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
··· 54 54 55 55 /* Shared functions. */ 56 56 57 + static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub, 58 + uint64_t *fb_base, 59 + uint64_t *fb_offset) 60 + { 61 + uint32_t tmp; 62 + 63 + REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp); 64 + *fb_base = (uint64_t)tmp << 24; 65 + 66 + REG_GET(DCN_VM_FB_OFFSET, FB_OFFSET, &tmp); 67 + *fb_offset = (uint64_t)tmp << 24; 68 + } 69 + 57 70 static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in, 58 71 uint64_t fb_base, 59 72 uint64_t fb_offset, ··· 80 67 REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1); 81 68 REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); 82 69 REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); 70 + REG_WRITE(DMCUB_INBOX1_RPTR, 0); 71 + REG_WRITE(DMCUB_INBOX1_WPTR, 0); 83 72 } 84 73 85 74 void dmub_dcn20_reset_release(struct dmub_srv *dmub) ··· 97 82 const struct dmub_window *cw1) 98 83 { 99 84 union dmub_addr offset; 100 - uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; 85 + uint64_t fb_base, fb_offset; 86 + 87 + dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); 101 88 102 89 REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); 103 90 REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, ··· 135 118 const struct dmub_window *cw6) 136 119 { 137 120 union dmub_addr offset; 138 - uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; 121 + uint64_t fb_base, fb_offset; 122 + 123 + dmub_dcn20_get_fb_base_offset(dmub, &fb_base, &fb_offset); 139 124 140 125 dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); 141 126 ··· 192 173 193 174 REG_WRITE(DMCUB_INBOX1_BASE_ADDRESS, 0x80000000); 194 175 REG_WRITE(DMCUB_INBOX1_SIZE, inbox1->top - inbox1->base); 195 - REG_WRITE(DMCUB_INBOX1_RPTR, 0); 196 - REG_WRITE(DMCUB_INBOX1_WPTR, 0); 197 176 } 198 177 199 178 uint32_t dmub_dcn20_get_inbox1_rptr(struct dmub_srv *dmub)
+6 -2
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
··· 92 92 DMUB_SR(DMCUB_SCRATCH14) \ 93 93 DMUB_SR(DMCUB_SCRATCH15) \ 94 94 DMUB_SR(CC_DC_PIPE_DIS) \ 95 - DMUB_SR(MMHUBBUB_SOFT_RESET) 95 + DMUB_SR(MMHUBBUB_SOFT_RESET) \ 96 + DMUB_SR(DCN_VM_FB_LOCATION_BASE) \ 97 + DMUB_SR(DCN_VM_FB_OFFSET) 96 98 97 99 #define DMUB_COMMON_FIELDS() \ 98 100 DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ ··· 123 121 DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \ 124 122 DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ 125 123 DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ 126 - DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) 124 + DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ 125 + DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ 126 + DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) 127 127 128 128 struct dmub_srv_common_reg_offset { 129 129 #define DMUB_SR(reg) uint32_t reg;
+19
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
··· 312 312 if (!dmub->sw_init) 313 313 return DMUB_STATUS_INVALID; 314 314 315 + if (!dmub->hw_init) 316 + return DMUB_STATUS_OK; 317 + 315 318 if (dmub->hw_funcs.is_hw_init) 316 319 *is_hw_init = dmub->hw_funcs.is_hw_init(dmub); 317 320 ··· 414 411 dmub->hw_funcs.reset_release(dmub); 415 412 416 413 dmub->hw_init = true; 414 + 415 + return DMUB_STATUS_OK; 416 + } 417 + 418 + enum dmub_status dmub_srv_hw_reset(struct dmub_srv *dmub) 419 + { 420 + if (!dmub->sw_init) 421 + return DMUB_STATUS_INVALID; 422 + 423 + if (dmub->hw_init == false) 424 + return DMUB_STATUS_OK; 425 + 426 + if (dmub->hw_funcs.reset) 427 + dmub->hw_funcs.reset(dmub); 428 + 429 + dmub->hw_init = false; 417 430 418 431 return DMUB_STATUS_OK; 419 432 }
+129 -178
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
··· 1673 1673 1674 1674 #define _EXTRA_POINTS 3 1675 1675 1676 - bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, 1677 - const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, 1678 - const struct freesync_hdr_tf_params *fs_params) 1679 - { 1680 - struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; 1681 - struct dividers dividers; 1682 - 1683 - struct pwl_float_data *rgb_user = NULL; 1684 - struct pwl_float_data_ex *rgb_regamma = NULL; 1685 - struct gamma_pixel *axis_x = NULL; 1686 - struct pixel_gamma_point *coeff = NULL; 1687 - enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; 1688 - bool ret = false; 1689 - 1690 - if (output_tf->type == TF_TYPE_BYPASS) 1691 - return false; 1692 - 1693 - /* we can use hardcoded curve for plain SRGB TF */ 1694 - if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && 1695 - output_tf->tf == TRANSFER_FUNCTION_SRGB) { 1696 - if (ramp == NULL) 1697 - return true; 1698 - if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || 1699 - (!mapUserRamp && ramp->type == GAMMA_RGB_256)) 1700 - return true; 1701 - } 1702 - 1703 - output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; 1704 - 1705 - if (ramp && ramp->type != GAMMA_CS_TFM_1D && 1706 - (mapUserRamp || ramp->type != GAMMA_RGB_256)) { 1707 - rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, 1708 - sizeof(*rgb_user), 1709 - GFP_KERNEL); 1710 - if (!rgb_user) 1711 - goto rgb_user_alloc_fail; 1712 - 1713 - axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x), 1714 - GFP_KERNEL); 1715 - if (!axis_x) 1716 - goto axis_x_alloc_fail; 1717 - 1718 - dividers.divider1 = dc_fixpt_from_fraction(3, 2); 1719 - dividers.divider2 = dc_fixpt_from_int(2); 1720 - dividers.divider3 = dc_fixpt_from_fraction(5, 2); 1721 - 1722 - build_evenly_distributed_points( 1723 - axis_x, 1724 - ramp->num_entries, 1725 - dividers); 1726 - 1727 - if (ramp->type == GAMMA_RGB_256 && mapUserRamp) 1728 - scale_gamma(rgb_user, ramp, 
dividers); 1729 - else if (ramp->type == GAMMA_RGB_FLOAT_1024) 1730 - scale_gamma_dx(rgb_user, ramp, dividers); 1731 - } 1732 - 1733 - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, 1734 - sizeof(*rgb_regamma), 1735 - GFP_KERNEL); 1736 - if (!rgb_regamma) 1737 - goto rgb_regamma_alloc_fail; 1738 - 1739 - coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), 1740 - GFP_KERNEL); 1741 - if (!coeff) 1742 - goto coeff_alloc_fail; 1743 - 1744 - tf = output_tf->tf; 1745 - if (tf == TRANSFER_FUNCTION_PQ) { 1746 - tf_pts->end_exponent = 7; 1747 - tf_pts->x_point_at_y1_red = 125; 1748 - tf_pts->x_point_at_y1_green = 125; 1749 - tf_pts->x_point_at_y1_blue = 125; 1750 - 1751 - build_pq(rgb_regamma, 1752 - MAX_HW_POINTS, 1753 - coordinates_x, 1754 - output_tf->sdr_ref_white_level); 1755 - } else if (tf == TRANSFER_FUNCTION_GAMMA22 && 1756 - fs_params != NULL && fs_params->skip_tm == 0) { 1757 - build_freesync_hdr(rgb_regamma, 1758 - MAX_HW_POINTS, 1759 - coordinates_x, 1760 - fs_params); 1761 - } else if (tf == TRANSFER_FUNCTION_HLG) { 1762 - build_freesync_hdr(rgb_regamma, 1763 - MAX_HW_POINTS, 1764 - coordinates_x, 1765 - fs_params); 1766 - 1767 - } else { 1768 - tf_pts->end_exponent = 0; 1769 - tf_pts->x_point_at_y1_red = 1; 1770 - tf_pts->x_point_at_y1_green = 1; 1771 - tf_pts->x_point_at_y1_blue = 1; 1772 - 1773 - build_regamma(rgb_regamma, 1774 - MAX_HW_POINTS, 1775 - coordinates_x, tf); 1776 - } 1777 - map_regamma_hw_to_x_user(ramp, coeff, rgb_user, 1778 - coordinates_x, axis_x, rgb_regamma, 1779 - MAX_HW_POINTS, tf_pts, 1780 - (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && 1781 - (ramp && ramp->type != GAMMA_CS_TFM_1D)); 1782 - 1783 - if (ramp && ramp->type == GAMMA_CS_TFM_1D) 1784 - apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); 1785 - 1786 - ret = true; 1787 - 1788 - kvfree(coeff); 1789 - coeff_alloc_fail: 1790 - kvfree(rgb_regamma); 1791 - rgb_regamma_alloc_fail: 1792 - kvfree(axis_x); 1793 - axis_x_alloc_fail: 1794 - kvfree(rgb_user); 1795 - 
rgb_user_alloc_fail: 1796 - return ret; 1797 - } 1798 - 1799 1676 bool calculate_user_regamma_coeff(struct dc_transfer_func *output_tf, 1800 1677 const struct regamma_lut *regamma) 1801 1678 { ··· 1920 2043 return ret; 1921 2044 } 1922 2045 1923 - 1924 - bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, 2046 + static bool calculate_curve(enum dc_transfer_func_predefined trans, 1925 2047 struct dc_transfer_func_distributed_points *points, 2048 + struct pwl_float_data_ex *rgb_regamma, 2049 + const struct freesync_hdr_tf_params *fs_params, 1926 2050 uint32_t sdr_ref_white_level) 1927 2051 { 1928 2052 uint32_t i; 1929 2053 bool ret = false; 1930 - struct pwl_float_data_ex *rgb_regamma = NULL; 1931 2054 1932 2055 if (trans == TRANSFER_FUNCTION_UNITY || 1933 2056 trans == TRANSFER_FUNCTION_LINEAR) { ··· 1937 2060 points->x_point_at_y1_blue = 1; 1938 2061 1939 2062 for (i = 0; i <= MAX_HW_POINTS ; i++) { 1940 - points->red[i] = coordinates_x[i].x; 1941 - points->green[i] = coordinates_x[i].x; 1942 - points->blue[i] = coordinates_x[i].x; 2063 + rgb_regamma[i].r = coordinates_x[i].x; 2064 + rgb_regamma[i].g = coordinates_x[i].x; 2065 + rgb_regamma[i].b = coordinates_x[i].x; 1943 2066 } 2067 + 1944 2068 ret = true; 1945 2069 } else if (trans == TRANSFER_FUNCTION_PQ) { 1946 - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, 1947 - sizeof(*rgb_regamma), 1948 - GFP_KERNEL); 1949 - if (!rgb_regamma) 1950 - goto rgb_regamma_alloc_fail; 1951 2070 points->end_exponent = 7; 1952 2071 points->x_point_at_y1_red = 125; 1953 2072 points->x_point_at_y1_green = 125; 1954 2073 points->x_point_at_y1_blue = 125; 1955 2074 1956 - 1957 2075 build_pq(rgb_regamma, 1958 2076 MAX_HW_POINTS, 1959 2077 coordinates_x, 1960 2078 sdr_ref_white_level); 1961 - for (i = 0; i <= MAX_HW_POINTS ; i++) { 1962 - points->red[i] = rgb_regamma[i].r; 1963 - points->green[i] = rgb_regamma[i].g; 1964 - points->blue[i] = rgb_regamma[i].b; 1965 - } 2079 + 1966 2080 ret = true; 1967 - 1968 - 
kvfree(rgb_regamma); 1969 - } else if (trans == TRANSFER_FUNCTION_SRGB || 1970 - trans == TRANSFER_FUNCTION_BT709 || 1971 - trans == TRANSFER_FUNCTION_GAMMA22 || 1972 - trans == TRANSFER_FUNCTION_GAMMA24 || 1973 - trans == TRANSFER_FUNCTION_GAMMA26) { 1974 - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, 1975 - sizeof(*rgb_regamma), 1976 - GFP_KERNEL); 1977 - if (!rgb_regamma) 1978 - goto rgb_regamma_alloc_fail; 1979 - points->end_exponent = 0; 1980 - points->x_point_at_y1_red = 1; 1981 - points->x_point_at_y1_green = 1; 1982 - points->x_point_at_y1_blue = 1; 1983 - 1984 - build_regamma(rgb_regamma, 2081 + } else if (trans == TRANSFER_FUNCTION_GAMMA22 && 2082 + fs_params != NULL && fs_params->skip_tm == 0) { 2083 + build_freesync_hdr(rgb_regamma, 1985 2084 MAX_HW_POINTS, 1986 2085 coordinates_x, 1987 - trans); 1988 - for (i = 0; i <= MAX_HW_POINTS ; i++) { 1989 - points->red[i] = rgb_regamma[i].r; 1990 - points->green[i] = rgb_regamma[i].g; 1991 - points->blue[i] = rgb_regamma[i].b; 1992 - } 1993 - ret = true; 2086 + fs_params); 1994 2087 1995 - kvfree(rgb_regamma); 2088 + ret = true; 1996 2089 } else if (trans == TRANSFER_FUNCTION_HLG) { 1997 - rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, 1998 - sizeof(*rgb_regamma), 1999 - GFP_KERNEL); 2000 - if (!rgb_regamma) 2001 - goto rgb_regamma_alloc_fail; 2002 2090 points->end_exponent = 4; 2003 2091 points->x_point_at_y1_red = 12; 2004 2092 points->x_point_at_y1_green = 12; ··· 1973 2131 MAX_HW_POINTS, 1974 2132 coordinates_x, 1975 2133 80, 1000); 1976 - for (i = 0; i <= MAX_HW_POINTS ; i++) { 1977 - points->red[i] = rgb_regamma[i].r; 1978 - points->green[i] = rgb_regamma[i].g; 1979 - points->blue[i] = rgb_regamma[i].b; 1980 - } 2134 + 1981 2135 ret = true; 1982 - kvfree(rgb_regamma); 2136 + } else { 2137 + // trans == TRANSFER_FUNCTION_SRGB 2138 + // trans == TRANSFER_FUNCTION_BT709 2139 + // trans == TRANSFER_FUNCTION_GAMMA22 2140 + // trans == TRANSFER_FUNCTION_GAMMA24 2141 + // trans == 
TRANSFER_FUNCTION_GAMMA26 2142 + points->end_exponent = 0; 2143 + points->x_point_at_y1_red = 1; 2144 + points->x_point_at_y1_green = 1; 2145 + points->x_point_at_y1_blue = 1; 2146 + 2147 + build_regamma(rgb_regamma, 2148 + MAX_HW_POINTS, 2149 + coordinates_x, 2150 + trans); 2151 + 2152 + ret = true; 1983 2153 } 1984 - rgb_regamma_alloc_fail: 2154 + 1985 2155 return ret; 1986 2156 } 1987 2157 2158 + bool mod_color_calculate_regamma_params(struct dc_transfer_func *output_tf, 2159 + const struct dc_gamma *ramp, bool mapUserRamp, bool canRomBeUsed, 2160 + const struct freesync_hdr_tf_params *fs_params) 2161 + { 2162 + struct dc_transfer_func_distributed_points *tf_pts = &output_tf->tf_pts; 2163 + struct dividers dividers; 2164 + 2165 + struct pwl_float_data *rgb_user = NULL; 2166 + struct pwl_float_data_ex *rgb_regamma = NULL; 2167 + struct gamma_pixel *axis_x = NULL; 2168 + struct pixel_gamma_point *coeff = NULL; 2169 + enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB; 2170 + bool ret = false; 2171 + 2172 + if (output_tf->type == TF_TYPE_BYPASS) 2173 + return false; 2174 + 2175 + /* we can use hardcoded curve for plain SRGB TF */ 2176 + if (output_tf->type == TF_TYPE_PREDEFINED && canRomBeUsed == true && 2177 + output_tf->tf == TRANSFER_FUNCTION_SRGB) { 2178 + if (ramp == NULL) 2179 + return true; 2180 + if ((ramp->is_identity && ramp->type != GAMMA_CS_TFM_1D) || 2181 + (!mapUserRamp && ramp->type == GAMMA_RGB_256)) 2182 + return true; 2183 + } 2184 + 2185 + output_tf->type = TF_TYPE_DISTRIBUTED_POINTS; 2186 + 2187 + if (ramp && ramp->type != GAMMA_CS_TFM_1D && 2188 + (mapUserRamp || ramp->type != GAMMA_RGB_256)) { 2189 + rgb_user = kvcalloc(ramp->num_entries + _EXTRA_POINTS, 2190 + sizeof(*rgb_user), 2191 + GFP_KERNEL); 2192 + if (!rgb_user) 2193 + goto rgb_user_alloc_fail; 2194 + 2195 + axis_x = kvcalloc(ramp->num_entries + 3, sizeof(*axis_x), 2196 + GFP_KERNEL); 2197 + if (!axis_x) 2198 + goto axis_x_alloc_fail; 2199 + 2200 + dividers.divider1 = 
dc_fixpt_from_fraction(3, 2); 2201 + dividers.divider2 = dc_fixpt_from_int(2); 2202 + dividers.divider3 = dc_fixpt_from_fraction(5, 2); 2203 + 2204 + build_evenly_distributed_points( 2205 + axis_x, 2206 + ramp->num_entries, 2207 + dividers); 2208 + 2209 + if (ramp->type == GAMMA_RGB_256 && mapUserRamp) 2210 + scale_gamma(rgb_user, ramp, dividers); 2211 + else if (ramp->type == GAMMA_RGB_FLOAT_1024) 2212 + scale_gamma_dx(rgb_user, ramp, dividers); 2213 + } 2214 + 2215 + rgb_regamma = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, 2216 + sizeof(*rgb_regamma), 2217 + GFP_KERNEL); 2218 + if (!rgb_regamma) 2219 + goto rgb_regamma_alloc_fail; 2220 + 2221 + coeff = kvcalloc(MAX_HW_POINTS + _EXTRA_POINTS, sizeof(*coeff), 2222 + GFP_KERNEL); 2223 + if (!coeff) 2224 + goto coeff_alloc_fail; 2225 + 2226 + tf = output_tf->tf; 2227 + 2228 + ret = calculate_curve(tf, 2229 + tf_pts, 2230 + rgb_regamma, 2231 + fs_params, 2232 + output_tf->sdr_ref_white_level); 2233 + 2234 + if (ret) { 2235 + map_regamma_hw_to_x_user(ramp, coeff, rgb_user, 2236 + coordinates_x, axis_x, rgb_regamma, 2237 + MAX_HW_POINTS, tf_pts, 2238 + (mapUserRamp || (ramp && ramp->type != GAMMA_RGB_256)) && 2239 + (ramp && ramp->type != GAMMA_CS_TFM_1D)); 2240 + 2241 + if (ramp && ramp->type == GAMMA_CS_TFM_1D) 2242 + apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts); 2243 + } 2244 + 2245 + kvfree(coeff); 2246 + coeff_alloc_fail: 2247 + kvfree(rgb_regamma); 2248 + rgb_regamma_alloc_fail: 2249 + kvfree(axis_x); 2250 + axis_x_alloc_fail: 2251 + kvfree(rgb_user); 2252 + rgb_user_alloc_fail: 2253 + return ret; 2254 + } 1988 2255 1989 2256 bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, 1990 2257 struct dc_transfer_func_distributed_points *points)
-4
drivers/gpu/drm/amd/display/modules/color/color_gamma.h
··· 103 103 bool mod_color_calculate_degamma_params(struct dc_transfer_func *output_tf, 104 104 const struct dc_gamma *ramp, bool mapUserRamp); 105 105 106 - bool mod_color_calculate_curve(enum dc_transfer_func_predefined trans, 107 - struct dc_transfer_func_distributed_points *points, 108 - uint32_t sdr_ref_white_level); 109 - 110 106 bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, 111 107 struct dc_transfer_func_distributed_points *points); 112 108
+1 -1
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
··· 381 381 bool update = false; 382 382 unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; 383 383 384 - //Compute the exit refresh rate and exit frame duration 384 + /* Compute the exit refresh rate and exit frame duration */ 385 385 unsigned int exit_refresh_rate_in_milli_hz = ((1000000000/max_render_time_in_us) 386 386 + (1000*FIXED_REFRESH_EXIT_MARGIN_IN_HZ)); 387 387 unsigned int exit_frame_duration_in_us = 1000000000/exit_refresh_rate_in_milli_hz;
+1 -1
drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
··· 63 63 uint8_t hdcp_capable_dp; 64 64 uint8_t binfo_read_dp; 65 65 uint8_t r0p_available_dp; 66 - uint8_t link_integiry_check; 66 + uint8_t link_integrity_check; 67 67 uint8_t reauth_request_check; 68 68 uint8_t stream_encryption_dp; 69 69 };
+4 -4
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
··· 283 283 hdcp, "bstatus_read")) 284 284 goto out; 285 285 if (!mod_hdcp_execute_and_set(check_link_integrity_dp, 286 - &input->link_integiry_check, &status, 287 - hdcp, "link_integiry_check")) 286 + &input->link_integrity_check, &status, 287 + hdcp, "link_integrity_check")) 288 288 goto out; 289 289 if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, 290 290 &input->reauth_request_check, &status, ··· 431 431 hdcp, "bstatus_read")) 432 432 goto out; 433 433 if (!mod_hdcp_execute_and_set(check_link_integrity_dp, 434 - &input->link_integiry_check, &status, 435 - hdcp, "link_integiry_check")) 434 + &input->link_integrity_check, &status, 435 + hdcp, "link_integrity_check")) 436 436 goto out; 437 437 if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, 438 438 &input->reauth_request_check, &status,
+2 -2
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
··· 241 241 } 242 242 break; 243 243 case D1_A4_AUTHENTICATED: 244 - if (input->link_integiry_check != PASS || 244 + if (input->link_integrity_check != PASS || 245 245 input->reauth_request_check != PASS) { 246 246 /* 1A-07: restart hdcp on a link integrity failure */ 247 247 fail_and_restart_in_ms(0, &status, output); ··· 249 249 } 250 250 break; 251 251 case D1_A6_WAIT_FOR_READY: 252 - if (input->link_integiry_check == FAIL || 252 + if (input->link_integrity_check == FAIL || 253 253 input->reauth_request_check == FAIL) { 254 254 fail_and_restart_in_ms(0, &status, output); 255 255 break;
+3
drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
··· 71 71 #define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL 72 72 #define smnDF_PIE_AON_FabricIndirectConfigAccessDataHi3 0x1d09cUL 73 73 74 + #define smnDF_CS_UMC_AON0_DramBaseAddress0 0x1c110UL 75 + #define smnDF_CS_UMC_AON0_DramLimitAddress0 0x1c114UL 76 + 74 77 #endif
+8
drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
··· 53 53 #define DF_CS_UMC_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000E00L 54 54 #define DF_CS_UMC_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L 55 55 56 + //DF_CS_UMC_AON0_DramLimitAddress0 57 + #define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID__SHIFT 0x0 58 + #define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO__SHIFT 0xa 59 + #define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr__SHIFT 0xc 60 + #define DF_CS_UMC_AON0_DramLimitAddress0__DstFabricID_MASK 0x000003FFL 61 + #define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK 0x00000400L 62 + #define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK 0xFFFFF000L 63 + 56 64 #endif
+4 -2
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h
··· 2060 2060 2061 2061 // addressBlock: gc_sqdec 2062 2062 //SQ_CONFIG 2063 - #define SQ_CONFIG__UNUSED__SHIFT 0x0 2063 + #define SQ_CONFIG__DISABLE_BARRIER_WAITCNT__SHIFT 0x0 2064 + #define SQ_CONFIG__UNUSED__SHIFT 0x1 2064 2065 #define SQ_CONFIG__OVERRIDE_ALU_BUSY__SHIFT 0x7 2065 2066 #define SQ_CONFIG__DEBUG_EN__SHIFT 0x8 2066 2067 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP__SHIFT 0x9 ··· 2080 2079 #define SQ_CONFIG__DISABLE_SP_REDUNDANT_THREAD_GATING__SHIFT 0x1d 2081 2080 #define SQ_CONFIG__DISABLE_FLAT_SOFT_CLAUSE__SHIFT 0x1e 2082 2081 #define SQ_CONFIG__DISABLE_MIMG_SOFT_CLAUSE__SHIFT 0x1f 2083 - #define SQ_CONFIG__UNUSED_MASK 0x0000007FL 2082 + #define SQ_CONFIG__DISABLE_BARRIER_WAITCNT_MASK 0x00000001L 2083 + #define SQ_CONFIG__UNUSED_MASK 0x0000007EL 2084 2084 #define SQ_CONFIG__OVERRIDE_ALU_BUSY_MASK 0x00000080L 2085 2085 #define SQ_CONFIG__DEBUG_EN_MASK 0x00000100L 2086 2086 #define SQ_CONFIG__DEBUG_SINGLE_MEMOP_MASK 0x00000200L
+264
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_offset.h
··· 1 + /* 2 + * Copyright (C) 2020 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included 12 + * in all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 18 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
20 + */ 21 + #ifndef _gc_9_4_1_OFFSET_HEADER 22 + #define _gc_9_4_1_OFFSET_HEADER 23 + 24 + // addressBlock: gc_grbmdec 25 + // base address: 0x8000 26 + #define mmGRBM_CNTL 0x0000 27 + #define mmGRBM_CNTL_BASE_IDX 0 28 + #define mmGRBM_SKEW_CNTL 0x0001 29 + #define mmGRBM_SKEW_CNTL_BASE_IDX 0 30 + #define mmGRBM_STATUS2 0x0002 31 + #define mmGRBM_STATUS2_BASE_IDX 0 32 + #define mmGRBM_PWR_CNTL 0x0003 33 + #define mmGRBM_PWR_CNTL_BASE_IDX 0 34 + #define mmGRBM_STATUS 0x0004 35 + #define mmGRBM_STATUS_BASE_IDX 0 36 + #define mmGRBM_STATUS_SE0 0x0005 37 + #define mmGRBM_STATUS_SE0_BASE_IDX 0 38 + #define mmGRBM_STATUS_SE1 0x0006 39 + #define mmGRBM_STATUS_SE1_BASE_IDX 0 40 + #define mmGRBM_SOFT_RESET 0x0008 41 + #define mmGRBM_SOFT_RESET_BASE_IDX 0 42 + #define mmGRBM_GFX_CLKEN_CNTL 0x000c 43 + #define mmGRBM_GFX_CLKEN_CNTL_BASE_IDX 0 44 + #define mmGRBM_WAIT_IDLE_CLOCKS 0x000d 45 + #define mmGRBM_WAIT_IDLE_CLOCKS_BASE_IDX 0 46 + #define mmGRBM_STATUS_SE2 0x000e 47 + #define mmGRBM_STATUS_SE2_BASE_IDX 0 48 + #define mmGRBM_STATUS_SE3 0x000f 49 + #define mmGRBM_STATUS_SE3_BASE_IDX 0 50 + #define mmGRBM_READ_ERROR 0x0016 51 + #define mmGRBM_READ_ERROR_BASE_IDX 0 52 + #define mmGRBM_READ_ERROR2 0x0017 53 + #define mmGRBM_READ_ERROR2_BASE_IDX 0 54 + #define mmGRBM_INT_CNTL 0x0018 55 + #define mmGRBM_INT_CNTL_BASE_IDX 0 56 + #define mmGRBM_TRAP_OP 0x0019 57 + #define mmGRBM_TRAP_OP_BASE_IDX 0 58 + #define mmGRBM_TRAP_ADDR 0x001a 59 + #define mmGRBM_TRAP_ADDR_BASE_IDX 0 60 + #define mmGRBM_TRAP_ADDR_MSK 0x001b 61 + #define mmGRBM_TRAP_ADDR_MSK_BASE_IDX 0 62 + #define mmGRBM_TRAP_WD 0x001c 63 + #define mmGRBM_TRAP_WD_BASE_IDX 0 64 + #define mmGRBM_TRAP_WD_MSK 0x001d 65 + #define mmGRBM_TRAP_WD_MSK_BASE_IDX 0 66 + #define mmGRBM_DSM_BYPASS 0x001e 67 + #define mmGRBM_DSM_BYPASS_BASE_IDX 0 68 + #define mmGRBM_WRITE_ERROR 0x001f 69 + #define mmGRBM_WRITE_ERROR_BASE_IDX 0 70 + #define mmGRBM_IOV_ERROR 0x0020 71 + #define mmGRBM_IOV_ERROR_BASE_IDX 0 72 + #define 
mmGRBM_CHIP_REVISION 0x0021 73 + #define mmGRBM_CHIP_REVISION_BASE_IDX 0 74 + #define mmGRBM_GFX_CNTL 0x0022 75 + #define mmGRBM_GFX_CNTL_BASE_IDX 0 76 + #define mmGRBM_RSMU_CFG 0x0023 77 + #define mmGRBM_RSMU_CFG_BASE_IDX 0 78 + #define mmGRBM_IH_CREDIT 0x0024 79 + #define mmGRBM_IH_CREDIT_BASE_IDX 0 80 + #define mmGRBM_PWR_CNTL2 0x0025 81 + #define mmGRBM_PWR_CNTL2_BASE_IDX 0 82 + #define mmGRBM_UTCL2_INVAL_RANGE_START 0x0026 83 + #define mmGRBM_UTCL2_INVAL_RANGE_START_BASE_IDX 0 84 + #define mmGRBM_UTCL2_INVAL_RANGE_END 0x0027 85 + #define mmGRBM_UTCL2_INVAL_RANGE_END_BASE_IDX 0 86 + #define mmGRBM_RSMU_READ_ERROR 0x0028 87 + #define mmGRBM_RSMU_READ_ERROR_BASE_IDX 0 88 + #define mmGRBM_CHICKEN_BITS 0x0029 89 + #define mmGRBM_CHICKEN_BITS_BASE_IDX 0 90 + #define mmGRBM_FENCE_RANGE0 0x002a 91 + #define mmGRBM_FENCE_RANGE0_BASE_IDX 0 92 + #define mmGRBM_FENCE_RANGE1 0x002b 93 + #define mmGRBM_FENCE_RANGE1_BASE_IDX 0 94 + #define mmGRBM_NOWHERE 0x003f 95 + #define mmGRBM_NOWHERE_BASE_IDX 0 96 + #define mmGRBM_SCRATCH_REG0 0x0040 97 + #define mmGRBM_SCRATCH_REG0_BASE_IDX 0 98 + #define mmGRBM_SCRATCH_REG1 0x0041 99 + #define mmGRBM_SCRATCH_REG1_BASE_IDX 0 100 + #define mmGRBM_SCRATCH_REG2 0x0042 101 + #define mmGRBM_SCRATCH_REG2_BASE_IDX 0 102 + #define mmGRBM_SCRATCH_REG3 0x0043 103 + #define mmGRBM_SCRATCH_REG3_BASE_IDX 0 104 + #define mmGRBM_SCRATCH_REG4 0x0044 105 + #define mmGRBM_SCRATCH_REG4_BASE_IDX 0 106 + #define mmGRBM_SCRATCH_REG5 0x0045 107 + #define mmGRBM_SCRATCH_REG5_BASE_IDX 0 108 + #define mmGRBM_SCRATCH_REG6 0x0046 109 + #define mmGRBM_SCRATCH_REG6_BASE_IDX 0 110 + #define mmGRBM_SCRATCH_REG7 0x0047 111 + #define mmGRBM_SCRATCH_REG7_BASE_IDX 0 112 + 113 + // addressBlock: gc_cppdec2 114 + // base address: 0xc600 115 + #define mmCPF_EDC_TAG_CNT 0x1189 116 + #define mmCPF_EDC_TAG_CNT_BASE_IDX 0 117 + #define mmCPF_EDC_ROQ_CNT 0x118a 118 + #define mmCPF_EDC_ROQ_CNT_BASE_IDX 0 119 + #define mmCPG_EDC_TAG_CNT 0x118b 120 + #define 
mmCPG_EDC_TAG_CNT_BASE_IDX 0 121 + #define mmCPG_EDC_DMA_CNT 0x118d 122 + #define mmCPG_EDC_DMA_CNT_BASE_IDX 0 123 + #define mmCPC_EDC_SCRATCH_CNT 0x118e 124 + #define mmCPC_EDC_SCRATCH_CNT_BASE_IDX 0 125 + #define mmCPC_EDC_UCODE_CNT 0x118f 126 + #define mmCPC_EDC_UCODE_CNT_BASE_IDX 0 127 + #define mmDC_EDC_STATE_CNT 0x1191 128 + #define mmDC_EDC_STATE_CNT_BASE_IDX 0 129 + #define mmDC_EDC_CSINVOC_CNT 0x1192 130 + #define mmDC_EDC_CSINVOC_CNT_BASE_IDX 0 131 + #define mmDC_EDC_RESTORE_CNT 0x1193 132 + #define mmDC_EDC_RESTORE_CNT_BASE_IDX 0 133 + 134 + // addressBlock: gc_gdsdec 135 + // base address: 0x9700 136 + #define mmGDS_EDC_CNT 0x05c5 137 + #define mmGDS_EDC_CNT_BASE_IDX 0 138 + #define mmGDS_EDC_GRBM_CNT 0x05c6 139 + #define mmGDS_EDC_GRBM_CNT_BASE_IDX 0 140 + #define mmGDS_EDC_OA_DED 0x05c7 141 + #define mmGDS_EDC_OA_DED_BASE_IDX 0 142 + #define mmGDS_EDC_OA_PHY_CNT 0x05cb 143 + #define mmGDS_EDC_OA_PHY_CNT_BASE_IDX 0 144 + #define mmGDS_EDC_OA_PIPE_CNT 0x05cc 145 + #define mmGDS_EDC_OA_PIPE_CNT_BASE_IDX 0 146 + 147 + // addressBlock: gc_shsdec 148 + // base address: 0x9000 149 + #define mmSPI_EDC_CNT 0x0445 150 + #define mmSPI_EDC_CNT_BASE_IDX 0 151 + 152 + // addressBlock: gc_sqdec 153 + // base address: 0x8c00 154 + #define mmSQC_EDC_CNT2 0x032c 155 + #define mmSQC_EDC_CNT2_BASE_IDX 0 156 + #define mmSQC_EDC_CNT3 0x032d 157 + #define mmSQC_EDC_CNT3_BASE_IDX 0 158 + #define mmSQC_EDC_PARITY_CNT3 0x032e 159 + #define mmSQC_EDC_PARITY_CNT3_BASE_IDX 0 160 + #define mmSQC_EDC_CNT 0x03a2 161 + #define mmSQC_EDC_CNT_BASE_IDX 0 162 + #define mmSQ_EDC_SEC_CNT 0x03a3 163 + #define mmSQ_EDC_SEC_CNT_BASE_IDX 0 164 + #define mmSQ_EDC_DED_CNT 0x03a4 165 + #define mmSQ_EDC_DED_CNT_BASE_IDX 0 166 + #define mmSQ_EDC_INFO 0x03a5 167 + #define mmSQ_EDC_INFO_BASE_IDX 0 168 + #define mmSQ_EDC_CNT 0x03a6 169 + #define mmSQ_EDC_CNT_BASE_IDX 0 170 + 171 + // addressBlock: gc_tpdec 172 + // base address: 0x9400 173 + #define mmTA_EDC_CNT 0x0586 174 + #define 
mmTA_EDC_CNT_BASE_IDX 0 175 + 176 + // addressBlock: gc_tcdec 177 + // base address: 0xac00 178 + #define mmTCP_EDC_CNT 0x0b17 179 + #define mmTCP_EDC_CNT_BASE_IDX 0 180 + #define mmTCP_EDC_CNT_NEW 0x0b18 181 + #define mmTCP_EDC_CNT_NEW_BASE_IDX 0 182 + #define mmTCP_ATC_EDC_GATCL1_CNT 0x12b1 183 + #define mmTCP_ATC_EDC_GATCL1_CNT_BASE_IDX 0 184 + #define mmTCI_EDC_CNT 0x0b60 185 + #define mmTCI_EDC_CNT_BASE_IDX 0 186 + #define mmTCC_EDC_CNT 0x0b82 187 + #define mmTCC_EDC_CNT_BASE_IDX 0 188 + #define mmTCC_EDC_CNT2 0x0b83 189 + #define mmTCC_EDC_CNT2_BASE_IDX 0 190 + #define mmTCA_EDC_CNT 0x0bc5 191 + #define mmTCA_EDC_CNT_BASE_IDX 0 192 + 193 + // addressBlock: gc_tpdec 194 + // base address: 0x9400 195 + #define mmTD_EDC_CNT 0x052e 196 + #define mmTD_EDC_CNT_BASE_IDX 0 197 + #define mmTA_EDC_CNT 0x0586 198 + #define mmTA_EDC_CNT_BASE_IDX 0 199 + 200 + // addressBlock: gc_ea_gceadec2 201 + // base address: 0x9c00 202 + #define mmGCEA_EDC_CNT 0x0706 203 + #define mmGCEA_EDC_CNT_BASE_IDX 0 204 + #define mmGCEA_EDC_CNT2 0x0707 205 + #define mmGCEA_EDC_CNT2_BASE_IDX 0 206 + #define mmGCEA_EDC_CNT3 0x071b 207 + #define mmGCEA_EDC_CNT3_BASE_IDX 0 208 + 209 + // addressBlock: gc_gfxudec 210 + // base address: 0x30000 211 + #define mmSCRATCH_REG0 0x2040 212 + #define mmSCRATCH_REG0_BASE_IDX 1 213 + #define mmSCRATCH_REG1 0x2041 214 + #define mmSCRATCH_REG1_BASE_IDX 1 215 + #define mmSCRATCH_REG2 0x2042 216 + #define mmSCRATCH_REG2_BASE_IDX 1 217 + #define mmSCRATCH_REG3 0x2043 218 + #define mmSCRATCH_REG3_BASE_IDX 1 219 + #define mmSCRATCH_REG4 0x2044 220 + #define mmSCRATCH_REG4_BASE_IDX 1 221 + #define mmSCRATCH_REG5 0x2045 222 + #define mmSCRATCH_REG5_BASE_IDX 1 223 + #define mmSCRATCH_REG6 0x2046 224 + #define mmSCRATCH_REG6_BASE_IDX 1 225 + #define mmSCRATCH_REG7 0x2047 226 + #define mmSCRATCH_REG7_BASE_IDX 1 227 + #define mmGRBM_GFX_INDEX 0x2200 228 + #define mmGRBM_GFX_INDEX_BASE_IDX 1 229 + 230 + // addressBlock: gc_utcl2_atcl2dec 231 + // base address: 0xa000 232 
+ #define mmATC_L2_CACHE_4K_DSM_INDEX 0x080e 233 + #define mmATC_L2_CACHE_4K_DSM_INDEX_BASE_IDX 0 234 + #define mmATC_L2_CACHE_2M_DSM_INDEX 0x080f 235 + #define mmATC_L2_CACHE_2M_DSM_INDEX_BASE_IDX 0 236 + #define mmATC_L2_CACHE_4K_DSM_CNTL 0x0810 237 + #define mmATC_L2_CACHE_4K_DSM_CNTL_BASE_IDX 0 238 + #define mmATC_L2_CACHE_2M_DSM_CNTL 0x0811 239 + #define mmATC_L2_CACHE_2M_DSM_CNTL_BASE_IDX 0 240 + 241 + // addressBlock: gc_utcl2_vml2pfdec 242 + // base address: 0xa100 243 + #define mmVML2_MEM_ECC_INDEX 0x0860 244 + #define mmVML2_MEM_ECC_INDEX_BASE_IDX 0 245 + #define mmVML2_WALKER_MEM_ECC_INDEX 0x0861 246 + #define mmVML2_WALKER_MEM_ECC_INDEX_BASE_IDX 0 247 + #define mmUTCL2_MEM_ECC_INDEX 0x0862 248 + #define mmUTCL2_MEM_ECC_INDEX_BASE_IDX 0 249 + 250 + #define mmVML2_MEM_ECC_CNTL 0x0863 251 + #define mmVML2_MEM_ECC_CNTL_BASE_IDX 0 252 + #define mmVML2_WALKER_MEM_ECC_CNTL 0x0864 253 + #define mmVML2_WALKER_MEM_ECC_CNTL_BASE_IDX 0 254 + #define mmUTCL2_MEM_ECC_CNTL 0x0865 255 + #define mmUTCL2_MEM_ECC_CNTL_BASE_IDX 0 256 + 257 + // addressBlock: gc_rlcpdec 258 + // base address: 0x3b000 259 + #define mmRLC_EDC_CNT 0x4d40 260 + #define mmRLC_EDC_CNT_BASE_IDX 1 261 + #define mmRLC_EDC_CNT2 0x4d41 262 + #define mmRLC_EDC_CNT2_BASE_IDX 1 263 + 264 + #endif
+748
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_4_1_sh_mask.h
··· 1 + /* 2 + * Copyright (C) 2020 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included 12 + * in all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 18 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
20 + */ 21 + #ifndef _gc_9_4_1_SH_MASK_HEADER 22 + #define _gc_9_4_1_SH_MASK_HEADER 23 + 24 + // addressBlock: gc_cppdec2 25 + //CPF_EDC_TAG_CNT 26 + #define CPF_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0 27 + #define CPF_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2 28 + #define CPF_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L 29 + #define CPF_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL 30 + //CPF_EDC_ROQ_CNT 31 + #define CPF_EDC_ROQ_CNT__DED_COUNT_ME1__SHIFT 0x0 32 + #define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1__SHIFT 0x2 33 + #define CPF_EDC_ROQ_CNT__DED_COUNT_ME2__SHIFT 0x4 34 + #define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2__SHIFT 0x6 35 + #define CPF_EDC_ROQ_CNT__DED_COUNT_ME1_MASK 0x00000003L 36 + #define CPF_EDC_ROQ_CNT__SEC_COUNT_ME1_MASK 0x0000000CL 37 + #define CPF_EDC_ROQ_CNT__DED_COUNT_ME2_MASK 0x00000030L 38 + #define CPF_EDC_ROQ_CNT__SEC_COUNT_ME2_MASK 0x000000C0L 39 + //CPG_EDC_TAG_CNT 40 + #define CPG_EDC_TAG_CNT__DED_COUNT__SHIFT 0x0 41 + #define CPG_EDC_TAG_CNT__SEC_COUNT__SHIFT 0x2 42 + #define CPG_EDC_TAG_CNT__DED_COUNT_MASK 0x00000003L 43 + #define CPG_EDC_TAG_CNT__SEC_COUNT_MASK 0x0000000CL 44 + //CPG_EDC_DMA_CNT 45 + #define CPG_EDC_DMA_CNT__ROQ_DED_COUNT__SHIFT 0x0 46 + #define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT__SHIFT 0x2 47 + #define CPG_EDC_DMA_CNT__TAG_DED_COUNT__SHIFT 0x4 48 + #define CPG_EDC_DMA_CNT__TAG_SEC_COUNT__SHIFT 0x6 49 + #define CPG_EDC_DMA_CNT__ROQ_DED_COUNT_MASK 0x00000003L 50 + #define CPG_EDC_DMA_CNT__ROQ_SEC_COUNT_MASK 0x0000000CL 51 + #define CPG_EDC_DMA_CNT__TAG_DED_COUNT_MASK 0x00000030L 52 + #define CPG_EDC_DMA_CNT__TAG_SEC_COUNT_MASK 0x000000C0L 53 + //CPC_EDC_SCRATCH_CNT 54 + #define CPC_EDC_SCRATCH_CNT__DED_COUNT__SHIFT 0x0 55 + #define CPC_EDC_SCRATCH_CNT__SEC_COUNT__SHIFT 0x2 56 + #define CPC_EDC_SCRATCH_CNT__DED_COUNT_MASK 0x00000003L 57 + #define CPC_EDC_SCRATCH_CNT__SEC_COUNT_MASK 0x0000000CL 58 + //CPC_EDC_UCODE_CNT 59 + #define CPC_EDC_UCODE_CNT__DED_COUNT__SHIFT 0x0 60 + #define CPC_EDC_UCODE_CNT__SEC_COUNT__SHIFT 0x2 61 + #define 
CPC_EDC_UCODE_CNT__DED_COUNT_MASK 0x00000003L 62 + #define CPC_EDC_UCODE_CNT__SEC_COUNT_MASK 0x0000000CL 63 + //DC_EDC_STATE_CNT 64 + #define DC_EDC_STATE_CNT__DED_COUNT_ME1__SHIFT 0x0 65 + #define DC_EDC_STATE_CNT__SEC_COUNT_ME1__SHIFT 0x2 66 + #define DC_EDC_STATE_CNT__DED_COUNT_ME1_MASK 0x00000003L 67 + #define DC_EDC_STATE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL 68 + //DC_EDC_CSINVOC_CNT 69 + #define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1__SHIFT 0x0 70 + #define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1__SHIFT 0x2 71 + #define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1__SHIFT 0x4 72 + #define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1__SHIFT 0x6 73 + #define DC_EDC_CSINVOC_CNT__DED_COUNT_ME1_MASK 0x00000003L 74 + #define DC_EDC_CSINVOC_CNT__SEC_COUNT_ME1_MASK 0x0000000CL 75 + #define DC_EDC_CSINVOC_CNT__DED_COUNT1_ME1_MASK 0x00000030L 76 + #define DC_EDC_CSINVOC_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L 77 + //DC_EDC_RESTORE_CNT 78 + #define DC_EDC_RESTORE_CNT__DED_COUNT_ME1__SHIFT 0x0 79 + #define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1__SHIFT 0x2 80 + #define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1__SHIFT 0x4 81 + #define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1__SHIFT 0x6 82 + #define DC_EDC_RESTORE_CNT__DED_COUNT_ME1_MASK 0x00000003L 83 + #define DC_EDC_RESTORE_CNT__SEC_COUNT_ME1_MASK 0x0000000CL 84 + #define DC_EDC_RESTORE_CNT__DED_COUNT1_ME1_MASK 0x00000030L 85 + #define DC_EDC_RESTORE_CNT__SEC_COUNT1_ME1_MASK 0x000000C0L 86 + 87 + // addressBlock: gc_gdsdec 88 + //GDS_EDC_CNT 89 + #define GDS_EDC_CNT__GDS_MEM_DED__SHIFT 0x0 90 + #define GDS_EDC_CNT__GDS_MEM_SEC__SHIFT 0x4 91 + #define GDS_EDC_CNT__UNUSED__SHIFT 0x6 92 + #define GDS_EDC_CNT__GDS_MEM_DED_MASK 0x00000003L 93 + #define GDS_EDC_CNT__GDS_MEM_SEC_MASK 0x00000030L 94 + #define GDS_EDC_CNT__UNUSED_MASK 0xFFFFFFC0L 95 + //GDS_EDC_GRBM_CNT 96 + #define GDS_EDC_GRBM_CNT__DED__SHIFT 0x0 97 + #define GDS_EDC_GRBM_CNT__SEC__SHIFT 0x2 98 + #define GDS_EDC_GRBM_CNT__UNUSED__SHIFT 0x4 99 + #define GDS_EDC_GRBM_CNT__DED_MASK 0x00000003L 100 + #define 
GDS_EDC_GRBM_CNT__SEC_MASK 0x0000000CL 101 + #define GDS_EDC_GRBM_CNT__UNUSED_MASK 0xFFFFFFF0L 102 + //GDS_EDC_OA_DED 103 + #define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED__SHIFT 0x0 104 + #define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED__SHIFT 0x1 105 + #define GDS_EDC_OA_DED__ME0_CS_DED__SHIFT 0x2 106 + #define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED__SHIFT 0x3 107 + #define GDS_EDC_OA_DED__ME1_PIPE0_DED__SHIFT 0x4 108 + #define GDS_EDC_OA_DED__ME1_PIPE1_DED__SHIFT 0x5 109 + #define GDS_EDC_OA_DED__ME1_PIPE2_DED__SHIFT 0x6 110 + #define GDS_EDC_OA_DED__ME1_PIPE3_DED__SHIFT 0x7 111 + #define GDS_EDC_OA_DED__ME2_PIPE0_DED__SHIFT 0x8 112 + #define GDS_EDC_OA_DED__ME2_PIPE1_DED__SHIFT 0x9 113 + #define GDS_EDC_OA_DED__ME2_PIPE2_DED__SHIFT 0xa 114 + #define GDS_EDC_OA_DED__ME2_PIPE3_DED__SHIFT 0xb 115 + #define GDS_EDC_OA_DED__UNUSED1__SHIFT 0xc 116 + #define GDS_EDC_OA_DED__ME0_GFXHP3D_PIX_DED_MASK 0x00000001L 117 + #define GDS_EDC_OA_DED__ME0_GFXHP3D_VTX_DED_MASK 0x00000002L 118 + #define GDS_EDC_OA_DED__ME0_CS_DED_MASK 0x00000004L 119 + #define GDS_EDC_OA_DED__ME0_GFXHP3D_GS_DED_MASK 0x00000008L 120 + #define GDS_EDC_OA_DED__ME1_PIPE0_DED_MASK 0x00000010L 121 + #define GDS_EDC_OA_DED__ME1_PIPE1_DED_MASK 0x00000020L 122 + #define GDS_EDC_OA_DED__ME1_PIPE2_DED_MASK 0x00000040L 123 + #define GDS_EDC_OA_DED__ME1_PIPE3_DED_MASK 0x00000080L 124 + #define GDS_EDC_OA_DED__ME2_PIPE0_DED_MASK 0x00000100L 125 + #define GDS_EDC_OA_DED__ME2_PIPE1_DED_MASK 0x00000200L 126 + #define GDS_EDC_OA_DED__ME2_PIPE2_DED_MASK 0x00000400L 127 + #define GDS_EDC_OA_DED__ME2_PIPE3_DED_MASK 0x00000800L 128 + #define GDS_EDC_OA_DED__UNUSED1_MASK 0xFFFFF000L 129 + //GDS_EDC_OA_PHY_CNT 130 + #define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC__SHIFT 0x0 131 + #define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED__SHIFT 0x2 132 + #define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC__SHIFT 0x4 133 + #define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED__SHIFT 0x6 134 + #define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC__SHIFT 0x8 135 
+ #define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED__SHIFT 0xa 136 + #define GDS_EDC_OA_PHY_CNT__UNUSED1__SHIFT 0xc 137 + #define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_SEC_MASK 0x00000003L 138 + #define GDS_EDC_OA_PHY_CNT__ME0_CS_PIPE_MEM_DED_MASK 0x0000000CL 139 + #define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_SEC_MASK 0x00000030L 140 + #define GDS_EDC_OA_PHY_CNT__PHY_CMD_RAM_MEM_DED_MASK 0x000000C0L 141 + #define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_SEC_MASK 0x00000300L 142 + #define GDS_EDC_OA_PHY_CNT__PHY_DATA_RAM_MEM_DED_MASK 0x00000C00L 143 + #define GDS_EDC_OA_PHY_CNT__UNUSED1_MASK 0xFFFFF000L 144 + //GDS_EDC_OA_PIPE_CNT 145 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC__SHIFT 0x0 146 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED__SHIFT 0x2 147 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC__SHIFT 0x4 148 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED__SHIFT 0x6 149 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC__SHIFT 0x8 150 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED__SHIFT 0xa 151 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC__SHIFT 0xc 152 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED__SHIFT 0xe 153 + #define GDS_EDC_OA_PIPE_CNT__UNUSED__SHIFT 0x10 154 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_SEC_MASK 0x00000003L 155 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE0_PIPE_MEM_DED_MASK 0x0000000CL 156 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_SEC_MASK 0x00000030L 157 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE1_PIPE_MEM_DED_MASK 0x000000C0L 158 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_SEC_MASK 0x00000300L 159 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE2_PIPE_MEM_DED_MASK 0x00000C00L 160 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_SEC_MASK 0x00003000L 161 + #define GDS_EDC_OA_PIPE_CNT__ME1_PIPE3_PIPE_MEM_DED_MASK 0x0000C000L 162 + #define GDS_EDC_OA_PIPE_CNT__UNUSED_MASK 0xFFFF0000L 163 + 164 + // addressBlock: gc_shsdec 165 + //SPI_EDC_CNT 166 + #define 
SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT__SHIFT 0x0 167 + #define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT__SHIFT 0x2 168 + #define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT__SHIFT 0x4 169 + #define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT__SHIFT 0x6 170 + #define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT__SHIFT 0x8 171 + #define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT__SHIFT 0xa 172 + #define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT__SHIFT 0xc 173 + #define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT__SHIFT 0xe 174 + #define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT__SHIFT 0x10 175 + #define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT__SHIFT 0x12 176 + #define SPI_EDC_CNT__SPI_SR_MEM_SEC_COUNT_MASK 0x00000003L 177 + #define SPI_EDC_CNT__SPI_SR_MEM_DED_COUNT_MASK 0x0000000CL 178 + #define SPI_EDC_CNT__SPI_GDS_EXPREQ_SEC_COUNT_MASK 0x00000030L 179 + #define SPI_EDC_CNT__SPI_GDS_EXPREQ_DED_COUNT_MASK 0x000000C0L 180 + #define SPI_EDC_CNT__SPI_WB_GRANT_30_SEC_COUNT_MASK 0x00000300L 181 + #define SPI_EDC_CNT__SPI_WB_GRANT_30_DED_COUNT_MASK 0x00000C00L 182 + #define SPI_EDC_CNT__SPI_WB_GRANT_61_SEC_COUNT_MASK 0x00003000L 183 + #define SPI_EDC_CNT__SPI_WB_GRANT_61_DED_COUNT_MASK 0x0000C000L 184 + #define SPI_EDC_CNT__SPI_LIFE_CNT_SEC_COUNT_MASK 0x00030000L 185 + #define SPI_EDC_CNT__SPI_LIFE_CNT_DED_COUNT_MASK 0x000C0000L 186 + 187 + // addressBlock: gc_sqdec 188 + //SQC_EDC_CNT2 189 + #define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x0 190 + #define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT__SHIFT 0x2 191 + #define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0x4 192 + #define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT__SHIFT 0x6 193 + #define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT__SHIFT 0x8 194 + #define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT__SHIFT 0xa 195 + #define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT__SHIFT 0xc 196 + #define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT__SHIFT 0xe 197 + #define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x10 198 + #define 
SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT__SHIFT 0x12 199 + #define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000003L 200 + #define SQC_EDC_CNT2__INST_BANKA_TAG_RAM_DED_COUNT_MASK 0x0000000CL 201 + #define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00000030L 202 + #define SQC_EDC_CNT2__INST_BANKA_BANK_RAM_DED_COUNT_MASK 0x000000C0L 203 + #define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_SEC_COUNT_MASK 0x00000300L 204 + #define SQC_EDC_CNT2__DATA_BANKA_TAG_RAM_DED_COUNT_MASK 0x00000C00L 205 + #define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_SEC_COUNT_MASK 0x00003000L 206 + #define SQC_EDC_CNT2__DATA_BANKA_BANK_RAM_DED_COUNT_MASK 0x0000C000L 207 + #define SQC_EDC_CNT2__INST_UTCL1_LFIFO_SEC_COUNT_MASK 0x00030000L 208 + #define SQC_EDC_CNT2__INST_UTCL1_LFIFO_DED_COUNT_MASK 0x000C0000L 209 + //SQC_EDC_CNT3 210 + #define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x0 211 + #define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT__SHIFT 0x2 212 + #define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0x4 213 + #define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT__SHIFT 0x6 214 + #define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT__SHIFT 0x8 215 + #define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT__SHIFT 0xa 216 + #define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT__SHIFT 0xc 217 + #define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT__SHIFT 0xe 218 + #define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000003L 219 + #define SQC_EDC_CNT3__INST_BANKB_TAG_RAM_DED_COUNT_MASK 0x0000000CL 220 + #define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00000030L 221 + #define SQC_EDC_CNT3__INST_BANKB_BANK_RAM_DED_COUNT_MASK 0x000000C0L 222 + #define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_SEC_COUNT_MASK 0x00000300L 223 + #define SQC_EDC_CNT3__DATA_BANKB_TAG_RAM_DED_COUNT_MASK 0x00000C00L 224 + #define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_SEC_COUNT_MASK 0x00003000L 225 + #define SQC_EDC_CNT3__DATA_BANKB_BANK_RAM_DED_COUNT_MASK 0x0000C000L 226 + //SQC_EDC_PARITY_CNT3 227 + #define 
SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x0 228 + #define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x2 229 + #define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0x4 230 + #define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0x6 231 + #define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT__SHIFT 0x8 232 + #define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT__SHIFT 0xa 233 + #define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT__SHIFT 0xc 234 + #define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT__SHIFT 0xe 235 + #define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT__SHIFT 0x10 236 + #define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT__SHIFT 0x12 237 + #define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x14 238 + #define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x16 239 + #define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT__SHIFT 0x18 240 + #define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT__SHIFT 0x1a 241 + #define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT__SHIFT 0x1c 242 + #define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT__SHIFT 0x1e 243 + #define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_SEC_COUNT_MASK 0x00000003L 244 + #define SQC_EDC_PARITY_CNT3__INST_BANKA_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x0000000CL 245 + #define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00000030L 246 + #define SQC_EDC_PARITY_CNT3__INST_BANKA_MISS_FIFO_DED_COUNT_MASK 0x000000C0L 247 + #define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_SEC_COUNT_MASK 0x00000300L 248 + #define SQC_EDC_PARITY_CNT3__DATA_BANKA_HIT_FIFO_DED_COUNT_MASK 0x00000C00L 249 + #define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_SEC_COUNT_MASK 0x00003000L 250 + #define SQC_EDC_PARITY_CNT3__DATA_BANKA_MISS_FIFO_DED_COUNT_MASK 0x0000C000L 251 + #define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_SEC_COUNT_MASK 
0x00030000L 252 + #define SQC_EDC_PARITY_CNT3__INST_BANKB_UTCL1_MISS_FIFO_DED_COUNT_MASK 0x000C0000L 253 + #define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x00300000L 254 + #define SQC_EDC_PARITY_CNT3__INST_BANKB_MISS_FIFO_DED_COUNT_MASK 0x00C00000L 255 + #define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_SEC_COUNT_MASK 0x03000000L 256 + #define SQC_EDC_PARITY_CNT3__DATA_BANKB_HIT_FIFO_DED_COUNT_MASK 0x0C000000L 257 + #define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_SEC_COUNT_MASK 0x30000000L 258 + #define SQC_EDC_PARITY_CNT3__DATA_BANKB_MISS_FIFO_DED_COUNT_MASK 0xC0000000L 259 + //SQC_EDC_CNT 260 + #define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x0 261 + #define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x2 262 + #define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x4 263 + #define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT__SHIFT 0x6 264 + #define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x8 265 + #define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT__SHIFT 0xa 266 + #define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT__SHIFT 0xc 267 + #define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT__SHIFT 0xe 268 + #define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x10 269 + #define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x12 270 + #define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x14 271 + #define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT__SHIFT 0x16 272 + #define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT__SHIFT 0x18 273 + #define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT__SHIFT 0x1a 274 + #define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT__SHIFT 0x1c 275 + #define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT__SHIFT 0x1e 276 + #define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000003L 277 + #define SQC_EDC_CNT__DATA_CU0_WRITE_DATA_BUF_DED_COUNT_MASK 0x0000000CL 278 + #define SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_SEC_COUNT_MASK 0x00000030L 279 + #define 
SQC_EDC_CNT__DATA_CU0_UTCL1_LFIFO_DED_COUNT_MASK 0x000000C0L 280 + #define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00000300L 281 + #define SQC_EDC_CNT__DATA_CU1_WRITE_DATA_BUF_DED_COUNT_MASK 0x00000C00L 282 + #define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_SEC_COUNT_MASK 0x00003000L 283 + #define SQC_EDC_CNT__DATA_CU1_UTCL1_LFIFO_DED_COUNT_MASK 0x0000C000L 284 + #define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_SEC_COUNT_MASK 0x00030000L 285 + #define SQC_EDC_CNT__DATA_CU2_WRITE_DATA_BUF_DED_COUNT_MASK 0x000C0000L 286 + #define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_SEC_COUNT_MASK 0x00300000L 287 + #define SQC_EDC_CNT__DATA_CU2_UTCL1_LFIFO_DED_COUNT_MASK 0x00C00000L 288 + #define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_SEC_COUNT_MASK 0x03000000L 289 + #define SQC_EDC_CNT__DATA_CU3_WRITE_DATA_BUF_DED_COUNT_MASK 0x0C000000L 290 + #define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_SEC_COUNT_MASK 0x30000000L 291 + #define SQC_EDC_CNT__DATA_CU3_UTCL1_LFIFO_DED_COUNT_MASK 0xC0000000L 292 + //SQ_EDC_SEC_CNT 293 + #define SQ_EDC_SEC_CNT__LDS_SEC__SHIFT 0x0 294 + #define SQ_EDC_SEC_CNT__SGPR_SEC__SHIFT 0x8 295 + #define SQ_EDC_SEC_CNT__VGPR_SEC__SHIFT 0x10 296 + #define SQ_EDC_SEC_CNT__LDS_SEC_MASK 0x000000FFL 297 + #define SQ_EDC_SEC_CNT__SGPR_SEC_MASK 0x0000FF00L 298 + #define SQ_EDC_SEC_CNT__VGPR_SEC_MASK 0x00FF0000L 299 + //SQ_EDC_DED_CNT 300 + #define SQ_EDC_DED_CNT__LDS_DED__SHIFT 0x0 301 + #define SQ_EDC_DED_CNT__SGPR_DED__SHIFT 0x8 302 + #define SQ_EDC_DED_CNT__VGPR_DED__SHIFT 0x10 303 + #define SQ_EDC_DED_CNT__LDS_DED_MASK 0x000000FFL 304 + #define SQ_EDC_DED_CNT__SGPR_DED_MASK 0x0000FF00L 305 + #define SQ_EDC_DED_CNT__VGPR_DED_MASK 0x00FF0000L 306 + //SQ_EDC_INFO 307 + #define SQ_EDC_INFO__WAVE_ID__SHIFT 0x0 308 + #define SQ_EDC_INFO__SIMD_ID__SHIFT 0x4 309 + #define SQ_EDC_INFO__SOURCE__SHIFT 0x6 310 + #define SQ_EDC_INFO__VM_ID__SHIFT 0x9 311 + #define SQ_EDC_INFO__WAVE_ID_MASK 0x0000000FL 312 + #define SQ_EDC_INFO__SIMD_ID_MASK 0x00000030L 313 + #define 
SQ_EDC_INFO__SOURCE_MASK 0x000001C0L 314 + #define SQ_EDC_INFO__VM_ID_MASK 0x00001E00L 315 + //SQ_EDC_CNT 316 + #define SQ_EDC_CNT__LDS_D_SEC_COUNT__SHIFT 0x0 317 + #define SQ_EDC_CNT__LDS_D_DED_COUNT__SHIFT 0x2 318 + #define SQ_EDC_CNT__LDS_I_SEC_COUNT__SHIFT 0x4 319 + #define SQ_EDC_CNT__LDS_I_DED_COUNT__SHIFT 0x6 320 + #define SQ_EDC_CNT__SGPR_SEC_COUNT__SHIFT 0x8 321 + #define SQ_EDC_CNT__SGPR_DED_COUNT__SHIFT 0xa 322 + #define SQ_EDC_CNT__VGPR0_SEC_COUNT__SHIFT 0xc 323 + #define SQ_EDC_CNT__VGPR0_DED_COUNT__SHIFT 0xe 324 + #define SQ_EDC_CNT__VGPR1_SEC_COUNT__SHIFT 0x10 325 + #define SQ_EDC_CNT__VGPR1_DED_COUNT__SHIFT 0x12 326 + #define SQ_EDC_CNT__VGPR2_SEC_COUNT__SHIFT 0x14 327 + #define SQ_EDC_CNT__VGPR2_DED_COUNT__SHIFT 0x16 328 + #define SQ_EDC_CNT__VGPR3_SEC_COUNT__SHIFT 0x18 329 + #define SQ_EDC_CNT__VGPR3_DED_COUNT__SHIFT 0x1a 330 + #define SQ_EDC_CNT__LDS_D_SEC_COUNT_MASK 0x00000003L 331 + #define SQ_EDC_CNT__LDS_D_DED_COUNT_MASK 0x0000000CL 332 + #define SQ_EDC_CNT__LDS_I_SEC_COUNT_MASK 0x00000030L 333 + #define SQ_EDC_CNT__LDS_I_DED_COUNT_MASK 0x000000C0L 334 + #define SQ_EDC_CNT__SGPR_SEC_COUNT_MASK 0x00000300L 335 + #define SQ_EDC_CNT__SGPR_DED_COUNT_MASK 0x00000C00L 336 + #define SQ_EDC_CNT__VGPR0_SEC_COUNT_MASK 0x00003000L 337 + #define SQ_EDC_CNT__VGPR0_DED_COUNT_MASK 0x0000C000L 338 + #define SQ_EDC_CNT__VGPR1_SEC_COUNT_MASK 0x00030000L 339 + #define SQ_EDC_CNT__VGPR1_DED_COUNT_MASK 0x000C0000L 340 + #define SQ_EDC_CNT__VGPR2_SEC_COUNT_MASK 0x00300000L 341 + #define SQ_EDC_CNT__VGPR2_DED_COUNT_MASK 0x00C00000L 342 + #define SQ_EDC_CNT__VGPR3_SEC_COUNT_MASK 0x03000000L 343 + #define SQ_EDC_CNT__VGPR3_DED_COUNT_MASK 0x0C000000L 344 + 345 + // addressBlock: gc_tpdec 346 + //TA_EDC_CNT 347 + #define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0 348 + #define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2 349 + #define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4 350 + #define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6 351 + #define 
TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8 352 + #define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa 353 + #define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc 354 + #define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe 355 + #define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10 356 + #define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12 357 + #define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L 358 + #define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL 359 + #define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L 360 + #define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L 361 + #define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L 362 + #define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L 363 + #define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L 364 + #define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L 365 + #define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L 366 + #define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L 367 + 368 + // addressBlock: gc_tcdec 369 + //TCP_EDC_CNT 370 + #define TCP_EDC_CNT__SEC_COUNT__SHIFT 0x0 371 + #define TCP_EDC_CNT__LFIFO_SED_COUNT__SHIFT 0x8 372 + #define TCP_EDC_CNT__DED_COUNT__SHIFT 0x10 373 + #define TCP_EDC_CNT__SEC_COUNT_MASK 0x000000FFL 374 + #define TCP_EDC_CNT__LFIFO_SED_COUNT_MASK 0x0000FF00L 375 + #define TCP_EDC_CNT__DED_COUNT_MASK 0x00FF0000L 376 + //TCP_EDC_CNT_NEW 377 + #define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT__SHIFT 0x0 378 + #define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT__SHIFT 0x2 379 + #define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT__SHIFT 0x4 380 + #define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT__SHIFT 0x6 381 + #define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT__SHIFT 0x8 382 + #define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT__SHIFT 0xa 383 + #define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT__SHIFT 0xc 384 + #define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT__SHIFT 0xe 385 + #define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT__SHIFT 0x10 386 + #define 
TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT__SHIFT 0x12 387 + #define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT__SHIFT 0x14 388 + #define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT__SHIFT 0x16 389 + #define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT__SHIFT 0x18 390 + #define TCP_EDC_CNT_NEW__CACHE_RAM_SEC_COUNT_MASK 0x00000003L 391 + #define TCP_EDC_CNT_NEW__CACHE_RAM_DED_COUNT_MASK 0x0000000CL 392 + #define TCP_EDC_CNT_NEW__LFIFO_RAM_SEC_COUNT_MASK 0x00000030L 393 + #define TCP_EDC_CNT_NEW__LFIFO_RAM_DED_COUNT_MASK 0x000000C0L 394 + #define TCP_EDC_CNT_NEW__CMD_FIFO_SEC_COUNT_MASK 0x00000300L 395 + #define TCP_EDC_CNT_NEW__CMD_FIFO_DED_COUNT_MASK 0x00000C00L 396 + #define TCP_EDC_CNT_NEW__VM_FIFO_SEC_COUNT_MASK 0x00003000L 397 + #define TCP_EDC_CNT_NEW__VM_FIFO_DED_COUNT_MASK 0x0000C000L 398 + #define TCP_EDC_CNT_NEW__DB_RAM_SED_COUNT_MASK 0x00030000L 399 + #define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_SEC_COUNT_MASK 0x000C0000L 400 + #define TCP_EDC_CNT_NEW__UTCL1_LFIFO0_DED_COUNT_MASK 0x00300000L 401 + #define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_SEC_COUNT_MASK 0x00C00000L 402 + #define TCP_EDC_CNT_NEW__UTCL1_LFIFO1_DED_COUNT_MASK 0x03000000L 403 + //TCP_ATC_EDC_GATCL1_CNT 404 + #define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC__SHIFT 0x0 405 + #define TCP_ATC_EDC_GATCL1_CNT__DATA_SEC_MASK 0x000000FFL 406 + //TCI_EDC_CNT 407 + #define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT__SHIFT 0x0 408 + #define TCI_EDC_CNT__WRITE_RAM_DED_COUNT__SHIFT 0x2 409 + #define TCI_EDC_CNT__WRITE_RAM_SEC_COUNT_MASK 0x00000003L 410 + #define TCI_EDC_CNT__WRITE_RAM_DED_COUNT_MASK 0x0000000CL 411 + //TCA_EDC_CNT 412 + #define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT__SHIFT 0x0 413 + #define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT__SHIFT 0x2 414 + #define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT__SHIFT 0x4 415 + #define TCA_EDC_CNT__REQ_FIFO_DED_COUNT__SHIFT 0x6 416 + #define TCA_EDC_CNT__HOLE_FIFO_SEC_COUNT_MASK 0x00000003L 417 + #define TCA_EDC_CNT__HOLE_FIFO_DED_COUNT_MASK 0x0000000CL 418 + #define TCA_EDC_CNT__REQ_FIFO_SEC_COUNT_MASK 0x00000030L 419 
+ #define TCA_EDC_CNT__REQ_FIFO_DED_COUNT_MASK 0x000000C0L 420 + //TCC_EDC_CNT 421 + #define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT__SHIFT 0x0 422 + #define TCC_EDC_CNT__CACHE_DATA_DED_COUNT__SHIFT 0x2 423 + #define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT__SHIFT 0x4 424 + #define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT__SHIFT 0x6 425 + #define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT__SHIFT 0x8 426 + #define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT__SHIFT 0xa 427 + #define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT__SHIFT 0xc 428 + #define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT__SHIFT 0xe 429 + #define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT__SHIFT 0x10 430 + #define TCC_EDC_CNT__SRC_FIFO_DED_COUNT__SHIFT 0x12 431 + #define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT__SHIFT 0x14 432 + #define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT__SHIFT 0x16 433 + #define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT__SHIFT 0x18 434 + #define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT__SHIFT 0x1a 435 + #define TCC_EDC_CNT__CACHE_DATA_SEC_COUNT_MASK 0x00000003L 436 + #define TCC_EDC_CNT__CACHE_DATA_DED_COUNT_MASK 0x0000000CL 437 + #define TCC_EDC_CNT__CACHE_DIRTY_SEC_COUNT_MASK 0x00000030L 438 + #define TCC_EDC_CNT__CACHE_DIRTY_DED_COUNT_MASK 0x000000C0L 439 + #define TCC_EDC_CNT__HIGH_RATE_TAG_SEC_COUNT_MASK 0x00000300L 440 + #define TCC_EDC_CNT__HIGH_RATE_TAG_DED_COUNT_MASK 0x00000C00L 441 + #define TCC_EDC_CNT__LOW_RATE_TAG_SEC_COUNT_MASK 0x00003000L 442 + #define TCC_EDC_CNT__LOW_RATE_TAG_DED_COUNT_MASK 0x0000C000L 443 + #define TCC_EDC_CNT__SRC_FIFO_SEC_COUNT_MASK 0x00030000L 444 + #define TCC_EDC_CNT__SRC_FIFO_DED_COUNT_MASK 0x000C0000L 445 + #define TCC_EDC_CNT__LATENCY_FIFO_SEC_COUNT_MASK 0x00300000L 446 + #define TCC_EDC_CNT__LATENCY_FIFO_DED_COUNT_MASK 0x00C00000L 447 + #define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_SEC_COUNT_MASK 0x03000000L 448 + #define TCC_EDC_CNT__LATENCY_FIFO_NEXT_RAM_DED_COUNT_MASK 0x0C000000L 449 + //TCC_EDC_CNT2 450 + #define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT__SHIFT 0x0 451 + #define 
TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT__SHIFT 0x2 452 + #define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT__SHIFT 0x4 453 + #define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT__SHIFT 0x6 454 + #define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT__SHIFT 0x8 455 + #define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT__SHIFT 0xa 456 + #define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT__SHIFT 0xc 457 + #define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT__SHIFT 0xe 458 + #define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT__SHIFT 0x10 459 + #define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT__SHIFT 0x12 460 + #define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT__SHIFT 0x14 461 + #define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT__SHIFT 0x16 462 + #define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT__SHIFT 0x18 463 + #define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT__SHIFT 0x1a 464 + #define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT__SHIFT 0x1c 465 + #define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT__SHIFT 0x1e 466 + #define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_SEC_COUNT_MASK 0x00000003L 467 + #define TCC_EDC_CNT2__CACHE_TAG_PROBE_FIFO_DED_COUNT_MASK 0x0000000CL 468 + #define TCC_EDC_CNT2__UC_ATOMIC_FIFO_SEC_COUNT_MASK 0x00000030L 469 + #define TCC_EDC_CNT2__UC_ATOMIC_FIFO_DED_COUNT_MASK 0x000000C0L 470 + #define TCC_EDC_CNT2__WRITE_CACHE_READ_SEC_COUNT_MASK 0x00000300L 471 + #define TCC_EDC_CNT2__WRITE_CACHE_READ_DED_COUNT_MASK 0x00000C00L 472 + #define TCC_EDC_CNT2__RETURN_CONTROL_SEC_COUNT_MASK 0x00003000L 473 + #define TCC_EDC_CNT2__RETURN_CONTROL_DED_COUNT_MASK 0x0000C000L 474 + #define TCC_EDC_CNT2__IN_USE_TRANSFER_SEC_COUNT_MASK 0x00030000L 475 + #define TCC_EDC_CNT2__IN_USE_TRANSFER_DED_COUNT_MASK 0x000C0000L 476 + #define TCC_EDC_CNT2__IN_USE_DEC_SEC_COUNT_MASK 0x00300000L 477 + #define TCC_EDC_CNT2__IN_USE_DEC_DED_COUNT_MASK 0x00C00000L 478 + #define TCC_EDC_CNT2__WRITE_RETURN_SEC_COUNT_MASK 0x03000000L 479 + #define TCC_EDC_CNT2__WRITE_RETURN_DED_COUNT_MASK 0x0C000000L 480 + #define TCC_EDC_CNT2__RETURN_DATA_SEC_COUNT_MASK 
0x30000000L 481 + #define TCC_EDC_CNT2__RETURN_DATA_DED_COUNT_MASK 0xC0000000L 482 + 483 + // addressBlock: gc_tpdec 484 + //TD_EDC_CNT 485 + #define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT__SHIFT 0x0 486 + #define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT__SHIFT 0x2 487 + #define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT__SHIFT 0x4 488 + #define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT__SHIFT 0x6 489 + #define TD_EDC_CNT__CS_FIFO_SEC_COUNT__SHIFT 0x8 490 + #define TD_EDC_CNT__CS_FIFO_DED_COUNT__SHIFT 0xa 491 + #define TD_EDC_CNT__SS_FIFO_LO_SEC_COUNT_MASK 0x00000003L 492 + #define TD_EDC_CNT__SS_FIFO_LO_DED_COUNT_MASK 0x0000000CL 493 + #define TD_EDC_CNT__SS_FIFO_HI_SEC_COUNT_MASK 0x00000030L 494 + #define TD_EDC_CNT__SS_FIFO_HI_DED_COUNT_MASK 0x000000C0L 495 + #define TD_EDC_CNT__CS_FIFO_SEC_COUNT_MASK 0x00000300L 496 + #define TD_EDC_CNT__CS_FIFO_DED_COUNT_MASK 0x00000C00L 497 + //TA_EDC_CNT 498 + #define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT__SHIFT 0x0 499 + #define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT__SHIFT 0x2 500 + #define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT__SHIFT 0x4 501 + #define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT__SHIFT 0x6 502 + #define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT__SHIFT 0x8 503 + #define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT__SHIFT 0xa 504 + #define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT__SHIFT 0xc 505 + #define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT__SHIFT 0xe 506 + #define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT__SHIFT 0x10 507 + #define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT__SHIFT 0x12 508 + #define TA_EDC_CNT__TA_FS_DFIFO_SEC_COUNT_MASK 0x00000003L 509 + #define TA_EDC_CNT__TA_FS_DFIFO_DED_COUNT_MASK 0x0000000CL 510 + #define TA_EDC_CNT__TA_FS_AFIFO_SEC_COUNT_MASK 0x00000030L 511 + #define TA_EDC_CNT__TA_FS_AFIFO_DED_COUNT_MASK 0x000000C0L 512 + #define TA_EDC_CNT__TA_FL_LFIFO_SEC_COUNT_MASK 0x00000300L 513 + #define TA_EDC_CNT__TA_FL_LFIFO_DED_COUNT_MASK 0x00000C00L 514 + #define TA_EDC_CNT__TA_FX_LFIFO_SEC_COUNT_MASK 0x00003000L 515 + #define TA_EDC_CNT__TA_FX_LFIFO_DED_COUNT_MASK 0x0000C000L 516 + 
#define TA_EDC_CNT__TA_FS_CFIFO_SEC_COUNT_MASK 0x00030000L 517 + #define TA_EDC_CNT__TA_FS_CFIFO_DED_COUNT_MASK 0x000C0000L 518 + 519 + // addressBlock: gc_ea_gceadec2 520 + //GCEA_EDC_CNT 521 + #define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT__SHIFT 0x0 522 + #define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT__SHIFT 0x2 523 + #define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT__SHIFT 0x4 524 + #define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT__SHIFT 0x6 525 + #define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT__SHIFT 0x8 526 + #define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT__SHIFT 0xa 527 + #define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT__SHIFT 0xc 528 + #define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT__SHIFT 0xe 529 + #define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT__SHIFT 0x10 530 + #define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT__SHIFT 0x12 531 + #define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT__SHIFT 0x14 532 + #define GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT__SHIFT 0x16 533 + #define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT__SHIFT 0x18 534 + #define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT__SHIFT 0x1a 535 + #define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT__SHIFT 0x1c 536 + #define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT__SHIFT 0x1e 537 + #define GCEA_EDC_CNT__DRAMRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 538 + #define GCEA_EDC_CNT__DRAMRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 539 + #define GCEA_EDC_CNT__DRAMWR_CMDMEM_SEC_COUNT_MASK 0x00000030L 540 + #define GCEA_EDC_CNT__DRAMWR_CMDMEM_DED_COUNT_MASK 0x000000C0L 541 + #define GCEA_EDC_CNT__DRAMWR_DATAMEM_SEC_COUNT_MASK 0x00000300L 542 + #define GCEA_EDC_CNT__DRAMWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 543 + #define GCEA_EDC_CNT__RRET_TAGMEM_SEC_COUNT_MASK 0x00003000L 544 + #define GCEA_EDC_CNT__RRET_TAGMEM_DED_COUNT_MASK 0x0000C000L 545 + #define GCEA_EDC_CNT__WRET_TAGMEM_SEC_COUNT_MASK 0x00030000L 546 + #define GCEA_EDC_CNT__WRET_TAGMEM_DED_COUNT_MASK 0x000C0000L 547 + #define GCEA_EDC_CNT__DRAMRD_PAGEMEM_SED_COUNT_MASK 0x00300000L 548 + #define 
GCEA_EDC_CNT__DRAMWR_PAGEMEM_SED_COUNT_MASK 0x00C00000L 549 + #define GCEA_EDC_CNT__IORD_CMDMEM_SED_COUNT_MASK 0x03000000L 550 + #define GCEA_EDC_CNT__IOWR_CMDMEM_SED_COUNT_MASK 0x0C000000L 551 + #define GCEA_EDC_CNT__IOWR_DATAMEM_SED_COUNT_MASK 0x30000000L 552 + #define GCEA_EDC_CNT__MAM_AFMEM_SEC_COUNT_MASK 0xC0000000L 553 + //GCEA_EDC_CNT2 554 + #define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT__SHIFT 0x0 555 + #define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT__SHIFT 0x2 556 + #define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT__SHIFT 0x4 557 + #define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT__SHIFT 0x6 558 + #define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT__SHIFT 0x8 559 + #define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa 560 + #define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 561 + #define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe 562 + #define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 563 + #define GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 564 + #define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 565 + #define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 566 + #define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 567 + #define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a 568 + #define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c 569 + #define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e 570 + #define GCEA_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 571 + #define GCEA_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 572 + #define GCEA_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L 573 + #define GCEA_EDC_CNT2__GMIWR_CMDMEM_DED_COUNT_MASK 0x000000C0L 574 + #define GCEA_EDC_CNT2__GMIWR_DATAMEM_SEC_COUNT_MASK 0x00000300L 575 + #define GCEA_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 576 + #define GCEA_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L 577 + #define GCEA_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L 578 + #define GCEA_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L 579 + #define 
GCEA_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L 580 + #define GCEA_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L 581 + #define GCEA_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L 582 + #define GCEA_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L 583 + #define GCEA_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L 584 + #define GCEA_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L 585 + #define GCEA_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 586 + //GCEA_EDC_CNT3 587 + #define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT__SHIFT 0x0 588 + #define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT__SHIFT 0x2 589 + #define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT__SHIFT 0x4 590 + #define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT__SHIFT 0x6 591 + #define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT__SHIFT 0x8 592 + #define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT__SHIFT 0xa 593 + #define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT__SHIFT 0xc 594 + #define GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT__SHIFT 0xe 595 + #define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT__SHIFT 0x10 596 + #define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT__SHIFT 0x12 597 + #define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT__SHIFT 0x14 598 + #define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT__SHIFT 0x16 599 + #define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT__SHIFT 0x18 600 + #define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT__SHIFT 0x1a 601 + #define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT__SHIFT 0x1c 602 + #define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT__SHIFT 0x1e 603 + #define GCEA_EDC_CNT3__DRAMRD_PAGEMEM_DED_COUNT_MASK 0x00000003L 604 + #define GCEA_EDC_CNT3__DRAMWR_PAGEMEM_DED_COUNT_MASK 0x0000000CL 605 + #define GCEA_EDC_CNT3__IORD_CMDMEM_DED_COUNT_MASK 0x00000030L 606 + #define GCEA_EDC_CNT3__IOWR_CMDMEM_DED_COUNT_MASK 0x000000C0L 607 + #define GCEA_EDC_CNT3__IOWR_DATAMEM_DED_COUNT_MASK 0x00000300L 608 + #define GCEA_EDC_CNT3__GMIRD_PAGEMEM_DED_COUNT_MASK 0x00000C00L 609 + #define GCEA_EDC_CNT3__GMIWR_PAGEMEM_DED_COUNT_MASK 0x00003000L 610 + #define 
GCEA_EDC_CNT3__MAM_AFMEM_DED_COUNT_MASK 0x0000C000L 611 + #define GCEA_EDC_CNT3__MAM_A0MEM_SEC_COUNT_MASK 0x00030000L 612 + #define GCEA_EDC_CNT3__MAM_A0MEM_DED_COUNT_MASK 0x000C0000L 613 + #define GCEA_EDC_CNT3__MAM_A1MEM_SEC_COUNT_MASK 0x00300000L 614 + #define GCEA_EDC_CNT3__MAM_A1MEM_DED_COUNT_MASK 0x00C00000L 615 + #define GCEA_EDC_CNT3__MAM_A2MEM_SEC_COUNT_MASK 0x03000000L 616 + #define GCEA_EDC_CNT3__MAM_A2MEM_DED_COUNT_MASK 0x0C000000L 617 + #define GCEA_EDC_CNT3__MAM_A3MEM_SEC_COUNT_MASK 0x30000000L 618 + #define GCEA_EDC_CNT3__MAM_A3MEM_DED_COUNT_MASK 0xC0000000L 619 + 620 + // addressBlock: gc_gfxudec 621 + //GRBM_GFX_INDEX 622 + #define GRBM_GFX_INDEX__INSTANCE_INDEX__SHIFT 0x0 623 + #define GRBM_GFX_INDEX__SH_INDEX__SHIFT 0x8 624 + #define GRBM_GFX_INDEX__SE_INDEX__SHIFT 0x10 625 + #define GRBM_GFX_INDEX__SH_BROADCAST_WRITES__SHIFT 0x1d 626 + #define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT 0x1e 627 + #define GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT 0x1f 628 + #define GRBM_GFX_INDEX__INSTANCE_INDEX_MASK 0x000000FFL 629 + #define GRBM_GFX_INDEX__SH_INDEX_MASK 0x0000FF00L 630 + #define GRBM_GFX_INDEX__SE_INDEX_MASK 0x00FF0000L 631 + #define GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK 0x20000000L 632 + #define GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK 0x40000000L 633 + #define GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK 0x80000000L 634 + 635 + // addressBlock: gc_utcl2_atcl2dec 636 + //ATC_L2_CNTL 637 + //ATC_L2_CACHE_4K_DSM_INDEX 638 + #define ATC_L2_CACHE_4K_DSM_INDEX__INDEX__SHIFT 0x0 639 + #define ATC_L2_CACHE_4K_DSM_INDEX__INDEX_MASK 0x000000FFL 640 + //ATC_L2_CACHE_2M_DSM_INDEX 641 + #define ATC_L2_CACHE_2M_DSM_INDEX__INDEX__SHIFT 0x0 642 + #define ATC_L2_CACHE_2M_DSM_INDEX__INDEX_MASK 0x000000FFL 643 + //ATC_L2_CACHE_4K_DSM_CNTL 644 + #define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT__SHIFT 0xd 645 + #define ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT__SHIFT 0xf 646 + #define ATC_L2_CACHE_4K_DSM_CNTL__SEC_COUNT_MASK 0x00006000L 647 + #define 
ATC_L2_CACHE_4K_DSM_CNTL__DED_COUNT_MASK 0x00018000L 648 + //ATC_L2_CACHE_2M_DSM_CNTL 649 + #define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT__SHIFT 0xd 650 + #define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT__SHIFT 0xf 651 + #define ATC_L2_CACHE_2M_DSM_CNTL__SEC_COUNT_MASK 0x00006000L 652 + #define ATC_L2_CACHE_2M_DSM_CNTL__DED_COUNT_MASK 0x00018000L 653 + 654 + // addressBlock: gc_utcl2_vml2pfdec 655 + //VML2_MEM_ECC_INDEX 656 + #define VML2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 657 + #define VML2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL 658 + //VML2_WALKER_MEM_ECC_INDEX 659 + #define VML2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0 660 + #define VML2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL 661 + //UTCL2_MEM_ECC_INDEX 662 + #define UTCL2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 663 + #define UTCL2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL 664 + //VML2_MEM_ECC_CNTL 665 + #define VML2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc 666 + #define VML2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe 667 + #define VML2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L 668 + #define VML2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L 669 + //VML2_WALKER_MEM_ECC_CNTL 670 + #define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc 671 + #define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe 672 + #define VML2_WALKER_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L 673 + #define VML2_WALKER_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L 674 + //UTCL2_MEM_ECC_CNTL 675 + #define UTCL2_MEM_ECC_CNTL__SEC_COUNT__SHIFT 0xc 676 + #define UTCL2_MEM_ECC_CNTL__DED_COUNT__SHIFT 0xe 677 + #define UTCL2_MEM_ECC_CNTL__SEC_COUNT_MASK 0x00003000L 678 + #define UTCL2_MEM_ECC_CNTL__DED_COUNT_MASK 0x0000C000L 679 + 680 + // addressBlock: gc_rlcpdec 681 + //RLC_EDC_CNT 682 + #define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT__SHIFT 0x0 683 + #define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT__SHIFT 0x2 684 + #define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4 685 + #define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT__SHIFT 0x6 686 + #define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT__SHIFT 0x8 
687 + #define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT__SHIFT 0xa 688 + #define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc 689 + #define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT__SHIFT 0xe 690 + #define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT__SHIFT 0x10 691 + #define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT__SHIFT 0x12 692 + #define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14 693 + #define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT__SHIFT 0x16 694 + #define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT__SHIFT 0x18 695 + #define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT__SHIFT 0x1a 696 + #define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT__SHIFT 0x1c 697 + #define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT__SHIFT 0x1e 698 + #define RLC_EDC_CNT__RLCG_INSTR_RAM_SEC_COUNT_MASK 0x00000003L 699 + #define RLC_EDC_CNT__RLCG_INSTR_RAM_DED_COUNT_MASK 0x0000000CL 700 + #define RLC_EDC_CNT__RLCG_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L 701 + #define RLC_EDC_CNT__RLCG_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L 702 + #define RLC_EDC_CNT__RLCV_INSTR_RAM_SEC_COUNT_MASK 0x00000300L 703 + #define RLC_EDC_CNT__RLCV_INSTR_RAM_DED_COUNT_MASK 0x00000C00L 704 + #define RLC_EDC_CNT__RLCV_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L 705 + #define RLC_EDC_CNT__RLCV_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L 706 + #define RLC_EDC_CNT__RLC_TCTAG_RAM_SEC_COUNT_MASK 0x00030000L 707 + #define RLC_EDC_CNT__RLC_TCTAG_RAM_DED_COUNT_MASK 0x000C0000L 708 + #define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L 709 + #define RLC_EDC_CNT__RLC_SPM_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L 710 + #define RLC_EDC_CNT__RLC_SRM_DATA_RAM_SEC_COUNT_MASK 0x03000000L 711 + #define RLC_EDC_CNT__RLC_SRM_DATA_RAM_DED_COUNT_MASK 0x0C000000L 712 + #define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_SEC_COUNT_MASK 0x30000000L 713 + #define RLC_EDC_CNT__RLC_SRM_ADDR_RAM_DED_COUNT_MASK 0xC0000000L 714 + //RLC_EDC_CNT2 715 + #define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT__SHIFT 0x0 716 + #define 
RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT__SHIFT 0x2 717 + #define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT__SHIFT 0x4 718 + #define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT__SHIFT 0x6 719 + #define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT__SHIFT 0x8 720 + #define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT__SHIFT 0xa 721 + #define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT__SHIFT 0xc 722 + #define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT__SHIFT 0xe 723 + #define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT__SHIFT 0x10 724 + #define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT__SHIFT 0x12 725 + #define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT__SHIFT 0x14 726 + #define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT__SHIFT 0x16 727 + #define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT__SHIFT 0x18 728 + #define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT__SHIFT 0x1a 729 + #define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT__SHIFT 0x1c 730 + #define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT__SHIFT 0x1e 731 + #define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_SEC_COUNT_MASK 0x00000003L 732 + #define RLC_EDC_CNT2__RLC_SPM_SE0_SCRATCH_RAM_DED_COUNT_MASK 0x0000000CL 733 + #define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_SEC_COUNT_MASK 0x00000030L 734 + #define RLC_EDC_CNT2__RLC_SPM_SE1_SCRATCH_RAM_DED_COUNT_MASK 0x000000C0L 735 + #define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_SEC_COUNT_MASK 0x00000300L 736 + #define RLC_EDC_CNT2__RLC_SPM_SE2_SCRATCH_RAM_DED_COUNT_MASK 0x00000C00L 737 + #define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_SEC_COUNT_MASK 0x00003000L 738 + #define RLC_EDC_CNT2__RLC_SPM_SE3_SCRATCH_RAM_DED_COUNT_MASK 0x0000C000L 739 + #define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_SEC_COUNT_MASK 0x00030000L 740 + #define RLC_EDC_CNT2__RLC_SPM_SE4_SCRATCH_RAM_DED_COUNT_MASK 0x000C0000L 741 + #define RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_SEC_COUNT_MASK 0x00300000L 742 + #define 
RLC_EDC_CNT2__RLC_SPM_SE5_SCRATCH_RAM_DED_COUNT_MASK 0x00C00000L 743 + #define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_SEC_COUNT_MASK 0x03000000L 744 + #define RLC_EDC_CNT2__RLC_SPM_SE6_SCRATCH_RAM_DED_COUNT_MASK 0x0C000000L 745 + #define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_SEC_COUNT_MASK 0x30000000L 746 + #define RLC_EDC_CNT2__RLC_SPM_SE7_SCRATCH_RAM_DED_COUNT_MASK 0xC0000000L 747 + 748 + #endif
+128
drivers/gpu/drm/amd/include/asic_reg/mmhub/mmhub_9_4_1_sh_mask.h
··· 11185 11185 #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa 11186 11186 #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 11187 11187 #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe 11188 + #define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 11189 + #define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 11190 + #define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 11191 + #define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 11192 + #define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 11193 + #define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a 11194 + #define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c 11195 + #define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e 11188 11196 #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 11189 11197 #define MMEA0_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 11190 11198 #define MMEA0_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L ··· 11201 11193 #define MMEA0_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 11202 11194 #define MMEA0_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L 11203 11195 #define MMEA0_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L 11196 + #define MMEA0_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L 11197 + #define MMEA0_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L 11198 + #define MMEA0_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L 11199 + #define MMEA0_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L 11200 + #define MMEA0_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L 11201 + #define MMEA0_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L 11202 + #define MMEA0_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L 11203 + #define MMEA0_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 11204 11204 //MMEA0_DSM_CNTL 11205 11205 #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 11206 11206 #define MMEA0_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 ··· 14213 14197 #define 
MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa 14214 14198 #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 14215 14199 #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe 14200 + #define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 14201 + #define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 14202 + #define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 14203 + #define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 14204 + #define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 14205 + #define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a 14206 + #define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c 14207 + #define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e 14216 14208 #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 14217 14209 #define MMEA1_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 14218 14210 #define MMEA1_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L ··· 14229 14205 #define MMEA1_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 14230 14206 #define MMEA1_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L 14231 14207 #define MMEA1_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L 14208 + #define MMEA1_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L 14209 + #define MMEA1_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L 14210 + #define MMEA1_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L 14211 + #define MMEA1_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L 14212 + #define MMEA1_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L 14213 + #define MMEA1_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L 14214 + #define MMEA1_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L 14215 + #define MMEA1_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 14232 14216 //MMEA1_DSM_CNTL 14233 14217 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 14234 14218 #define MMEA1_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 ··· 17241 17209 #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa 17242 17210 
#define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 17243 17211 #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe 17212 + #define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 17213 + #define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 17214 + #define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 17215 + #define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 17216 + #define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 17217 + #define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a 17218 + #define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c 17219 + #define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e 17244 17220 #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 17245 17221 #define MMEA2_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 17246 17222 #define MMEA2_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L ··· 17257 17217 #define MMEA2_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 17258 17218 #define MMEA2_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L 17259 17219 #define MMEA2_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L 17220 + #define MMEA2_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L 17221 + #define MMEA2_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L 17222 + #define MMEA2_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L 17223 + #define MMEA2_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L 17224 + #define MMEA2_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L 17225 + #define MMEA2_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L 17226 + #define MMEA2_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L 17227 + #define MMEA2_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 17260 17228 //MMEA2_DSM_CNTL 17261 17229 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 17262 17230 #define MMEA2_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 ··· 20269 20221 #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa 20270 20222 #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 20271 
20223 #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe 20224 + #define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 20225 + #define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 20226 + #define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 20227 + #define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 20228 + #define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 20229 + #define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a 20230 + #define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c 20231 + #define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e 20272 20232 #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 20273 20233 #define MMEA3_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 20274 20234 #define MMEA3_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L ··· 20285 20229 #define MMEA3_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 20286 20230 #define MMEA3_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L 20287 20231 #define MMEA3_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L 20232 + #define MMEA3_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L 20233 + #define MMEA3_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L 20234 + #define MMEA3_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L 20235 + #define MMEA3_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L 20236 + #define MMEA3_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L 20237 + #define MMEA3_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L 20238 + #define MMEA3_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L 20239 + #define MMEA3_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 20288 20240 //MMEA3_DSM_CNTL 20289 20241 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 20290 20242 #define MMEA3_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 ··· 23297 23233 #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa 23298 23234 #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 23299 23235 #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe 
23236 + #define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 23237 + #define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 23238 + #define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 23239 + #define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 23240 + #define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 23241 + #define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a 23242 + #define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c 23243 + #define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e 23300 23244 #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 23301 23245 #define MMEA4_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 23302 23246 #define MMEA4_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L ··· 23313 23241 #define MMEA4_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 23314 23242 #define MMEA4_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L 23315 23243 #define MMEA4_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L 23244 + #define MMEA4_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L 23245 + #define MMEA4_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L 23246 + #define MMEA4_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L 23247 + #define MMEA4_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L 23248 + #define MMEA4_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L 23249 + #define MMEA4_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L 23250 + #define MMEA4_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L 23251 + #define MMEA4_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 23316 23252 //MMEA4_DSM_CNTL 23317 23253 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 23318 23254 #define MMEA4_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 ··· 35032 34952 #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa 35033 34953 #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 35034 34954 #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe 34955 + #define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 
34956 + #define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 34957 + #define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 34958 + #define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 34959 + #define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 34960 + #define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a 34961 + #define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c 34962 + #define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e 35035 34963 #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 35036 34964 #define MMEA5_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 35037 34965 #define MMEA5_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L ··· 35048 34960 #define MMEA5_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 35049 34961 #define MMEA5_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L 35050 34962 #define MMEA5_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L 34963 + #define MMEA5_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L 34964 + #define MMEA5_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L 34965 + #define MMEA5_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L 34966 + #define MMEA5_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L 34967 + #define MMEA5_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L 34968 + #define MMEA5_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L 34969 + #define MMEA5_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L 34970 + #define MMEA5_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 35051 34971 //MMEA5_DSM_CNTL 35052 34972 #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 35053 34973 #define MMEA5_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 ··· 38060 37964 #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa 38061 37965 #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 38062 37966 #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe 37967 + #define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 37968 + #define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 
37969 + #define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 37970 + #define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 37971 + #define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 37972 + #define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a 37973 + #define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c 37974 + #define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e 38063 37975 #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 38064 37976 #define MMEA6_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 38065 37977 #define MMEA6_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L ··· 38076 37972 #define MMEA6_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 38077 37973 #define MMEA6_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L 38078 37974 #define MMEA6_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L 37975 + #define MMEA6_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L 37976 + #define MMEA6_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L 37977 + #define MMEA6_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L 37978 + #define MMEA6_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L 37979 + #define MMEA6_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L 37980 + #define MMEA6_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L 37981 + #define MMEA6_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L 37982 + #define MMEA6_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 38079 37983 //MMEA6_DSM_CNTL 38080 37984 #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 38081 37985 #define MMEA6_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2 ··· 41088 40976 #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT__SHIFT 0xa 41089 40977 #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT__SHIFT 0xc 41090 40978 #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT__SHIFT 0xe 40979 + #define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT__SHIFT 0x10 40980 + #define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT__SHIFT 0x12 40981 + #define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT__SHIFT 0x14 
40982 + #define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT__SHIFT 0x16 40983 + #define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT__SHIFT 0x18 40984 + #define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT__SHIFT 0x1a 40985 + #define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT__SHIFT 0x1c 40986 + #define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT__SHIFT 0x1e 41091 40987 #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_SEC_COUNT_MASK 0x00000003L 41092 40988 #define MMEA7_EDC_CNT2__GMIRD_CMDMEM_DED_COUNT_MASK 0x0000000CL 41093 40989 #define MMEA7_EDC_CNT2__GMIWR_CMDMEM_SEC_COUNT_MASK 0x00000030L ··· 41104 40984 #define MMEA7_EDC_CNT2__GMIWR_DATAMEM_DED_COUNT_MASK 0x00000C00L 41105 40985 #define MMEA7_EDC_CNT2__GMIRD_PAGEMEM_SED_COUNT_MASK 0x00003000L 41106 40986 #define MMEA7_EDC_CNT2__GMIWR_PAGEMEM_SED_COUNT_MASK 0x0000C000L 40987 + #define MMEA7_EDC_CNT2__MAM_D0MEM_SED_COUNT_MASK 0x00030000L 40988 + #define MMEA7_EDC_CNT2__MAM_D1MEM_SED_COUNT_MASK 0x000C0000L 40989 + #define MMEA7_EDC_CNT2__MAM_D2MEM_SED_COUNT_MASK 0x00300000L 40990 + #define MMEA7_EDC_CNT2__MAM_D3MEM_SED_COUNT_MASK 0x00C00000L 40991 + #define MMEA7_EDC_CNT2__MAM_D0MEM_DED_COUNT_MASK 0x03000000L 40992 + #define MMEA7_EDC_CNT2__MAM_D1MEM_DED_COUNT_MASK 0x0C000000L 40993 + #define MMEA7_EDC_CNT2__MAM_D2MEM_DED_COUNT_MASK 0x30000000L 40994 + #define MMEA7_EDC_CNT2__MAM_D3MEM_DED_COUNT_MASK 0xC0000000L 41107 40995 //MMEA7_DSM_CNTL 41108 40996 #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_DSM_IRRITATOR_DATA__SHIFT 0x0 41109 40997 #define MMEA7_DSM_CNTL__DRAMRD_CMDMEM_ENABLE_SINGLE_WRITE__SHIFT 0x2
+18
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
··· 21 21 */ 22 22 23 23 #include <linux/firmware.h> 24 + #include <linux/pci.h> 24 25 25 26 #include "pp_debug.h" 26 27 #include "amdgpu.h" ··· 1138 1137 ret = smu_system_features_control(smu, true); 1139 1138 if (ret) 1140 1139 return ret; 1140 + 1141 + if (adev->asic_type == CHIP_NAVI10) { 1142 + if ((adev->pdev->device == 0x731f && (adev->pdev->revision == 0xc2 || 1143 + adev->pdev->revision == 0xc3 || 1144 + adev->pdev->revision == 0xca || 1145 + adev->pdev->revision == 0xcb)) || 1146 + (adev->pdev->device == 0x66af && (adev->pdev->revision == 0xf3 || 1147 + adev->pdev->revision == 0xf4 || 1148 + adev->pdev->revision == 0xf5 || 1149 + adev->pdev->revision == 0xf6))) { 1150 + ret = smu_disable_umc_cdr_12gbps_workaround(smu); 1151 + if (ret) { 1152 + pr_err("Workaround failed to disable UMC CDR feature on 12Gbps SKU!\n"); 1153 + return ret; 1154 + } 1155 + } 1156 + } 1141 1157 } 1142 1158 if (adev->asic_type != CHIP_ARCTURUS) { 1143 1159 ret = smu_notify_display_change(smu);
+14 -9
drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
··· 1026 1026 1027 1027 clocks->num_levels = 0; 1028 1028 for (i = 0; i < pclk_vol_table->count; i++) { 1029 - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; 1030 - clocks->data[i].latency_in_us = latency_required ? 1031 - smu10_get_mem_latency(hwmgr, 1032 - pclk_vol_table->entries[i].clk) : 1033 - 0; 1034 - clocks->num_levels++; 1029 + if (pclk_vol_table->entries[i].clk) { 1030 + clocks->data[clocks->num_levels].clocks_in_khz = 1031 + pclk_vol_table->entries[i].clk * 10; 1032 + clocks->data[clocks->num_levels].latency_in_us = latency_required ? 1033 + smu10_get_mem_latency(hwmgr, 1034 + pclk_vol_table->entries[i].clk) : 1035 + 0; 1036 + clocks->num_levels++; 1037 + } 1035 1038 } 1036 1039 1037 1040 return 0; ··· 1080 1077 1081 1078 clocks->num_levels = 0; 1082 1079 for (i = 0; i < pclk_vol_table->count; i++) { 1083 - clocks->data[i].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; 1084 - clocks->data[i].voltage_in_mv = pclk_vol_table->entries[i].vol; 1085 - clocks->num_levels++; 1080 + if (pclk_vol_table->entries[i].clk) { 1081 + clocks->data[clocks->num_levels].clocks_in_khz = pclk_vol_table->entries[i].clk * 10; 1082 + clocks->data[clocks->num_levels].voltage_in_mv = pclk_vol_table->entries[i].vol; 1083 + clocks->num_levels++; 1084 + } 1086 1085 } 1087 1086 1088 1087 return 0;
+3 -3
drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
··· 720 720 data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; 721 721 data->dpm_table.vddc_table.dpm_levels[i].param1 = std_voltage_table->entries[i].Leakage; 722 722 /* param1 is for corresponding std voltage */ 723 - data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; 723 + data->dpm_table.vddc_table.dpm_levels[i].enabled = true; 724 724 } 725 725 726 726 data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; ··· 730 730 /* Initialize Vddci DPM table based on allow Mclk values */ 731 731 for (i = 0; i < allowed_vdd_mclk_table->count; i++) { 732 732 data->dpm_table.vddci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; 733 - data->dpm_table.vddci_table.dpm_levels[i].enabled = 1; 733 + data->dpm_table.vddci_table.dpm_levels[i].enabled = true; 734 734 } 735 735 data->dpm_table.vddci_table.count = allowed_vdd_mclk_table->count; 736 736 } ··· 744 744 */ 745 745 for (i = 0; i < allowed_vdd_mclk_table->count; i++) { 746 746 data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].v; 747 - data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; 747 + data->dpm_table.mvdd_table.dpm_levels[i].enabled = true; 748 748 } 749 749 data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; 750 750 }
+2
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
··· 273 273 uint8_t thermal_controller_type; 274 274 275 275 void *overdrive_table; 276 + void *boot_overdrive_table; 276 277 }; 277 278 278 279 struct smu_dpm_context { ··· 566 565 int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); 567 566 int (*override_pcie_parameters)(struct smu_context *smu); 568 567 uint32_t (*get_pptable_power_limit)(struct smu_context *smu); 568 + int (*disable_umc_cdr_12gbps_workaround)(struct smu_context *smu); 569 569 }; 570 570 571 571 int smu_load_microcode(struct smu_context *smu);
+2
drivers/gpu/drm/amd/powerplay/inc/smu_types.h
··· 170 170 __SMU_DUMMY_MAP(SetSoftMinJpeg), \ 171 171 __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \ 172 172 __SMU_DUMMY_MAP(DFCstateControl), \ 173 + __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \ 174 + __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \ 173 175 174 176 #undef __SMU_DUMMY_MAP 175 177 #define __SMU_DUMMY_MAP(type) SMU_MSG_##type
+4 -1
drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_ppsmc.h
··· 120 120 #define PPSMC_MSG_GetVoltageByDpmOverdrive 0x45 121 121 #define PPSMC_MSG_BacoAudioD3PME 0x48 122 122 123 - #define PPSMC_Message_Count 0x49 123 + #define PPSMC_MSG_DALDisableDummyPstateChange 0x49 124 + #define PPSMC_MSG_DALEnableDummyPstateChange 0x4A 125 + 126 + #define PPSMC_Message_Count 0x4B 124 127 125 128 typedef uint32_t PPSMC_Result; 126 129 typedef uint32_t PPSMC_Msg;
+184 -3
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
··· 119 119 MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg), 120 120 MSG_MAP(BacoAudioD3PME, PPSMC_MSG_BacoAudioD3PME), 121 121 MSG_MAP(ArmD3, PPSMC_MSG_ArmD3), 122 + MSG_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE,PPSMC_MSG_DALDisableDummyPstateChange), 123 + MSG_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE, PPSMC_MSG_DALEnableDummyPstateChange), 124 + MSG_MAP(GetVoltageByDpm, PPSMC_MSG_GetVoltageByDpm), 125 + MSG_MAP(GetVoltageByDpmOverdrive, PPSMC_MSG_GetVoltageByDpmOverdrive), 122 126 }; 123 127 124 128 static struct smu_11_0_cmn2aisc_mapping navi10_clk_map[SMU_CLK_COUNT] = { ··· 741 737 return od_table->cap[feature]; 742 738 } 743 739 740 + static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table, 741 + enum SMU_11_0_ODSETTING_ID setting, 742 + uint32_t *min, uint32_t *max) 743 + { 744 + if (min) 745 + *min = od_table->min[setting]; 746 + if (max) 747 + *max = od_table->max[setting]; 748 + } 744 749 745 750 static int navi10_print_clk_levels(struct smu_context *smu, 746 751 enum smu_clk_type clk_type, char *buf) ··· 768 755 OverDriveTable_t *od_table = 769 756 (OverDriveTable_t *)table_context->overdrive_table; 770 757 struct smu_11_0_overdrive_table *od_settings = smu->od_settings; 758 + uint32_t min_value, max_value; 771 759 772 760 switch (clk_type) { 773 761 case SMU_GFXCLK: ··· 857 843 if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) 858 844 break; 859 845 size += sprintf(buf + size, "OD_MCLK:\n"); 860 - size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax); 846 + size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax); 861 847 break; 862 848 case SMU_OD_VDDC_CURVE: 863 849 if (!smu->od_enabled || !od_table || !od_settings) ··· 881 867 } 882 868 size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE); 883 869 } 870 + break; 871 + case SMU_OD_RANGE: 872 + if (!smu->od_enabled || !od_table || !od_settings) 873 + break; 874 + size = sprintf(buf, 
"%s:\n", "OD_RANGE"); 875 + 876 + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) { 877 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN, 878 + &min_value, NULL); 879 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX, 880 + NULL, &max_value); 881 + size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n", 882 + min_value, max_value); 883 + } 884 + 885 + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) { 886 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, 887 + &min_value, &max_value); 888 + size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n", 889 + min_value, max_value); 890 + } 891 + 892 + if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) { 893 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1, 894 + &min_value, &max_value); 895 + size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n", 896 + min_value, max_value); 897 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1, 898 + &min_value, &max_value); 899 + size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n", 900 + min_value, max_value); 901 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2, 902 + &min_value, &max_value); 903 + size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n", 904 + min_value, max_value); 905 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2, 906 + &min_value, &max_value); 907 + size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n", 908 + min_value, max_value); 909 + navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3, 910 + &min_value, &max_value); 911 + size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n", 912 + min_value, max_value); 913 + navi10_od_setting_get_range(od_settings, 
SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3, 914 + &min_value, &max_value); 915 + size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n", 916 + min_value, max_value); 917 + } 918 + 884 919 break; 885 920 default: 886 921 break; ··· 1012 949 case SMU_GFXCLK: 1013 950 case SMU_DCEFCLK: 1014 951 case SMU_SOCCLK: 952 + case SMU_MCLK: 953 + case SMU_UCLK: 1015 954 ret = smu_get_dpm_level_count(smu, clk_type, &level_count); 1016 955 if (ret) 1017 956 return ret; ··· 1936 1871 return 0; 1937 1872 } 1938 1873 1874 + static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu, 1875 + uint16_t *voltage, 1876 + uint32_t freq) 1877 + { 1878 + uint32_t param = (freq & 0xFFFF) | (PPCLK_GFXCLK << 16); 1879 + uint32_t value = 0; 1880 + int ret; 1881 + 1882 + ret = smu_send_smc_msg_with_param(smu, 1883 + SMU_MSG_GetVoltageByDpm, 1884 + param); 1885 + if (ret) { 1886 + pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!"); 1887 + return ret; 1888 + } 1889 + 1890 + smu_read_smc_arg(smu, &value); 1891 + *voltage = (uint16_t)value; 1892 + 1893 + return 0; 1894 + } 1895 + 1939 1896 static int navi10_setup_od_limits(struct smu_context *smu) { 1940 1897 struct smu_11_0_overdrive_table *overdrive_table = NULL; 1941 1898 struct smu_11_0_powerplay_table *powerplay_table = NULL; ··· 1977 1890 } 1978 1891 1979 1892 static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) { 1980 - OverDriveTable_t *od_table; 1893 + OverDriveTable_t *od_table, *boot_od_table; 1981 1894 int ret = 0; 1982 1895 1983 1896 ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t)); 1984 1897 if (ret) 1985 1898 return ret; 1986 1899 1900 + od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table; 1901 + boot_od_table = (OverDriveTable_t *)smu->smu_table.boot_overdrive_table; 1987 1902 if (initialize) { 1988 1903 ret = navi10_setup_od_limits(smu); 1989 1904 if (ret) { 1990 1905 pr_err("Failed to retrieve board OD 
limits\n"); 1991 1906 return ret; 1992 1907 } 1908 + if (od_table) { 1909 + if (!od_table->GfxclkVolt1) { 1910 + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, 1911 + &od_table->GfxclkVolt1, 1912 + od_table->GfxclkFreq1); 1913 + if (ret) 1914 + od_table->GfxclkVolt1 = 0; 1915 + if (boot_od_table) 1916 + boot_od_table->GfxclkVolt1 = od_table->GfxclkVolt1; 1917 + } 1993 1918 1919 + if (!od_table->GfxclkVolt2) { 1920 + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, 1921 + &od_table->GfxclkVolt2, 1922 + od_table->GfxclkFreq2); 1923 + if (ret) 1924 + od_table->GfxclkVolt2 = 0; 1925 + if (boot_od_table) 1926 + boot_od_table->GfxclkVolt2 = od_table->GfxclkVolt2; 1927 + } 1928 + 1929 + if (!od_table->GfxclkVolt3) { 1930 + ret = navi10_overdrive_get_gfx_clk_base_voltage(smu, 1931 + &od_table->GfxclkVolt3, 1932 + od_table->GfxclkFreq3); 1933 + if (ret) 1934 + od_table->GfxclkVolt3 = 0; 1935 + if (boot_od_table) 1936 + boot_od_table->GfxclkVolt3 = od_table->GfxclkVolt3; 1937 + } 1938 + } 1994 1939 } 1995 1940 1996 - od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table; 1997 1941 if (od_table) { 1998 1942 navi10_dump_od_table(od_table); 1999 1943 } ··· 2120 2002 return ret; 2121 2003 od_table->UclkFmax = input[1]; 2122 2004 break; 2005 + case PP_OD_RESTORE_DEFAULT_TABLE: 2006 + if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) { 2007 + pr_err("Overdrive table was not initialized!\n"); 2008 + return -EINVAL; 2009 + } 2010 + memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t)); 2011 + break; 2123 2012 case PP_OD_COMMIT_DPM_TABLE: 2124 2013 navi10_dump_od_table(od_table); 2125 2014 ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true); ··· 2214 2089 pr_err("RunBtc failed!\n"); 2215 2090 2216 2091 return ret; 2092 + } 2093 + 2094 + static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable) 2095 + { 2096 + int result = 0; 2097 + 2098 + if 
(!enable) 2099 + result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE); 2100 + else 2101 + result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE); 2102 + 2103 + return result; 2104 + } 2105 + 2106 + static int navi10_disable_umc_cdr_12gbps_workaround(struct smu_context *smu) 2107 + { 2108 + uint32_t uclk_count, uclk_min, uclk_max; 2109 + uint32_t smu_version; 2110 + int ret = 0; 2111 + 2112 + ret = smu_get_smc_version(smu, NULL, &smu_version); 2113 + if (ret) 2114 + return ret; 2115 + 2116 + /* This workaround is available only for 42.50 or later SMC firmwares */ 2117 + if (smu_version < 0x2A3200) 2118 + return 0; 2119 + 2120 + ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_count); 2121 + if (ret) 2122 + return ret; 2123 + 2124 + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)0, &uclk_min); 2125 + if (ret) 2126 + return ret; 2127 + 2128 + ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, (uint16_t)(uclk_count - 1), &uclk_max); 2129 + if (ret) 2130 + return ret; 2131 + 2132 + /* Force UCLK out of the highest DPM */ 2133 + ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_min); 2134 + if (ret) 2135 + return ret; 2136 + 2137 + /* Revert the UCLK Hardmax */ 2138 + ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, uclk_max); 2139 + if (ret) 2140 + return ret; 2141 + 2142 + /* 2143 + * In this case, SMU already disabled dummy pstate during enablement 2144 + * of UCLK DPM, we have to re-enabled it. 2145 + * */ 2146 + return navi10_dummy_pstate_control(smu, true); 2217 2147 } 2218 2148 2219 2149 static const struct pptable_funcs navi10_ppt_funcs = { ··· 2365 2185 .od_edit_dpm_table = navi10_od_edit_dpm_table, 2366 2186 .get_pptable_power_limit = navi10_get_pptable_power_limit, 2367 2187 .run_btc = navi10_run_btc, 2188 + .disable_umc_cdr_12gbps_workaround = navi10_disable_umc_cdr_12gbps_workaround, 2368 2189 }; 2369 2190 2370 2191 void navi10_set_ppt_funcs(struct smu_context *smu)
+3
drivers/gpu/drm/amd/powerplay/smu_internal.h
··· 207 207 #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \ 208 208 ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0) 209 209 210 + #define smu_disable_umc_cdr_12gbps_workaround(smu) \ 211 + ((smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround ? (smu)->ppt_funcs->disable_umc_cdr_12gbps_workaround((smu)) : 0) 212 + 210 213 #endif
+6
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
··· 1882 1882 pr_err("Failed to export overdrive table!\n"); 1883 1883 return ret; 1884 1884 } 1885 + if (!table_context->boot_overdrive_table) { 1886 + table_context->boot_overdrive_table = kmemdup(table_context->overdrive_table, overdrive_table_size, GFP_KERNEL); 1887 + if (!table_context->boot_overdrive_table) { 1888 + return -ENOMEM; 1889 + } 1890 + } 1885 1891 } 1886 1892 ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true); 1887 1893 if (ret) {
+6 -6
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
··· 128 128 if (enable) { 129 129 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, 130 130 PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low) == 0, 131 - "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", 131 + "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", 132 132 return -EINVAL); 133 133 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, 134 134 PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high) == 0, 135 - "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", 135 + "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", 136 136 return -EINVAL); 137 137 } else { 138 138 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, 139 139 PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low) == 0, 140 - "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", 140 + "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", 141 141 return -EINVAL); 142 142 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, 143 143 PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high) == 0, 144 - "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", 144 + "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", 145 145 return -EINVAL); 146 146 } 147 147 ··· 158 158 159 159 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, 160 160 PPSMC_MSG_GetEnabledSmuFeaturesLow) == 0, 161 - "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", 161 + "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", 162 162 return -EINVAL); 163 163 smc_features_low = smu9_get_argument(hwmgr); 164 164 165 165 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc(hwmgr, 166 166 PPSMC_MSG_GetEnabledSmuFeaturesHigh) == 0, 167 - "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", 167 + "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", 168 168 return -EINVAL); 169 169 smc_features_high = 
smu9_get_argument(hwmgr); 170 170
+6 -6
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
··· 316 316 if (enable) { 317 317 PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, 318 318 PPSMC_MSG_EnableSmuFeaturesLow, smu_features_low)) == 0, 319 - "[EnableDisableSMCFeatures] Attemp to enable SMU features Low failed!", 319 + "[EnableDisableSMCFeatures] Attempt to enable SMU features Low failed!", 320 320 return ret); 321 321 PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, 322 322 PPSMC_MSG_EnableSmuFeaturesHigh, smu_features_high)) == 0, 323 - "[EnableDisableSMCFeatures] Attemp to enable SMU features High failed!", 323 + "[EnableDisableSMCFeatures] Attempt to enable SMU features High failed!", 324 324 return ret); 325 325 } else { 326 326 PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, 327 327 PPSMC_MSG_DisableSmuFeaturesLow, smu_features_low)) == 0, 328 - "[EnableDisableSMCFeatures] Attemp to disable SMU features Low failed!", 328 + "[EnableDisableSMCFeatures] Attempt to disable SMU features Low failed!", 329 329 return ret); 330 330 PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, 331 331 PPSMC_MSG_DisableSmuFeaturesHigh, smu_features_high)) == 0, 332 - "[EnableDisableSMCFeatures] Attemp to disable SMU features High failed!", 332 + "[EnableDisableSMCFeatures] Attempt to disable SMU features High failed!", 333 333 return ret); 334 334 } 335 335 ··· 347 347 348 348 PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, 349 349 PPSMC_MSG_GetEnabledSmuFeaturesLow)) == 0, 350 - "[GetEnabledSMCFeatures] Attemp to get SMU features Low failed!", 350 + "[GetEnabledSMCFeatures] Attempt to get SMU features Low failed!", 351 351 return ret); 352 352 smc_features_low = vega20_get_argument(hwmgr); 353 353 PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc(hwmgr, 354 354 PPSMC_MSG_GetEnabledSmuFeaturesHigh)) == 0, 355 - "[GetEnabledSMCFeatures] Attemp to get SMU features High failed!", 355 + "[GetEnabledSMCFeatures] Attempt to get SMU features High failed!", 356 356 return 
ret); 357 357 smc_features_high = vega20_get_argument(hwmgr); 358 358
+8 -20
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
··· 1706 1706 struct smu_table_context *table_context = &smu->smu_table; 1707 1707 int ret; 1708 1708 1709 + ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t)); 1710 + if (ret) 1711 + return ret; 1712 + 1709 1713 if (initialize) { 1710 - if (table_context->overdrive_table) 1711 - return -EINVAL; 1712 - 1713 - table_context->overdrive_table = kzalloc(sizeof(OverDriveTable_t), GFP_KERNEL); 1714 - 1715 - if (!table_context->overdrive_table) 1716 - return -ENOMEM; 1717 - 1718 - ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, 1719 - table_context->overdrive_table, false); 1720 - if (ret) { 1721 - pr_err("Failed to export over drive table!\n"); 1722 - return ret; 1723 - } 1724 - 1725 1714 ret = vega20_set_default_od8_setttings(smu); 1726 1715 if (ret) 1727 1716 return ret; ··· 2767 2778 break; 2768 2779 2769 2780 case PP_OD_RESTORE_DEFAULT_TABLE: 2770 - ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false); 2771 - if (ret) { 2772 - pr_err("Failed to export over drive table!\n"); 2773 - return ret; 2781 + if (!(table_context->overdrive_table && table_context->boot_overdrive_table)) { 2782 + pr_err("Overdrive table was not initialized!\n"); 2783 + return -EINVAL; 2774 2784 } 2775 - 2785 + memcpy(table_context->overdrive_table, table_context->boot_overdrive_table, sizeof(OverDriveTable_t)); 2776 2786 break; 2777 2787 2778 2788 case PP_OD_COMMIT_DPM_TABLE:
+6
drivers/gpu/drm/nouveau/dispnv50/core.h
··· 6 6 struct nv50_core { 7 7 const struct nv50_core_func *func; 8 8 struct nv50_dmac chan; 9 + bool assign_windows; 9 10 }; 10 11 11 12 int nv50_core_new(struct nouveau_drm *, struct nv50_core **); ··· 18 17 int (*ntfy_wait_done)(struct nouveau_bo *, u32 offset, 19 18 struct nvif_device *); 20 19 void (*update)(struct nv50_core *, u32 *interlock, bool ntfy); 20 + 21 + struct { 22 + void (*owner)(struct nv50_core *); 23 + } wndw; 21 24 22 25 const struct nv50_head_func *head; 23 26 const struct nv50_outp_func { ··· 53 48 int corec37d_new(struct nouveau_drm *, s32, struct nv50_core **); 54 49 int corec37d_ntfy_wait_done(struct nouveau_bo *, u32, struct nvif_device *); 55 50 void corec37d_update(struct nv50_core *, u32 *, bool); 51 + void corec37d_wndw_owner(struct nv50_core *); 56 52 extern const struct nv50_outp_func sorc37d; 57 53 58 54 int corec57d_new(struct nouveau_drm *, s32, struct nv50_core **);
+18 -5
drivers/gpu/drm/nouveau/dispnv50/corec37d.c
··· 25 25 #include <nouveau_bo.h> 26 26 27 27 void 28 + corec37d_wndw_owner(struct nv50_core *core) 29 + { 30 + const u32 windows = 8; /*XXX*/ 31 + u32 *push, i; 32 + if ((push = evo_wait(&core->chan, 2 * windows))) { 33 + for (i = 0; i < windows; i++) { 34 + evo_mthd(push, 0x1000 + (i * 0x080), 1); 35 + evo_data(push, i >> 1); 36 + } 37 + evo_kick(push, &core->chan); 38 + } 39 + } 40 + 41 + void 28 42 corec37d_update(struct nv50_core *core, u32 *interlock, bool ntfy) 29 43 { 30 44 u32 *push; ··· 90 76 { 91 77 const u32 windows = 8; /*XXX*/ 92 78 u32 *push, i; 93 - if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) { 79 + if ((push = evo_wait(&core->chan, 2 + 5 * windows))) { 94 80 evo_mthd(push, 0x0208, 1); 95 81 evo_data(push, core->chan.sync.handle); 96 82 for (i = 0; i < windows; i++) { 97 - evo_mthd(push, 0x1000 + (i * 0x080), 3); 98 - evo_data(push, i >> 1); 83 + evo_mthd(push, 0x1004 + (i * 0x080), 2); 99 84 evo_data(push, 0x0000001f); 100 85 evo_data(push, 0x00000000); 101 86 evo_mthd(push, 0x1010 + (i * 0x080), 1); 102 87 evo_data(push, 0x00127fff); 103 88 } 104 - evo_mthd(push, 0x0200, 1); 105 - evo_data(push, 0x00000001); 106 89 evo_kick(push, &core->chan); 90 + core->assign_windows = true; 107 91 } 108 92 } 109 93 ··· 111 99 .ntfy_init = corec37d_ntfy_init, 112 100 .ntfy_wait_done = corec37d_ntfy_wait_done, 113 101 .update = corec37d_update, 102 + .wndw.owner = corec37d_wndw_owner, 114 103 .head = &headc37d, 115 104 .sor = &sorc37d, 116 105 };
+4 -5
drivers/gpu/drm/nouveau/dispnv50/corec57d.c
··· 27 27 { 28 28 const u32 windows = 8; /*XXX*/ 29 29 u32 *push, i; 30 - if ((push = evo_wait(&core->chan, 2 + 6 * windows + 2))) { 30 + if ((push = evo_wait(&core->chan, 2 + 5 * windows))) { 31 31 evo_mthd(push, 0x0208, 1); 32 32 evo_data(push, core->chan.sync.handle); 33 33 for (i = 0; i < windows; i++) { 34 - evo_mthd(push, 0x1000 + (i * 0x080), 3); 35 - evo_data(push, i >> 1); 34 + evo_mthd(push, 0x1004 + (i * 0x080), 2); 36 35 evo_data(push, 0x0000000f); 37 36 evo_data(push, 0x00000000); 38 37 evo_mthd(push, 0x1010 + (i * 0x080), 1); 39 38 evo_data(push, 0x00117fff); 40 39 } 41 - evo_mthd(push, 0x0200, 1); 42 - evo_data(push, 0x00000001); 43 40 evo_kick(push, &core->chan); 41 + core->assign_windows = true; 44 42 } 45 43 } 46 44 ··· 48 50 .ntfy_init = corec37d_ntfy_init, 49 51 .ntfy_wait_done = corec37d_ntfy_wait_done, 50 52 .update = corec37d_update, 53 + .wndw.owner = corec37d_wndw_owner, 51 54 .head = &headc57d, 52 55 .sor = &sorc37d, 53 56 };
+16
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 1933 1933 struct nouveau_drm *drm = nouveau_drm(dev); 1934 1934 struct nv50_disp *disp = nv50_disp(dev); 1935 1935 struct nv50_atom *atom = nv50_atom(state); 1936 + struct nv50_core *core = disp->core; 1936 1937 struct nv50_outp_atom *outp, *outt; 1937 1938 u32 interlock[NV50_DISP_INTERLOCK__SIZE] = {}; 1938 1939 int i; ··· 2050 2049 if (new_crtc_state->event) 2051 2050 drm_crtc_vblank_get(crtc); 2052 2051 } 2052 + } 2053 + 2054 + /* Update window->head assignment. 2055 + * 2056 + * This has to happen in an update that's not interlocked with 2057 + * any window channels to avoid hitting HW error checks. 2058 + * 2059 + *TODO: Proper handling of window ownership (Turing apparently 2060 + * supports non-fixed mappings). 2061 + */ 2062 + if (core->assign_windows) { 2063 + core->func->wndw.owner(core); 2064 + core->func->update(core, interlock, false); 2065 + core->assign_windows = false; 2066 + interlock[NV50_DISP_INTERLOCK_CORE] = 0; 2053 2067 } 2054 2068 2055 2069 /* Update plane(s). */
+6
drivers/gpu/drm/nouveau/nvkm/engine/disp/gv100.c
··· 155 155 if (stat & 0x00000008) 156 156 stat &= ~0x00000008; 157 157 158 + if (stat & 0x00000080) { 159 + u32 error = nvkm_mask(device, 0x611848, 0x00000000, 0x00000000); 160 + nvkm_warn(subdev, "error %08x\n", error); 161 + stat &= ~0x00000080; 162 + } 163 + 158 164 if (stat & 0x00000100) { 159 165 unsigned long wndws = nvkm_rd32(device, 0x611858); 160 166 unsigned long other = nvkm_rd32(device, 0x61185c);
+2 -7
drivers/gpu/drm/radeon/radeon_display.c
··· 127 127 128 128 DRM_DEBUG_KMS("%d\n", radeon_crtc->crtc_id); 129 129 130 + msleep(10); 131 + 130 132 WREG32(NI_INPUT_CSC_CONTROL + radeon_crtc->crtc_offset, 131 133 (NI_INPUT_CSC_GRPH_MODE(NI_INPUT_CSC_BYPASS) | 132 134 NI_INPUT_CSC_OVL_MODE(NI_INPUT_CSC_BYPASS))); ··· 674 672 { 675 673 struct radeon_device *rdev = dev->dev_private; 676 674 struct radeon_crtc *radeon_crtc; 677 - int i; 678 675 679 676 radeon_crtc = kzalloc(sizeof(struct radeon_crtc) + (RADEONFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 680 677 if (radeon_crtc == NULL) ··· 701 700 radeon_crtc->mode_set.connectors = (struct drm_connector **)(radeon_crtc + 1); 702 701 radeon_crtc->mode_set.num_connectors = 0; 703 702 #endif 704 - 705 - for (i = 0; i < 256; i++) { 706 - radeon_crtc->lut_r[i] = i << 2; 707 - radeon_crtc->lut_g[i] = i << 2; 708 - radeon_crtc->lut_b[i] = i << 2; 709 - } 710 703 711 704 if (rdev->is_atom_bios && (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)) 712 705 radeon_atombios_init_crtc(dev, radeon_crtc);
-1
drivers/gpu/drm/radeon/radeon_mode.h
··· 327 327 struct radeon_crtc { 328 328 struct drm_crtc base; 329 329 int crtc_id; 330 - u16 lut_r[256], lut_g[256], lut_b[256]; 331 330 bool enabled; 332 331 bool can_tile; 333 332 bool cursor_out_of_bounds;
+1 -1
drivers/gpu/drm/scheduler/sched_entity.c
··· 45 45 * @guilty: atomic_t set to 1 when a job on this queue 46 46 * is found to be guilty causing a timeout 47 47 * 48 - * Note: the sched_list should have atleast one element to schedule 48 + * Note: the sched_list should have at least one element to schedule 49 49 * the entity 50 50 * 51 51 * Returns 0 on success or a negative error code on failure.
+32 -17
drivers/gpu/drm/tegra/drm.c
··· 1037 1037 free_pages((unsigned long)virt, get_order(size)); 1038 1038 } 1039 1039 1040 - static int host1x_drm_probe(struct host1x_device *dev) 1040 + static bool host1x_drm_wants_iommu(struct host1x_device *dev) 1041 1041 { 1042 - struct drm_driver *driver = &tegra_drm_driver; 1043 1042 struct iommu_domain *domain; 1044 - struct tegra_drm *tegra; 1045 - struct drm_device *drm; 1046 - int err; 1047 - 1048 - drm = drm_dev_alloc(driver, &dev->dev); 1049 - if (IS_ERR(drm)) 1050 - return PTR_ERR(drm); 1051 - 1052 - tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); 1053 - if (!tegra) { 1054 - err = -ENOMEM; 1055 - goto put; 1056 - } 1057 1043 1058 1044 /* 1059 1045 * If the Tegra DRM clients are backed by an IOMMU, push buffers are ··· 1068 1082 * up the device tree appropriately. This is considered an problem 1069 1083 * of integration, so care must be taken for the DT to be consistent. 1070 1084 */ 1071 - domain = iommu_get_domain_for_dev(drm->dev->parent); 1085 + domain = iommu_get_domain_for_dev(dev->dev.parent); 1072 1086 1073 - if (domain && iommu_present(&platform_bus_type)) { 1087 + /* 1088 + * Tegra20 and Tegra30 don't support addressing memory beyond the 1089 + * 32-bit boundary, so the regular GATHER opcodes will always be 1090 + * sufficient and whether or not the host1x is attached to an IOMMU 1091 + * doesn't matter. 
1092 + */ 1093 + if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32)) 1094 + return true; 1095 + 1096 + return domain != NULL; 1097 + } 1098 + 1099 + static int host1x_drm_probe(struct host1x_device *dev) 1100 + { 1101 + struct drm_driver *driver = &tegra_drm_driver; 1102 + struct tegra_drm *tegra; 1103 + struct drm_device *drm; 1104 + int err; 1105 + 1106 + drm = drm_dev_alloc(driver, &dev->dev); 1107 + if (IS_ERR(drm)) 1108 + return PTR_ERR(drm); 1109 + 1110 + tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); 1111 + if (!tegra) { 1112 + err = -ENOMEM; 1113 + goto put; 1114 + } 1115 + 1116 + if (host1x_drm_wants_iommu(dev) && iommu_present(&platform_bus_type)) { 1074 1117 tegra->domain = iommu_domain_alloc(&platform_bus_type); 1075 1118 if (!tegra->domain) { 1076 1119 err = -ENOMEM;
+9 -1
drivers/gpu/drm/tegra/gem.c
··· 60 60 /* 61 61 * If we've manually mapped the buffer object through the IOMMU, make 62 62 * sure to return the IOVA address of our mapping. 63 + * 64 + * Similarly, for buffers that have been allocated by the DMA API the 65 + * physical address can be used for devices that are not attached to 66 + * an IOMMU. For these devices, callers must pass a valid pointer via 67 + * the @phys argument. 68 + * 69 + * Imported buffers were also already mapped at import time, so the 70 + * existing mapping can be reused. 63 71 */ 64 - if (phys && obj->mm) { 72 + if (phys) { 65 73 *phys = obj->iova; 66 74 return NULL; 67 75 }
+25 -19
drivers/gpu/drm/tegra/plane.c
··· 3 3 * Copyright (C) 2017 NVIDIA CORPORATION. All rights reserved. 4 4 */ 5 5 6 + #include <linux/iommu.h> 7 + 6 8 #include <drm/drm_atomic.h> 7 9 #include <drm/drm_atomic_helper.h> 8 10 #include <drm/drm_fourcc.h> ··· 109 107 110 108 static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) 111 109 { 110 + struct iommu_domain *domain = iommu_get_domain_for_dev(dc->dev); 112 111 unsigned int i; 113 112 int err; 114 113 115 114 for (i = 0; i < state->base.fb->format->num_planes; i++) { 116 115 struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); 116 + dma_addr_t phys_addr, *phys; 117 + struct sg_table *sgt; 117 118 118 - if (!dc->client.group) { 119 - struct sg_table *sgt; 119 + if (!domain || dc->client.group) 120 + phys = &phys_addr; 121 + else 122 + phys = NULL; 120 123 121 - sgt = host1x_bo_pin(dc->dev, &bo->base, NULL); 122 - if (IS_ERR(sgt)) { 123 - err = PTR_ERR(sgt); 124 - goto unpin; 125 - } 124 + sgt = host1x_bo_pin(dc->dev, &bo->base, phys); 125 + if (IS_ERR(sgt)) { 126 + err = PTR_ERR(sgt); 127 + goto unpin; 128 + } 126 129 130 + if (sgt) { 127 131 err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents, 128 132 DMA_TO_DEVICE); 129 133 if (err == 0) { ··· 151 143 state->iova[i] = sg_dma_address(sgt->sgl); 152 144 state->sgt[i] = sgt; 153 145 } else { 154 - state->iova[i] = bo->iova; 146 + state->iova[i] = phys_addr; 155 147 } 156 148 } 157 149 ··· 164 156 struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); 165 157 struct sg_table *sgt = state->sgt[i]; 166 158 167 - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); 168 - host1x_bo_unpin(dc->dev, &bo->base, sgt); 159 + if (sgt) 160 + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, 161 + DMA_TO_DEVICE); 169 162 163 + host1x_bo_unpin(dc->dev, &bo->base, sgt); 170 164 state->iova[i] = DMA_MAPPING_ERROR; 171 165 state->sgt[i] = NULL; 172 166 } ··· 182 172 183 173 for (i = 0; i < state->base.fb->format->num_planes; i++) { 184 174 struct tegra_bo *bo = 
tegra_fb_get_plane(state->base.fb, i); 175 + struct sg_table *sgt = state->sgt[i]; 185 176 186 - if (!dc->client.group) { 187 - struct sg_table *sgt = state->sgt[i]; 177 + if (sgt) 178 + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, 179 + DMA_TO_DEVICE); 188 180 189 - if (sgt) { 190 - dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, 191 - DMA_TO_DEVICE); 192 - host1x_bo_unpin(dc->dev, &bo->base, sgt); 193 - } 194 - } 195 - 181 + host1x_bo_unpin(dc->dev, &bo->base, sgt); 196 182 state->iova[i] = DMA_MAPPING_ERROR; 197 183 state->sgt[i] = NULL; 198 184 }
+38 -33
drivers/gpu/drm/tegra/sor.c
··· 3915 3915 platform_set_drvdata(pdev, sor); 3916 3916 pm_runtime_enable(&pdev->dev); 3917 3917 3918 - /* 3919 - * On Tegra210 and earlier, provide our own implementation for the 3920 - * pad output clock. 3921 - */ 3922 - if (!sor->clk_pad) { 3923 - char *name; 3924 - 3925 - err = host1x_client_resume(&sor->client); 3926 - if (err < 0) { 3927 - dev_err(sor->dev, "failed to resume: %d\n", err); 3928 - goto remove; 3929 - } 3930 - 3931 - name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index); 3932 - if (!name) { 3933 - err = -ENOMEM; 3934 - goto remove; 3935 - } 3936 - 3937 - sor->clk_pad = tegra_clk_sor_pad_register(sor, name); 3938 - host1x_client_suspend(&sor->client); 3939 - } 3940 - 3941 - if (IS_ERR(sor->clk_pad)) { 3942 - err = PTR_ERR(sor->clk_pad); 3943 - dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n", 3944 - err); 3945 - goto remove; 3946 - } 3947 - 3948 3918 INIT_LIST_HEAD(&sor->client.list); 3949 3919 sor->client.ops = &sor_client_ops; 3950 3920 sor->client.dev = &pdev->dev; ··· 3923 3953 if (err < 0) { 3924 3954 dev_err(&pdev->dev, "failed to register host1x client: %d\n", 3925 3955 err); 3926 - goto remove; 3956 + goto rpm_disable; 3957 + } 3958 + 3959 + /* 3960 + * On Tegra210 and earlier, provide our own implementation for the 3961 + * pad output clock. 
3962 + */ 3963 + if (!sor->clk_pad) { 3964 + char *name; 3965 + 3966 + name = devm_kasprintf(sor->dev, GFP_KERNEL, "sor%u_pad_clkout", 3967 + sor->index); 3968 + if (!name) { 3969 + err = -ENOMEM; 3970 + goto unregister; 3971 + } 3972 + 3973 + err = host1x_client_resume(&sor->client); 3974 + if (err < 0) { 3975 + dev_err(sor->dev, "failed to resume: %d\n", err); 3976 + goto unregister; 3977 + } 3978 + 3979 + sor->clk_pad = tegra_clk_sor_pad_register(sor, name); 3980 + host1x_client_suspend(&sor->client); 3981 + } 3982 + 3983 + if (IS_ERR(sor->clk_pad)) { 3984 + err = PTR_ERR(sor->clk_pad); 3985 + dev_err(sor->dev, "failed to register SOR pad clock: %d\n", 3986 + err); 3987 + goto unregister; 3927 3988 } 3928 3989 3929 3990 return 0; 3930 3991 3992 + unregister: 3993 + host1x_client_unregister(&sor->client); 3994 + rpm_disable: 3995 + pm_runtime_disable(&pdev->dev); 3931 3996 remove: 3932 3997 if (sor->ops && sor->ops->remove) 3933 3998 sor->ops->remove(sor); ··· 3976 3971 struct tegra_sor *sor = platform_get_drvdata(pdev); 3977 3972 int err; 3978 3973 3979 - pm_runtime_disable(&pdev->dev); 3980 - 3981 3974 err = host1x_client_unregister(&sor->client); 3982 3975 if (err < 0) { 3983 3976 dev_err(&pdev->dev, "failed to unregister host1x client: %d\n", 3984 3977 err); 3985 3978 return err; 3986 3979 } 3980 + 3981 + pm_runtime_disable(&pdev->dev); 3987 3982 3988 3983 if (sor->ops && sor->ops->remove) { 3989 3984 err = sor->ops->remove(sor);
+30 -4
drivers/gpu/host1x/job.c
··· 8 8 #include <linux/dma-mapping.h> 9 9 #include <linux/err.h> 10 10 #include <linux/host1x.h> 11 + #include <linux/iommu.h> 11 12 #include <linux/kref.h> 12 13 #include <linux/module.h> 13 14 #include <linux/scatterlist.h> ··· 102 101 { 103 102 struct host1x_client *client = job->client; 104 103 struct device *dev = client->dev; 104 + struct iommu_domain *domain; 105 105 unsigned int i; 106 106 int err; 107 107 108 + domain = iommu_get_domain_for_dev(dev); 108 109 job->num_unpins = 0; 109 110 110 111 for (i = 0; i < job->num_relocs; i++) { ··· 120 117 goto unpin; 121 118 } 122 119 123 - if (client->group) 120 + /* 121 + * If the client device is not attached to an IOMMU, the 122 + * physical address of the buffer object can be used. 123 + * 124 + * Similarly, when an IOMMU domain is shared between all 125 + * host1x clients, the IOVA is already available, so no 126 + * need to map the buffer object again. 127 + * 128 + * XXX Note that this isn't always safe to do because it 129 + * relies on an assumption that no cache maintenance is 130 + * needed on the buffer objects. 131 + */ 132 + if (!domain || client->group) 124 133 phys = &phys_addr; 125 134 else 126 135 phys = NULL; ··· 191 176 dma_addr_t phys_addr; 192 177 unsigned long shift; 193 178 struct iova *alloc; 179 + dma_addr_t *phys; 194 180 unsigned int j; 195 181 196 182 g->bo = host1x_bo_get(g->bo); ··· 200 184 goto unpin; 201 185 } 202 186 203 - sgt = host1x_bo_pin(host->dev, g->bo, NULL); 187 + /** 188 + * If the host1x is not attached to an IOMMU, there is no need 189 + * to map the buffer object for the host1x, since the physical 190 + * address can simply be used. 
191 + */ 192 + if (!iommu_get_domain_for_dev(host->dev)) 193 + phys = &phys_addr; 194 + else 195 + phys = NULL; 196 + 197 + sgt = host1x_bo_pin(host->dev, g->bo, phys); 204 198 if (IS_ERR(sgt)) { 205 199 err = PTR_ERR(sgt); 206 200 goto unpin; ··· 240 214 241 215 job->unpins[job->num_unpins].size = gather_size; 242 216 phys_addr = iova_dma_addr(&host->iova, alloc); 243 - } else { 217 + } else if (sgt) { 244 218 err = dma_map_sg(host->dev, sgt->sgl, sgt->nents, 245 219 DMA_TO_DEVICE); 246 220 if (!err) { ··· 248 222 goto unpin; 249 223 } 250 224 225 + job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; 251 226 job->unpins[job->num_unpins].dev = host->dev; 252 227 phys_addr = sg_dma_address(sgt->sgl); 253 228 } ··· 256 229 job->addr_phys[job->num_unpins] = phys_addr; 257 230 job->gather_addr_phys[i] = phys_addr; 258 231 259 - job->unpins[job->num_unpins].dir = DMA_TO_DEVICE; 260 232 job->unpins[job->num_unpins].bo = g->bo; 261 233 job->unpins[job->num_unpins].sgt = sgt; 262 234 job->num_unpins++;
+3 -2
include/drm/gpu_scheduler.h
··· 52 52 * @list: used to append this struct to the list of entities in the 53 53 * runqueue. 54 54 * @rq: runqueue on which this entity is currently scheduled. 55 - * @sched_list: a list of drm_gpu_schedulers on which jobs from this entity can 56 - * be scheduled 55 + * @sched_list: A list of schedulers (drm_gpu_schedulers). 56 + * Jobs from this entity can be scheduled on any scheduler 57 + * on this list. 57 58 * @num_sched_list: number of drm_gpu_schedulers in the sched_list. 58 59 * @rq_lock: lock to modify the runqueue to which this entity belongs. 59 60 * @job_queue: the list of jobs of this entity.