Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'amd-drm-next-5.6-2020-01-09' of git://people.freedesktop.org/~agd5f/linux into drm-next

amd-drm-next-5.6-2020-01-09:

amdgpu:
- Enable DCN support on POWER
- Enable GFXOFF for Raven1 refresh
- Clean up MM engine idle handlers
- HDMI 2.0 audio fixes
- Fixes for some 10 bpc EDP panels
- Watermark fixes for renoir
- SR-IOV fixes
- Runtime pm robustness fixes
- Arcturus VCN fixes
- RAS fixes
- BACO fixes for Arcturus
- Stable pstate fixes for swSMU
- HDCP fixes
- PSP cleanup
- HDMI fixes
- Misc cleanups

amdkfd:
- Spread interrupt work across cores to reduce latency
- Topology fixes for APUs
- GPU reset improvements

UAPI:
- Enable DRIVER_SYNCOBJ_TIMELINE for vulkan
- Return better error values for kfd process ioctl

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200109230338.8022-1-alexander.deucher@amd.com

+8630 -2090
+1 -4
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 636 636 struct amdgpu_bo *reserved_bo; 637 637 void *va; 638 638 639 - /* Offset on the top of VRAM, used as c2p write buffer. 639 + /* GDDR6 training support flag. 640 640 */ 641 - u64 mem_train_fb_loc; 642 641 bool mem_train_support; 643 642 }; 644 643 ··· 993 994 994 995 bool pm_sysfs_en; 995 996 bool ucode_sysfs_en; 996 - 997 - bool in_baco; 998 997 }; 999 998 1000 999 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+19 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
··· 46 46 #include "soc15.h" 47 47 #include "soc15d.h" 48 48 #include "amdgpu_amdkfd_gfx_v9.h" 49 + #include "gfxhub_v1_0.h" 50 + #include "mmhub_v9_4.h" 49 51 50 52 #define HQD_N_REGS 56 51 53 #define DUMP_REG(addr) do { \ ··· 260 258 return 0; 261 259 } 262 260 261 + static void kgd_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, 262 + uint64_t page_table_base) 263 + { 264 + struct amdgpu_device *adev = get_amdgpu_device(kgd); 265 + 266 + if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { 267 + pr_err("trying to set page table base for wrong VMID %u\n", 268 + vmid); 269 + return; 270 + } 271 + 272 + mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base); 273 + 274 + gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); 275 + } 276 + 263 277 const struct kfd2kgd_calls arcturus_kfd2kgd = { 264 278 .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, 265 279 .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, ··· 295 277 .get_atc_vmid_pasid_mapping_info = 296 278 kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, 297 279 .get_tile_config = kgd_gfx_v9_get_tile_config, 298 - .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, 280 + .set_vm_context_page_table_base = kgd_set_vm_context_page_table_base, 299 281 .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, 300 282 .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, 301 283 .get_hive_id = amdgpu_amdkfd_get_hive_id,
+3 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
··· 40 40 #include "soc15d.h" 41 41 #include "mmhub_v1_0.h" 42 42 #include "gfxhub_v1_0.h" 43 - #include "mmhub_v9_4.h" 44 43 45 44 46 45 enum hqd_dequeue_request_type { ··· 757 758 return 0; 758 759 } 759 760 760 - void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, 761 - uint64_t page_table_base) 761 + static void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, 762 + uint32_t vmid, uint64_t page_table_base) 762 763 { 763 764 struct amdgpu_device *adev = get_amdgpu_device(kgd); 764 765 ··· 768 769 return; 769 770 } 770 771 771 - /* TODO: take advantage of per-process address space size. For 772 - * now, all processes share the same address space size, like 773 - * on GFX8 and older. 774 - */ 775 - if (adev->asic_type == CHIP_ARCTURUS) { 776 - mmhub_v9_4_setup_vm_pt_regs(adev, vmid, page_table_base); 777 - } else 778 - mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); 772 + mmhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); 779 773 780 774 gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); 781 775 }
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
··· 57 57 58 58 bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, 59 59 uint8_t vmid, uint16_t *p_pasid); 60 - void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, 61 - uint64_t page_table_base); 62 60 int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); 63 61 int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); 64 62 int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
··· 2022 2022 if (adev->is_atom_fw) { 2023 2023 amdgpu_atomfirmware_scratch_regs_init(adev); 2024 2024 amdgpu_atomfirmware_allocate_fb_scratch(adev); 2025 - ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev); 2025 + ret = amdgpu_atomfirmware_get_mem_train_info(adev); 2026 2026 if (ret) { 2027 2027 DRM_ERROR("Failed to get mem train fb location.\n"); 2028 2028 return ret;
+4 -34
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
··· 525 525 return ret; 526 526 } 527 527 528 - int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev) 528 + int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev) 529 529 { 530 530 struct atom_context *ctx = adev->mode_info.atom_context; 531 - unsigned char *bios = ctx->bios; 532 - struct vram_reserve_block *reserved_block; 533 - int index, block_number; 531 + int index; 534 532 uint8_t frev, crev; 535 533 uint16_t data_offset, size; 536 - uint32_t start_address_in_kb; 537 - uint64_t offset; 538 534 int ret; 539 535 540 536 adev->fw_vram_usage.mem_train_support = false; ··· 565 569 return -EINVAL; 566 570 } 567 571 568 - reserved_block = (struct vram_reserve_block *) 569 - (bios + data_offset + sizeof(struct atom_common_table_header)); 570 - block_number = ((unsigned int)size - sizeof(struct atom_common_table_header)) 571 - / sizeof(struct vram_reserve_block); 572 - reserved_block += (block_number > 0) ? block_number-1 : 0; 573 - DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n", 574 - block_number, 575 - le32_to_cpu(reserved_block->start_address_in_kb), 576 - le16_to_cpu(reserved_block->used_by_firmware_in_kb), 577 - le16_to_cpu(reserved_block->used_by_driver_in_kb)); 578 - if (reserved_block->used_by_firmware_in_kb > 0) { 579 - start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb); 580 - offset = (uint64_t)start_address_in_kb * ONE_KiB; 581 - if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) { 582 - offset -= ONE_MiB; 583 - } 584 - 585 - offset &= ~(ONE_MiB - 1); 586 - adev->fw_vram_usage.mem_train_fb_loc = offset; 587 - adev->fw_vram_usage.mem_train_support = true; 588 - DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset); 589 - ret = 0; 590 - } else { 591 - DRM_ERROR("used_by_firmware_in_kb is 0!\n"); 592 - ret = -EINVAL; 593 - } 594 - 595 - return ret; 572 + adev->fw_vram_usage.mem_train_support = true; 573 + return 0; 596 574 }
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h
··· 31 31 int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); 32 32 int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, 33 33 int *vram_width, int *vram_type, int *vram_vendor); 34 - int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev); 34 + int amdgpu_atomfirmware_get_mem_train_info(struct amdgpu_device *adev); 35 35 int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); 36 36 int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); 37 37 bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev);
+11 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
··· 613 613 bool d3_supported = false; 614 614 struct pci_dev *parent_pdev; 615 615 616 - while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) { 616 + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { 617 + vga_count++; 618 + 619 + has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); 620 + 621 + parent_pdev = pci_upstream_bridge(pdev); 622 + d3_supported |= parent_pdev && parent_pdev->bridge_d3; 623 + amdgpu_atpx_get_quirks(pdev); 624 + } 625 + 626 + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { 617 627 vga_count++; 618 628 619 629 has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true);
+70 -43
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
··· 74 74 struct amdgpu_ctx *ctx) 75 75 { 76 76 unsigned num_entities = amdgpu_ctx_total_num_entities(); 77 - unsigned i, j, k; 77 + unsigned i, j; 78 78 int r; 79 79 80 80 if (priority < 0 || priority >= DRM_SCHED_PRIORITY_MAX) ··· 121 121 ctx->override_priority = DRM_SCHED_PRIORITY_UNSET; 122 122 123 123 for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) { 124 - struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; 125 - struct drm_sched_rq *rqs[AMDGPU_MAX_RINGS]; 126 - unsigned num_rings = 0; 127 - unsigned num_rqs = 0; 124 + struct drm_gpu_scheduler **scheds; 125 + struct drm_gpu_scheduler *sched; 126 + unsigned num_scheds = 0; 128 127 129 128 switch (i) { 130 129 case AMDGPU_HW_IP_GFX: 131 - rings[0] = &adev->gfx.gfx_ring[0]; 132 - num_rings = 1; 130 + sched = &adev->gfx.gfx_ring[0].sched; 131 + scheds = &sched; 132 + num_scheds = 1; 133 133 break; 134 134 case AMDGPU_HW_IP_COMPUTE: 135 - for (j = 0; j < adev->gfx.num_compute_rings; ++j) 136 - rings[j] = &adev->gfx.compute_ring[j]; 137 - num_rings = adev->gfx.num_compute_rings; 135 + scheds = adev->gfx.compute_sched; 136 + num_scheds = adev->gfx.num_compute_sched; 138 137 break; 139 138 case AMDGPU_HW_IP_DMA: 140 - for (j = 0; j < adev->sdma.num_instances; ++j) 141 - rings[j] = &adev->sdma.instance[j].ring; 142 - num_rings = adev->sdma.num_instances; 139 + scheds = adev->sdma.sdma_sched; 140 + num_scheds = adev->sdma.num_sdma_sched; 143 141 break; 144 142 case AMDGPU_HW_IP_UVD: 145 - rings[0] = &adev->uvd.inst[0].ring; 146 - num_rings = 1; 143 + sched = &adev->uvd.inst[0].ring.sched; 144 + scheds = &sched; 145 + num_scheds = 1; 147 146 break; 148 147 case AMDGPU_HW_IP_VCE: 149 - rings[0] = &adev->vce.ring[0]; 150 - num_rings = 1; 148 + sched = &adev->vce.ring[0].sched; 149 + scheds = &sched; 150 + num_scheds = 1; 151 151 break; 152 152 case AMDGPU_HW_IP_UVD_ENC: 153 - rings[0] = &adev->uvd.inst[0].ring_enc[0]; 154 - num_rings = 1; 153 + sched = &adev->uvd.inst[0].ring_enc[0].sched; 154 + scheds = &sched; 155 + num_scheds = 1; 155 
156 break; 156 157 case AMDGPU_HW_IP_VCN_DEC: 157 - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { 158 - if (adev->vcn.harvest_config & (1 << j)) 159 - continue; 160 - rings[num_rings++] = &adev->vcn.inst[j].ring_dec; 161 - } 158 + scheds = adev->vcn.vcn_dec_sched; 159 + num_scheds = adev->vcn.num_vcn_dec_sched; 162 160 break; 163 161 case AMDGPU_HW_IP_VCN_ENC: 164 - for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { 165 - if (adev->vcn.harvest_config & (1 << j)) 166 - continue; 167 - for (k = 0; k < adev->vcn.num_enc_rings; ++k) 168 - rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k]; 169 - } 162 + scheds = adev->vcn.vcn_enc_sched; 163 + num_scheds = adev->vcn.num_vcn_enc_sched; 170 164 break; 171 165 case AMDGPU_HW_IP_VCN_JPEG: 172 - for (j = 0; j < adev->jpeg.num_jpeg_inst; ++j) { 173 - if (adev->jpeg.harvest_config & (1 << j)) 174 - continue; 175 - rings[num_rings++] = &adev->jpeg.inst[j].ring_dec; 176 - } 166 + scheds = adev->jpeg.jpeg_sched; 167 + num_scheds = adev->jpeg.num_jpeg_sched; 177 168 break; 178 - } 179 - 180 - for (j = 0; j < num_rings; ++j) { 181 - if (!rings[j]->adev) 182 - continue; 183 - 184 - rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority]; 185 169 } 186 170 187 171 for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) 188 172 r = drm_sched_entity_init(&ctx->entities[i][j].entity, 189 - rqs, num_rqs, &ctx->guilty); 173 + priority, scheds, 174 + num_scheds, &ctx->guilty); 190 175 if (r) 191 176 goto error_cleanup_entities; 192 177 } ··· 611 626 612 627 idr_destroy(&mgr->ctx_handles); 613 628 mutex_destroy(&mgr->lock); 629 + } 630 + 631 + void amdgpu_ctx_init_sched(struct amdgpu_device *adev) 632 + { 633 + int i, j; 634 + 635 + for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 636 + adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched; 637 + adev->gfx.num_gfx_sched++; 638 + } 639 + 640 + for (i = 0; i < adev->gfx.num_compute_rings; i++) { 641 + adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched; 642 + adev->gfx.num_compute_sched++; 643 + 
} 644 + 645 + for (i = 0; i < adev->sdma.num_instances; i++) { 646 + adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched; 647 + adev->sdma.num_sdma_sched++; 648 + } 649 + 650 + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 651 + if (adev->vcn.harvest_config & (1 << i)) 652 + continue; 653 + adev->vcn.vcn_dec_sched[adev->vcn.num_vcn_dec_sched++] = 654 + &adev->vcn.inst[i].ring_dec.sched; 655 + } 656 + 657 + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 658 + if (adev->vcn.harvest_config & (1 << i)) 659 + continue; 660 + for (j = 0; j < adev->vcn.num_enc_rings; ++j) 661 + adev->vcn.vcn_enc_sched[adev->vcn.num_vcn_enc_sched++] = 662 + &adev->vcn.inst[i].ring_enc[j].sched; 663 + } 664 + 665 + for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) { 666 + if (adev->jpeg.harvest_config & (1 << i)) 667 + continue; 668 + adev->jpeg.jpeg_sched[adev->jpeg.num_jpeg_sched++] = 669 + &adev->jpeg.inst[i].ring_dec.sched; 670 + } 614 671 }
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
··· 87 87 long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout); 88 88 void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr); 89 89 90 + void amdgpu_ctx_init_sched(struct amdgpu_device *adev); 91 + 92 + 90 93 #endif
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
··· 129 129 sh_bank = 0xFFFFFFFF; 130 130 if (instance_bank == 0x3FF) 131 131 instance_bank = 0xFFFFFFFF; 132 - use_bank = 1; 132 + use_bank = true; 133 133 } else if (*pos & (1ULL << 61)) { 134 134 135 135 me = (*pos & GENMASK_ULL(33, 24)) >> 24; ··· 137 137 queue = (*pos & GENMASK_ULL(53, 44)) >> 44; 138 138 vmid = (*pos & GENMASK_ULL(58, 54)) >> 54; 139 139 140 - use_ring = 1; 140 + use_ring = true; 141 141 } else { 142 - use_bank = use_ring = 0; 142 + use_bank = use_ring = false; 143 143 } 144 144 145 145 *pos &= (1UL << 22) - 1;
+62 -80
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 66 66 #include "amdgpu_pmu.h" 67 67 68 68 #include <linux/suspend.h> 69 + #include <drm/task_barrier.h> 69 70 70 71 MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); 71 72 MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); ··· 1032 1031 */ 1033 1032 static int amdgpu_device_check_arguments(struct amdgpu_device *adev) 1034 1033 { 1035 - int ret = 0; 1036 - 1037 1034 if (amdgpu_sched_jobs < 4) { 1038 1035 dev_warn(adev->dev, "sched jobs (%d) must be at least 4\n", 1039 1036 amdgpu_sched_jobs); ··· 1071 1072 1072 1073 adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); 1073 1074 1074 - return ret; 1075 + return 0; 1075 1076 } 1076 1077 1077 1078 /** ··· 1809 1810 } 1810 1811 } 1811 1812 1812 - r = amdgpu_pm_load_smu_firmware(adev, &smu_version); 1813 + if (!amdgpu_sriov_vf(adev) || adev->asic_type == CHIP_TONGA) 1814 + r = amdgpu_pm_load_smu_firmware(adev, &smu_version); 1813 1815 1814 1816 return r; 1815 1817 } ··· 2439 2439 AMD_IP_BLOCK_TYPE_GFX, 2440 2440 AMD_IP_BLOCK_TYPE_SDMA, 2441 2441 AMD_IP_BLOCK_TYPE_UVD, 2442 - AMD_IP_BLOCK_TYPE_VCE 2442 + AMD_IP_BLOCK_TYPE_VCE, 2443 + AMD_IP_BLOCK_TYPE_VCN 2443 2444 }; 2444 2445 2445 2446 for (i = 0; i < ARRAY_SIZE(ip_order); i++) { ··· 2455 2454 block->status.hw) 2456 2455 continue; 2457 2456 2458 - r = block->version->funcs->hw_init(adev); 2457 + if (block->version->type == AMD_IP_BLOCK_TYPE_SMC) 2458 + r = block->version->funcs->resume(adev); 2459 + else 2460 + r = block->version->funcs->hw_init(adev); 2461 + 2459 2462 DRM_INFO("RE-INIT-late: %s %s\n", block->version->funcs->name, r?"failed":"succeeded"); 2460 2463 if (r) 2461 2464 return r; ··· 2668 2663 { 2669 2664 struct amdgpu_device *adev = 2670 2665 container_of(__work, struct amdgpu_device, xgmi_reset_work); 2666 + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); 2671 2667 2672 - if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) 2673 - adev->asic_reset_res = (adev->in_baco == false) ? 
2674 - amdgpu_device_baco_enter(adev->ddev) : 2675 - amdgpu_device_baco_exit(adev->ddev); 2676 - else 2677 - adev->asic_reset_res = amdgpu_asic_reset(adev); 2668 + /* It's a bug to not have a hive within this function */ 2669 + if (WARN_ON(!hive)) 2670 + return; 2678 2671 2672 + /* 2673 + * Use task barrier to synchronize all xgmi reset works across the 2674 + * hive. task_barrier_enter and task_barrier_exit will block 2675 + * until all the threads running the xgmi reset works reach 2676 + * those points. task_barrier_full will do both blocks. 2677 + */ 2678 + if (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { 2679 + 2680 + task_barrier_enter(&hive->tb); 2681 + adev->asic_reset_res = amdgpu_device_baco_enter(adev->ddev); 2682 + 2683 + if (adev->asic_reset_res) 2684 + goto fail; 2685 + 2686 + task_barrier_exit(&hive->tb); 2687 + adev->asic_reset_res = amdgpu_device_baco_exit(adev->ddev); 2688 + 2689 + if (adev->asic_reset_res) 2690 + goto fail; 2691 + } else { 2692 + 2693 + task_barrier_full(&hive->tb); 2694 + adev->asic_reset_res = amdgpu_asic_reset(adev); 2695 + } 2696 + 2697 + fail: 2679 2698 if (adev->asic_reset_res) 2680 2699 DRM_WARN("ASIC reset failed with error, %d for drm dev, %s", 2681 2700 adev->asic_reset_res, adev->ddev->unique); ··· 2814 2785 adev->mman.buffer_funcs = NULL; 2815 2786 adev->mman.buffer_funcs_ring = NULL; 2816 2787 adev->vm_manager.vm_pte_funcs = NULL; 2817 - adev->vm_manager.vm_pte_num_rqs = 0; 2788 + adev->vm_manager.vm_pte_num_scheds = 0; 2818 2789 adev->gmc.gmc_funcs = NULL; 2819 2790 adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); 2820 2791 bitmap_zero(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); ··· 3057 3028 amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_AMDGPU_INIT_FAIL, 0, 0); 3058 3029 goto failed; 3059 3030 } 3031 + 3032 + DRM_DEBUG("SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n", 3033 + adev->gfx.config.max_shader_engines, 3034 + adev->gfx.config.max_sh_per_se, 3035 + 
adev->gfx.config.max_cu_per_sh, 3036 + adev->gfx.cu_info.number); 3037 + 3038 + amdgpu_ctx_init_sched(adev); 3060 3039 3061 3040 adev->accel_working = true; 3062 3041 ··· 3697 3660 if (r) 3698 3661 return r; 3699 3662 3700 - amdgpu_amdkfd_pre_reset(adev); 3701 - 3702 3663 /* Resume IP prior to SMC */ 3703 3664 r = amdgpu_device_ip_reinit_early_sriov(adev); 3704 3665 if (r) ··· 3825 3790 return r; 3826 3791 } 3827 3792 3828 - static int amdgpu_do_asic_reset(struct amdgpu_device *adev, 3829 - struct amdgpu_hive_info *hive, 3793 + static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, 3830 3794 struct list_head *device_list_handle, 3831 3795 bool *need_full_reset_arg) 3832 3796 { 3833 3797 struct amdgpu_device *tmp_adev = NULL; 3834 3798 bool need_full_reset = *need_full_reset_arg, vram_lost = false; 3835 3799 int r = 0; 3836 - int cpu = smp_processor_id(); 3837 - bool use_baco = 3838 - (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) ? 3839 - true : false; 3840 3800 3841 3801 /* 3842 3802 * ASIC reset has to be done on all HGMI hive nodes ASAP ··· 3839 3809 */ 3840 3810 if (need_full_reset) { 3841 3811 list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { 3842 - /* 3843 - * For XGMI run all resets in parallel to speed up the 3844 - * process by scheduling the highpri wq on different 3845 - * cpus. For XGMI with baco reset, all nodes must enter 3846 - * baco within close proximity before anyone exit. 
3847 - */ 3812 + /* For XGMI run all resets in parallel to speed up the process */ 3848 3813 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 3849 - if (!queue_work_on(cpu, system_highpri_wq, 3850 - &tmp_adev->xgmi_reset_work)) 3814 + if (!queue_work(system_unbound_wq, &tmp_adev->xgmi_reset_work)) 3851 3815 r = -EALREADY; 3852 - cpu = cpumask_next(cpu, cpu_online_mask); 3853 3816 } else 3854 3817 r = amdgpu_asic_reset(tmp_adev); 3855 - if (r) 3818 + 3819 + if (r) { 3820 + DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s", 3821 + r, tmp_adev->ddev->unique); 3856 3822 break; 3823 + } 3857 3824 } 3858 3825 3859 - /* For XGMI wait for all work to complete before proceed */ 3826 + /* For XGMI wait for all resets to complete before proceed */ 3860 3827 if (!r) { 3861 3828 list_for_each_entry(tmp_adev, device_list_handle, 3862 3829 gmc.xgmi.head) { ··· 3862 3835 r = tmp_adev->asic_reset_res; 3863 3836 if (r) 3864 3837 break; 3865 - if (use_baco) 3866 - tmp_adev->in_baco = true; 3867 3838 } 3868 3839 } 3869 - } 3870 - 3871 - /* 3872 - * For XGMI with baco reset, need exit baco phase by scheduling 3873 - * xgmi_reset_work one more time. PSP reset and sGPU skips this 3874 - * phase. Not assume the situation that PSP reset and baco reset 3875 - * coexist within an XGMI hive. 
3876 - */ 3877 - 3878 - if (!r && use_baco) { 3879 - cpu = smp_processor_id(); 3880 - list_for_each_entry(tmp_adev, device_list_handle, 3881 - gmc.xgmi.head) { 3882 - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 3883 - if (!queue_work_on(cpu, 3884 - system_highpri_wq, 3885 - &tmp_adev->xgmi_reset_work)) 3886 - r = -EALREADY; 3887 - if (r) 3888 - break; 3889 - cpu = cpumask_next(cpu, cpu_online_mask); 3890 - } 3891 - } 3892 - } 3893 - 3894 - if (!r && use_baco) { 3895 - list_for_each_entry(tmp_adev, device_list_handle, 3896 - gmc.xgmi.head) { 3897 - if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { 3898 - flush_work(&tmp_adev->xgmi_reset_work); 3899 - r = tmp_adev->asic_reset_res; 3900 - if (r) 3901 - break; 3902 - tmp_adev->in_baco = false; 3903 - } 3904 - } 3905 - } 3906 - 3907 - if (r) { 3908 - DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s", 3909 - r, tmp_adev->ddev->unique); 3910 - goto end; 3911 3840 } 3912 3841 } 3913 3842 ··· 3957 3974 mutex_lock(&adev->lock_reset); 3958 3975 3959 3976 atomic_inc(&adev->gpu_reset_counter); 3960 - adev->in_gpu_reset = 1; 3977 + adev->in_gpu_reset = true; 3961 3978 switch (amdgpu_asic_reset_method(adev)) { 3962 3979 case AMD_RESET_METHOD_MODE1: 3963 3980 adev->mp1_state = PP_MP1_STATE_SHUTDOWN; ··· 3977 3994 { 3978 3995 amdgpu_vf_error_trans_all(adev); 3979 3996 adev->mp1_state = PP_MP1_STATE_NONE; 3980 - adev->in_gpu_reset = 0; 3997 + adev->in_gpu_reset = false; 3981 3998 mutex_unlock(&adev->lock_reset); 3982 3999 } 3983 4000 ··· 4158 4175 if (r) 4159 4176 adev->asic_reset_res = r; 4160 4177 } else { 4161 - r = amdgpu_do_asic_reset(adev, hive, device_list_handle, 4162 - &need_full_reset); 4178 + r = amdgpu_do_asic_reset(hive, device_list_handle, &need_full_reset); 4163 4179 if (r && r == -EAGAIN) 4164 4180 goto retry; 4165 4181 }
+19 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
··· 951 951 case AMD_IP_BLOCK_TYPE_VCN: 952 952 case AMD_IP_BLOCK_TYPE_VCE: 953 953 case AMD_IP_BLOCK_TYPE_SDMA: 954 + if (swsmu) { 955 + ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); 956 + } else { 957 + if (adev->powerplay.pp_funcs && 958 + adev->powerplay.pp_funcs->set_powergating_by_smu) { 959 + mutex_lock(&adev->pm.mutex); 960 + ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( 961 + (adev)->powerplay.pp_handle, block_type, gate)); 962 + mutex_unlock(&adev->pm.mutex); 963 + } 964 + } 965 + break; 966 + case AMD_IP_BLOCK_TYPE_JPEG: 954 967 if (swsmu) 955 968 ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate); 956 - else 957 - ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( 958 - (adev)->powerplay.pp_handle, block_type, gate)); 959 969 break; 960 970 case AMD_IP_BLOCK_TYPE_GMC: 961 971 case AMD_IP_BLOCK_TYPE_ACP: 962 - ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( 972 + if (adev->powerplay.pp_funcs && 973 + adev->powerplay.pp_funcs->set_powergating_by_smu) { 974 + mutex_lock(&adev->pm.mutex); 975 + ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu( 963 976 (adev)->powerplay.pp_handle, block_type, gate)); 977 + mutex_unlock(&adev->pm.mutex); 978 + } 964 979 break; 965 980 default: 966 981 break;
+15 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 142 142 int amdgpu_mcbp = 0; 143 143 int amdgpu_discovery = -1; 144 144 int amdgpu_mes = 0; 145 - int amdgpu_noretry = 1; 145 + int amdgpu_noretry; 146 146 int amdgpu_force_asic_type = -1; 147 147 148 148 struct amdgpu_mgpu_info mgpu_info = { ··· 588 588 module_param_named(mes, amdgpu_mes, int, 0444); 589 589 590 590 MODULE_PARM_DESC(noretry, 591 - "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))"); 591 + "Disable retry faults (0 = retry enabled (default), 1 = retry disabled)"); 592 592 module_param_named(noretry, amdgpu_noretry, int, 0644); 593 593 594 594 /** ··· 1203 1203 struct pci_dev *pdev = to_pci_dev(dev); 1204 1204 struct drm_device *drm_dev = pci_get_drvdata(pdev); 1205 1205 struct amdgpu_device *adev = drm_dev->dev_private; 1206 - int ret; 1206 + int ret, i; 1207 1207 1208 1208 if (!adev->runpm) { 1209 1209 pm_runtime_forbid(dev); 1210 1210 return -EBUSY; 1211 + } 1212 + 1213 + /* wait for all rings to drain before suspending */ 1214 + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 1215 + struct amdgpu_ring *ring = adev->rings[i]; 1216 + if (ring && ring->sched.ready) { 1217 + ret = amdgpu_fence_wait_empty(ring); 1218 + if (ret) 1219 + return -EBUSY; 1220 + } 1211 1221 } 1212 1222 1213 1223 if (amdgpu_device_supports_boco(drm_dev)) ··· 1391 1381 .driver_features = 1392 1382 DRIVER_USE_AGP | DRIVER_ATOMIC | 1393 1383 DRIVER_GEM | 1394 - DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ, 1384 + DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ | 1385 + DRIVER_SYNCOBJ_TIMELINE, 1395 1386 .load = amdgpu_driver_load_kms, 1396 1387 .open = amdgpu_driver_open_kms, 1397 1388 .postclose = amdgpu_driver_postclose_kms,
+5 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
··· 34 34 #include <linux/kref.h> 35 35 #include <linux/slab.h> 36 36 #include <linux/firmware.h> 37 + #include <linux/pm_runtime.h> 37 38 38 39 #include <drm/drm_debugfs.h> 39 40 ··· 155 154 seq); 156 155 amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr, 157 156 seq, flags | AMDGPU_FENCE_FLAG_INT); 158 - 157 + pm_runtime_get_noresume(adev->ddev->dev); 159 158 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask]; 160 159 if (unlikely(rcu_dereference_protected(*ptr, 1))) { 161 160 struct dma_fence *old; ··· 235 234 bool amdgpu_fence_process(struct amdgpu_ring *ring) 236 235 { 237 236 struct amdgpu_fence_driver *drv = &ring->fence_drv; 237 + struct amdgpu_device *adev = ring->adev; 238 238 uint32_t seq, last_seq; 239 239 int r; 240 240 ··· 276 274 BUG(); 277 275 278 276 dma_fence_put(fence); 277 + pm_runtime_mark_last_busy(adev->ddev->dev); 278 + pm_runtime_put_autosuspend(adev->ddev->dev); 279 279 } while (last_seq != seq); 280 280 281 281 return true;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 641 641 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 642 642 if (adev->gfx.funcs->query_ras_error_count) 643 643 adev->gfx.funcs->query_ras_error_count(adev, err_data); 644 - amdgpu_ras_reset_gpu(adev, 0); 644 + amdgpu_ras_reset_gpu(adev); 645 645 } 646 646 return AMDGPU_RAS_SUCCESS; 647 647 }
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
··· 269 269 bool me_fw_write_wait; 270 270 bool cp_fw_write_wait; 271 271 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; 272 + struct drm_gpu_scheduler *gfx_sched[AMDGPU_MAX_GFX_RINGS]; 273 + uint32_t num_gfx_sched; 272 274 unsigned num_gfx_rings; 273 275 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; 276 + struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS]; 277 + uint32_t num_compute_sched; 274 278 unsigned num_compute_rings; 275 279 struct amdgpu_irq_src eop_irq; 276 280 struct amdgpu_irq_src priv_reg_irq;
+41 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
··· 223 223 u64 size_af, size_bf; 224 224 225 225 if (amdgpu_sriov_vf(adev)) { 226 - mc->agp_start = 0xffffffff; 226 + mc->agp_start = 0xffffffffffff; 227 227 mc->agp_end = 0x0; 228 228 mc->agp_size = 0; 229 229 ··· 332 332 amdgpu_umc_ras_fini(adev); 333 333 amdgpu_mmhub_ras_fini(adev); 334 334 amdgpu_xgmi_ras_fini(adev); 335 + } 336 + 337 + /* 338 + * The latest engine allocation on gfx9/10 is: 339 + * Engine 2, 3: firmware 340 + * Engine 0, 1, 4~16: amdgpu ring, 341 + * subject to change when ring number changes 342 + * Engine 17: Gart flushes 343 + */ 344 + #define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 345 + #define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 346 + 347 + int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev) 348 + { 349 + struct amdgpu_ring *ring; 350 + unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = 351 + {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP, 352 + GFXHUB_FREE_VM_INV_ENGS_BITMAP}; 353 + unsigned i; 354 + unsigned vmhub, inv_eng; 355 + 356 + for (i = 0; i < adev->num_rings; ++i) { 357 + ring = adev->rings[i]; 358 + vmhub = ring->funcs->vmhub; 359 + 360 + inv_eng = ffs(vm_inv_engs[vmhub]); 361 + if (!inv_eng) { 362 + dev_err(adev->dev, "no VM inv eng for ring %s\n", 363 + ring->name); 364 + return -EINVAL; 365 + } 366 + 367 + ring->vm_inv_eng = inv_eng - 1; 368 + vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng); 369 + 370 + dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", 371 + ring->name, ring->vm_inv_eng, ring->funcs->vmhub); 372 + } 373 + 374 + return 0; 335 375 }
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
··· 267 267 uint16_t pasid, uint64_t timestamp); 268 268 int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); 269 269 void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); 270 + int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev); 270 271 271 272 #endif
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
··· 43 43 uint8_t num_jpeg_inst; 44 44 struct amdgpu_jpeg_inst inst[AMDGPU_MAX_JPEG_INSTANCES]; 45 45 struct amdgpu_jpeg_reg internal; 46 + struct drm_gpu_scheduler *jpeg_sched[AMDGPU_MAX_JPEG_INSTANCES]; 47 + uint32_t num_jpeg_sched; 46 48 unsigned harvest_config; 47 49 struct delayed_work idle_work; 48 50 enum amd_powergating_state cur_state;
+15 -28
drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
··· 2762 2762 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 2763 2763 { 2764 2764 int ret = 0; 2765 - if (is_support_sw_smu(adev)) { 2766 - ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_UVD, enable); 2767 - if (ret) 2768 - DRM_ERROR("[SW SMU]: dpm enable uvd failed, state = %s, ret = %d. \n", 2769 - enable ? "true" : "false", ret); 2770 - } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { 2771 - /* enable/disable UVD */ 2772 - mutex_lock(&adev->pm.mutex); 2773 - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 2774 - mutex_unlock(&adev->pm.mutex); 2775 - } 2765 + 2766 + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 2767 + if (ret) 2768 + DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 2769 + enable ? "enable" : "disable", ret); 2770 + 2776 2771 /* enable/disable Low Memory PState for UVD (4k videos) */ 2777 2772 if (adev->asic_type == CHIP_STONEY && 2778 2773 adev->uvd.decode_image_width >= WIDTH_4K) { ··· 2784 2789 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 2785 2790 { 2786 2791 int ret = 0; 2787 - if (is_support_sw_smu(adev)) { 2788 - ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_VCE, enable); 2789 - if (ret) 2790 - DRM_ERROR("[SW SMU]: dpm enable vce failed, state = %s, ret = %d. \n", 2791 - enable ? "true" : "false", ret); 2792 - } else if (adev->powerplay.pp_funcs->set_powergating_by_smu) { 2793 - /* enable/disable VCE */ 2794 - mutex_lock(&adev->pm.mutex); 2795 - amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 2796 - mutex_unlock(&adev->pm.mutex); 2797 - } 2792 + 2793 + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 2794 + if (ret) 2795 + DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 2796 + enable ? 
"enable" : "disable", ret); 2798 2797 } 2799 2798 2800 2799 void amdgpu_pm_print_power_states(struct amdgpu_device *adev) ··· 2807 2818 { 2808 2819 int ret = 0; 2809 2820 2810 - if (is_support_sw_smu(adev)) { 2811 - ret = smu_dpm_set_power_gate(&adev->smu, AMD_IP_BLOCK_TYPE_JPEG, enable); 2812 - if (ret) 2813 - DRM_ERROR("[SW SMU]: dpm enable jpeg failed, state = %s, ret = %d. \n", 2814 - enable ? "true" : "false", ret); 2815 - } 2821 + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 2822 + if (ret) 2823 + DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 2824 + enable ? "enable" : "disable", ret); 2816 2825 } 2817 2826 2818 2827 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
··· 107 107 default: 108 108 count = 0; 109 109 break; 110 - }; 110 + } 111 111 } while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev); 112 112 113 113 local64_add(count - prev, &event->count); ··· 130 130 break; 131 131 default: 132 132 break; 133 - }; 133 + } 134 134 135 135 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); 136 136 hwc->state |= PERF_HES_STOPPED; ··· 160 160 break; 161 161 default: 162 162 return 0; 163 - }; 163 + } 164 164 165 165 if (retval) 166 166 return retval; ··· 188 188 break; 189 189 default: 190 190 break; 191 - }; 191 + } 192 192 193 193 perf_event_update_userpage(event); 194 194 }
+94 -231
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 191 191 if (ucode) 192 192 DRM_WARN("failed to load ucode id (%d) ", 193 193 ucode->ucode_id); 194 - DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n", 194 + DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n", 195 195 psp->cmd_buf_mem->cmd_id, 196 - psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); 196 + psp->cmd_buf_mem->resp.status); 197 197 if (!timeout) { 198 198 mutex_unlock(&psp->mutex); 199 199 return -EINVAL; ··· 365 365 return ret; 366 366 } 367 367 368 - static void psp_prep_asd_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 369 - uint32_t asd_session_id) 368 + static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 369 + uint32_t session_id) 370 370 { 371 371 cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 372 - cmd->cmd.cmd_unload_ta.session_id = asd_session_id; 372 + cmd->cmd.cmd_unload_ta.session_id = session_id; 373 373 } 374 374 375 375 static int psp_asd_unload(struct psp_context *psp) ··· 387 387 if (!cmd) 388 388 return -ENOMEM; 389 389 390 - psp_prep_asd_unload_cmd_buf(cmd, psp->asd_context.session_id); 390 + psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id); 391 391 392 392 ret = psp_cmd_submit_buf(psp, NULL, cmd, 393 393 psp->fence_buf_mc_addr); ··· 427 427 return ret; 428 428 } 429 429 430 - static void psp_prep_xgmi_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 431 - uint64_t xgmi_ta_mc, uint64_t xgmi_mc_shared, 432 - uint32_t xgmi_ta_size, uint32_t shared_size) 430 + static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 431 + uint64_t ta_bin_mc, 432 + uint32_t ta_bin_size, 433 + uint64_t ta_shared_mc, 434 + uint32_t ta_shared_size) 433 435 { 434 - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 435 - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(xgmi_ta_mc); 436 - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(xgmi_ta_mc); 437 - cmd->cmd.cmd_load_ta.app_len = xgmi_ta_size; 436 + cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 437 + cmd->cmd.cmd_load_ta.app_phy_addr_lo = 
lower_32_bits(ta_bin_mc); 438 + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc); 439 + cmd->cmd.cmd_load_ta.app_len = ta_bin_size; 438 440 439 - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(xgmi_mc_shared); 440 - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(xgmi_mc_shared); 441 - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; 441 + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc); 442 + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc); 443 + cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size; 442 444 } 443 445 444 446 static int psp_xgmi_init_shared_buf(struct psp_context *psp) ··· 460 458 return ret; 461 459 } 462 460 461 + static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 462 + uint32_t ta_cmd_id, 463 + uint32_t session_id) 464 + { 465 + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 466 + cmd->cmd.cmd_invoke_cmd.session_id = session_id; 467 + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 468 + } 469 + 470 + int psp_ta_invoke(struct psp_context *psp, 471 + uint32_t ta_cmd_id, 472 + uint32_t session_id) 473 + { 474 + int ret; 475 + struct psp_gfx_cmd_resp *cmd; 476 + 477 + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 478 + if (!cmd) 479 + return -ENOMEM; 480 + 481 + psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id); 482 + 483 + ret = psp_cmd_submit_buf(psp, NULL, cmd, 484 + psp->fence_buf_mc_addr); 485 + 486 + kfree(cmd); 487 + 488 + return ret; 489 + } 490 + 463 491 static int psp_xgmi_load(struct psp_context *psp) 464 492 { 465 493 int ret; ··· 498 466 /* 499 467 * TODO: bypass the loading in sriov for now 500 468 */ 501 - if (amdgpu_sriov_vf(psp->adev)) 502 - return 0; 503 469 504 470 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 505 471 if (!cmd) ··· 506 476 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 507 477 memcpy(psp->fw_pri_buf, psp->ta_xgmi_start_addr, psp->ta_xgmi_ucode_size); 508 478 509 - psp_prep_xgmi_ta_load_cmd_buf(cmd, 
psp->fw_pri_mc_addr, 510 - psp->xgmi_context.xgmi_shared_mc_addr, 511 - psp->ta_xgmi_ucode_size, PSP_XGMI_SHARED_MEM_SIZE); 479 + psp_prep_ta_load_cmd_buf(cmd, 480 + psp->fw_pri_mc_addr, 481 + psp->ta_xgmi_ucode_size, 482 + psp->xgmi_context.xgmi_shared_mc_addr, 483 + PSP_XGMI_SHARED_MEM_SIZE); 512 484 513 485 ret = psp_cmd_submit_buf(psp, NULL, cmd, 514 486 psp->fence_buf_mc_addr); ··· 525 493 return ret; 526 494 } 527 495 528 - static void psp_prep_xgmi_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 529 - uint32_t xgmi_session_id) 530 - { 531 - cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 532 - cmd->cmd.cmd_unload_ta.session_id = xgmi_session_id; 533 - } 534 - 535 496 static int psp_xgmi_unload(struct psp_context *psp) 536 497 { 537 498 int ret; ··· 533 508 /* 534 509 * TODO: bypass the unloading in sriov for now 535 510 */ 536 - if (amdgpu_sriov_vf(psp->adev)) 537 - return 0; 538 511 539 512 cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 540 513 if (!cmd) 541 514 return -ENOMEM; 542 515 543 - psp_prep_xgmi_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); 516 + psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.session_id); 544 517 545 518 ret = psp_cmd_submit_buf(psp, NULL, cmd, 546 519 psp->fence_buf_mc_addr); ··· 548 525 return ret; 549 526 } 550 527 551 - static void psp_prep_xgmi_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 552 - uint32_t ta_cmd_id, 553 - uint32_t xgmi_session_id) 554 - { 555 - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 556 - cmd->cmd.cmd_invoke_cmd.session_id = xgmi_session_id; 557 - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 558 - /* Note: cmd_invoke_cmd.buf is not used for now */ 559 - } 560 - 561 528 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 562 529 { 563 - int ret; 564 - struct psp_gfx_cmd_resp *cmd; 565 - 566 - /* 567 - * TODO: bypass the loading in sriov for now 568 - */ 569 - if (amdgpu_sriov_vf(psp->adev)) 570 - return 0; 571 - 572 - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 573 - 
if (!cmd) 574 - return -ENOMEM; 575 - 576 - psp_prep_xgmi_ta_invoke_cmd_buf(cmd, ta_cmd_id, 577 - psp->xgmi_context.session_id); 578 - 579 - ret = psp_cmd_submit_buf(psp, NULL, cmd, 580 - psp->fence_buf_mc_addr); 581 - 582 - kfree(cmd); 583 - 584 - return ret; 530 + return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id); 585 531 } 586 532 587 533 static int psp_xgmi_terminate(struct psp_context *psp) ··· 606 614 } 607 615 608 616 // ras begin 609 - static void psp_prep_ras_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 610 - uint64_t ras_ta_mc, uint64_t ras_mc_shared, 611 - uint32_t ras_ta_size, uint32_t shared_size) 612 - { 613 - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 614 - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ras_ta_mc); 615 - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ras_ta_mc); 616 - cmd->cmd.cmd_load_ta.app_len = ras_ta_size; 617 - 618 - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ras_mc_shared); 619 - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ras_mc_shared); 620 - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; 621 - } 622 - 623 617 static int psp_ras_init_shared_buf(struct psp_context *psp) 624 618 { 625 619 int ret; ··· 641 663 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 642 664 memcpy(psp->fw_pri_buf, psp->ta_ras_start_addr, psp->ta_ras_ucode_size); 643 665 644 - psp_prep_ras_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, 645 - psp->ras.ras_shared_mc_addr, 646 - psp->ta_ras_ucode_size, PSP_RAS_SHARED_MEM_SIZE); 666 + psp_prep_ta_load_cmd_buf(cmd, 667 + psp->fw_pri_mc_addr, 668 + psp->ta_ras_ucode_size, 669 + psp->ras.ras_shared_mc_addr, 670 + PSP_RAS_SHARED_MEM_SIZE); 647 671 648 672 ret = psp_cmd_submit_buf(psp, NULL, cmd, 649 673 psp->fence_buf_mc_addr); 650 674 651 675 if (!ret) { 652 - psp->ras.ras_initialized = 1; 676 + psp->ras.ras_initialized = true; 653 677 psp->ras.session_id = cmd->resp.session_id; 654 678 } 655 679 656 680 kfree(cmd); 657 681 658 682 return ret; 659 - } 660 - 661 - static void 
psp_prep_ras_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 662 - uint32_t ras_session_id) 663 - { 664 - cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 665 - cmd->cmd.cmd_unload_ta.session_id = ras_session_id; 666 683 } 667 684 668 685 static int psp_ras_unload(struct psp_context *psp) ··· 675 702 if (!cmd) 676 703 return -ENOMEM; 677 704 678 - psp_prep_ras_ta_unload_cmd_buf(cmd, psp->ras.session_id); 705 + psp_prep_ta_unload_cmd_buf(cmd, psp->ras.session_id); 679 706 680 707 ret = psp_cmd_submit_buf(psp, NULL, cmd, 681 708 psp->fence_buf_mc_addr); ··· 685 712 return ret; 686 713 } 687 714 688 - static void psp_prep_ras_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 689 - uint32_t ta_cmd_id, 690 - uint32_t ras_session_id) 691 - { 692 - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 693 - cmd->cmd.cmd_invoke_cmd.session_id = ras_session_id; 694 - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 695 - /* Note: cmd_invoke_cmd.buf is not used for now */ 696 - } 697 - 698 715 int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 699 716 { 700 - int ret; 701 - struct psp_gfx_cmd_resp *cmd; 702 - 703 717 /* 704 718 * TODO: bypass the loading in sriov for now 705 719 */ 706 720 if (amdgpu_sriov_vf(psp->adev)) 707 721 return 0; 708 722 709 - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 710 - if (!cmd) 711 - return -ENOMEM; 712 - 713 - psp_prep_ras_ta_invoke_cmd_buf(cmd, ta_cmd_id, 714 - psp->ras.session_id); 715 - 716 - ret = psp_cmd_submit_buf(psp, NULL, cmd, 717 - psp->fence_buf_mc_addr); 718 - 719 - kfree(cmd); 720 - 721 - return ret; 723 + return psp_ta_invoke(psp, ta_cmd_id, psp->ras.session_id); 722 724 } 723 725 724 726 int psp_ras_enable_features(struct psp_context *psp, ··· 739 791 if (ret) 740 792 return ret; 741 793 742 - psp->ras.ras_initialized = 0; 794 + psp->ras.ras_initialized = false; 743 795 744 796 /* free ras shared memory */ 745 797 amdgpu_bo_free_kernel(&psp->ras.ras_shared_bo, ··· 780 832 // ras end 781 833 782 834 // HDCP start 783 - static void 
psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 784 - uint64_t hdcp_ta_mc, 785 - uint64_t hdcp_mc_shared, 786 - uint32_t hdcp_ta_size, 787 - uint32_t shared_size) 788 - { 789 - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 790 - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc); 791 - cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc); 792 - cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size; 793 - 794 - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 795 - lower_32_bits(hdcp_mc_shared); 796 - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 797 - upper_32_bits(hdcp_mc_shared); 798 - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; 799 - } 800 - 801 835 static int psp_hdcp_init_shared_buf(struct psp_context *psp) 802 836 { 803 837 int ret; ··· 816 886 memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, 817 887 psp->ta_hdcp_ucode_size); 818 888 819 - psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, 820 - psp->hdcp_context.hdcp_shared_mc_addr, 821 - psp->ta_hdcp_ucode_size, 822 - PSP_HDCP_SHARED_MEM_SIZE); 889 + psp_prep_ta_load_cmd_buf(cmd, 890 + psp->fw_pri_mc_addr, 891 + psp->ta_hdcp_ucode_size, 892 + psp->hdcp_context.hdcp_shared_mc_addr, 893 + PSP_HDCP_SHARED_MEM_SIZE); 823 894 824 895 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 825 896 826 897 if (!ret) { 827 - psp->hdcp_context.hdcp_initialized = 1; 898 + psp->hdcp_context.hdcp_initialized = true; 828 899 psp->hdcp_context.session_id = cmd->resp.session_id; 829 900 } 830 901 ··· 861 930 862 931 return 0; 863 932 } 864 - static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, 865 - uint32_t hdcp_session_id) 866 - { 867 - cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; 868 - cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id; 869 - } 870 933 871 934 static int psp_hdcp_unload(struct psp_context *psp) 872 935 { ··· 877 952 if (!cmd) 878 953 return -ENOMEM; 879 954 880 - psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); 955 + 
psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); 881 956 882 957 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 883 958 ··· 886 961 return ret; 887 962 } 888 963 889 - static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 890 - uint32_t ta_cmd_id, 891 - uint32_t hdcp_session_id) 892 - { 893 - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 894 - cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id; 895 - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 896 - /* Note: cmd_invoke_cmd.buf is not used for now */ 897 - } 898 - 899 964 int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 900 965 { 901 - int ret; 902 - struct psp_gfx_cmd_resp *cmd; 903 - 904 966 /* 905 967 * TODO: bypass the loading in sriov for now 906 968 */ 907 969 if (amdgpu_sriov_vf(psp->adev)) 908 970 return 0; 909 971 910 - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 911 - if (!cmd) 912 - return -ENOMEM; 913 - 914 - psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id, 915 - psp->hdcp_context.session_id); 916 - 917 - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 918 - 919 - kfree(cmd); 920 - 921 - return ret; 972 + return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.session_id); 922 973 } 923 974 924 975 static int psp_hdcp_terminate(struct psp_context *psp) ··· 914 1013 if (ret) 915 1014 return ret; 916 1015 917 - psp->hdcp_context.hdcp_initialized = 0; 1016 + psp->hdcp_context.hdcp_initialized = false; 918 1017 919 1018 /* free hdcp shared memory */ 920 1019 amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, ··· 926 1025 // HDCP end 927 1026 928 1027 // DTM start 929 - static void psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, 930 - uint64_t dtm_ta_mc, 931 - uint64_t dtm_mc_shared, 932 - uint32_t dtm_ta_size, 933 - uint32_t shared_size) 934 - { 935 - cmd->cmd_id = GFX_CMD_ID_LOAD_TA; 936 - cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc); 937 - cmd->cmd.cmd_load_ta.app_phy_addr_hi 
= upper_32_bits(dtm_ta_mc); 938 - cmd->cmd.cmd_load_ta.app_len = dtm_ta_size; 939 - 940 - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared); 941 - cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared); 942 - cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; 943 - } 944 - 945 1028 static int psp_dtm_init_shared_buf(struct psp_context *psp) 946 1029 { 947 1030 int ret; ··· 961 1076 memset(psp->fw_pri_buf, 0, PSP_1_MEG); 962 1077 memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); 963 1078 964 - psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, 965 - psp->dtm_context.dtm_shared_mc_addr, 966 - psp->ta_dtm_ucode_size, 967 - PSP_DTM_SHARED_MEM_SIZE); 1079 + psp_prep_ta_load_cmd_buf(cmd, 1080 + psp->fw_pri_mc_addr, 1081 + psp->ta_dtm_ucode_size, 1082 + psp->dtm_context.dtm_shared_mc_addr, 1083 + PSP_DTM_SHARED_MEM_SIZE); 968 1084 969 1085 ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 970 1086 971 1087 if (!ret) { 972 - psp->dtm_context.dtm_initialized = 1; 1088 + psp->dtm_context.dtm_initialized = true; 973 1089 psp->dtm_context.session_id = cmd->resp.session_id; 974 1090 } 975 1091 ··· 1008 1122 return 0; 1009 1123 } 1010 1124 1011 - static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, 1012 - uint32_t ta_cmd_id, 1013 - uint32_t dtm_session_id) 1014 - { 1015 - cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; 1016 - cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id; 1017 - cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; 1018 - /* Note: cmd_invoke_cmd.buf is not used for now */ 1019 - } 1020 - 1021 1125 int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) 1022 1126 { 1023 - int ret; 1024 - struct psp_gfx_cmd_resp *cmd; 1025 - 1026 1127 /* 1027 1128 * TODO: bypass the loading in sriov for now 1028 1129 */ 1029 1130 if (amdgpu_sriov_vf(psp->adev)) 1030 1131 return 0; 1031 1132 1032 - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); 1033 - if (!cmd) 1034 - return 
-ENOMEM; 1035 - 1036 - psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id, 1037 - psp->dtm_context.session_id); 1038 - 1039 - ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); 1040 - 1041 - kfree(cmd); 1042 - 1043 - return ret; 1133 + return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.session_id); 1044 1134 } 1045 1135 1046 1136 static int psp_dtm_terminate(struct psp_context *psp) ··· 1036 1174 if (ret) 1037 1175 return ret; 1038 1176 1039 - psp->dtm_context.dtm_initialized = 0; 1177 + psp->dtm_context.dtm_initialized = false; 1040 1178 1041 1179 /* free hdcp shared memory */ 1042 1180 amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, ··· 1171 1309 break; 1172 1310 case AMDGPU_UCODE_ID_VCN: 1173 1311 *type = GFX_FW_TYPE_VCN; 1312 + break; 1313 + case AMDGPU_UCODE_ID_VCN1: 1314 + *type = GFX_FW_TYPE_VCN1; 1174 1315 break; 1175 1316 case AMDGPU_UCODE_ID_DMCU_ERAM: 1176 1317 *type = GFX_FW_TYPE_DMCU_ERAM; ··· 1319 1454 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G 1320 1455 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL 1321 1456 || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM 1322 - || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) 1457 + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM 1458 + || ucode->ucode_id == AMDGPU_UCODE_ID_SMC)) 1323 1459 /*skip ucode loading in SRIOV VF */ 1324 1460 continue; 1325 1461 ··· 1338 1472 1339 1473 /* Start rlc autoload after psp recieved all the gfx firmware */ 1340 1474 if (psp->autoload_supported && ucode->ucode_id == (amdgpu_sriov_vf(adev) ? 
1341 - AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) { 1475 + AMDGPU_UCODE_ID_CP_MEC2 : AMDGPU_UCODE_ID_RLC_G)) { 1342 1476 ret = psp_rlc_autoload(psp); 1343 1477 if (ret) { 1344 1478 DRM_ERROR("Failed to start rlc autoload\n"); ··· 1369 1503 if (!psp->cmd) 1370 1504 return -ENOMEM; 1371 1505 1372 - /* this fw pri bo is not used under SRIOV */ 1373 - if (!amdgpu_sriov_vf(psp->adev)) { 1374 - ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 1375 - AMDGPU_GEM_DOMAIN_GTT, 1376 - &psp->fw_pri_bo, 1377 - &psp->fw_pri_mc_addr, 1378 - &psp->fw_pri_buf); 1379 - if (ret) 1380 - goto failed; 1381 - } 1506 + ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 1507 + AMDGPU_GEM_DOMAIN_GTT, 1508 + &psp->fw_pri_bo, 1509 + &psp->fw_pri_mc_addr, 1510 + &psp->fw_pri_buf); 1511 + if (ret) 1512 + goto failed; 1382 1513 1383 1514 ret = amdgpu_bo_create_kernel(adev, PSP_FENCE_BUFFER_SIZE, PAGE_SIZE, 1384 1515 AMDGPU_GEM_DOMAIN_VRAM,
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
··· 202 202 203 203 /*vram offset of the p2c training data*/ 204 204 u64 p2c_train_data_offset; 205 - struct amdgpu_bo *p2c_bo; 206 205 207 206 /*vram offset of the c2p training data*/ 208 207 u64 c2p_train_data_offset;
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 315 315 default: 316 316 ret = -EINVAL; 317 317 break; 318 - }; 318 + } 319 319 320 320 if (ret) 321 321 return -EINVAL; ··· 1311 1311 data = con->eh_data; 1312 1312 if (!data || data->count == 0) { 1313 1313 *bps = NULL; 1314 + ret = -EINVAL; 1314 1315 goto out; 1315 1316 } 1316 1317 ··· 1871 1870 * See feature_enable_on_boot 1872 1871 */ 1873 1872 amdgpu_ras_disable_all_features(adev, 1); 1874 - amdgpu_ras_reset_gpu(adev, 0); 1873 + amdgpu_ras_reset_gpu(adev); 1875 1874 } 1876 1875 } 1877 1876 ··· 1934 1933 if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { 1935 1934 DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n"); 1936 1935 1937 - amdgpu_ras_reset_gpu(adev, false); 1936 + amdgpu_ras_reset_gpu(adev); 1938 1937 } 1939 1938 }
+1 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
··· 494 494 495 495 int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev); 496 496 497 - static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev, 498 - bool is_baco) 497 + static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) 499 498 { 500 499 struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); 501 500
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
··· 160 160 struct amdgpu_iv_entry *entry) 161 161 { 162 162 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 163 - amdgpu_ras_reset_gpu(adev, 0); 163 + amdgpu_ras_reset_gpu(adev); 164 164 165 165 return AMDGPU_RAS_SUCCESS; 166 166 }
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
··· 52 52 53 53 struct amdgpu_sdma { 54 54 struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES]; 55 + struct drm_gpu_scheduler *sdma_sched[AMDGPU_MAX_SDMA_INSTANCES]; 56 + uint32_t num_sdma_sched; 55 57 struct amdgpu_irq_src trap_irq; 56 58 struct amdgpu_irq_src illegal_inst_irq; 57 59 struct amdgpu_irq_src ecc_irq;
+16 -23
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 1714 1714 amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL); 1715 1715 ctx->c2p_bo = NULL; 1716 1716 1717 - amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL); 1718 - ctx->p2c_bo = NULL; 1719 - 1720 1717 return 0; 1718 + } 1719 + 1720 + static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size) 1721 + { 1722 + if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1) ) 1723 + vram_size -= SZ_1M; 1724 + 1725 + return ALIGN(vram_size, SZ_1M); 1721 1726 } 1722 1727 1723 1728 /** ··· 1743 1738 return 0; 1744 1739 } 1745 1740 1746 - ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc; 1741 + ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size); 1747 1742 ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); 1748 1743 ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; 1749 1744 ··· 1753 1748 ctx->c2p_train_data_offset); 1754 1749 1755 1750 ret = amdgpu_bo_create_kernel_at(adev, 1756 - ctx->p2c_train_data_offset, 1757 - ctx->train_data_size, 1758 - AMDGPU_GEM_DOMAIN_VRAM, 1759 - &ctx->p2c_bo, 1760 - NULL); 1761 - if (ret) { 1762 - DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret); 1763 - goto Err_out; 1764 - } 1765 - 1766 - ret = amdgpu_bo_create_kernel_at(adev, 1767 1751 ctx->c2p_train_data_offset, 1768 1752 ctx->train_data_size, 1769 1753 AMDGPU_GEM_DOMAIN_VRAM, ··· 1760 1766 NULL); 1761 1767 if (ret) { 1762 1768 DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); 1763 - goto Err_out; 1769 + amdgpu_ttm_training_reserve_vram_fini(adev); 1770 + return ret; 1764 1771 } 1765 1772 1766 1773 ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; 1767 1774 return 0; 1768 - 1769 - Err_out: 1770 - amdgpu_ttm_training_reserve_vram_fini(adev); 1771 - return ret; 1772 1775 } 1773 1776 1774 1777 /** ··· 1978 1987 1979 1988 if (enable) { 1980 1989 struct amdgpu_ring *ring; 1981 - struct drm_sched_rq *rq; 1990 + struct drm_gpu_scheduler *sched; 1982 1991 1983 1992 ring = adev->mman.buffer_funcs_ring; 1984 - rq = 
&ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 1985 - r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL); 1993 + sched = &ring->sched; 1994 + r = drm_sched_entity_init(&adev->mman.entity, 1995 + DRM_SCHED_PRIORITY_KERNEL, &sched, 1996 + 1, NULL); 1986 1997 if (r) { 1987 1998 DRM_ERROR("Failed setting up TTM BO move entity (%d)\n", 1988 1999 r);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
··· 300 300 AMDGPU_UCODE_ID_CP_MEC2_JT, 301 301 AMDGPU_UCODE_ID_CP_MES, 302 302 AMDGPU_UCODE_ID_CP_MES_DATA, 303 - AMDGPU_UCODE_ID_RLC_G, 304 303 AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL, 305 304 AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM, 306 305 AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM, 306 + AMDGPU_UCODE_ID_RLC_G, 307 307 AMDGPU_UCODE_ID_STORAGE, 308 308 AMDGPU_UCODE_ID_SMC, 309 309 AMDGPU_UCODE_ID_UVD,
+2 -8
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
··· 95 95 { 96 96 struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; 97 97 98 - /* When “Full RAS” is enabled, the per-IP interrupt sources should 99 - * be disabled and the driver should only look for the aggregated 100 - * interrupt via sync flood 101 - */ 102 - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) 103 - return AMDGPU_RAS_SUCCESS; 104 - 105 98 kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); 106 99 if (adev->umc.funcs && 107 100 adev->umc.funcs->query_ras_error_count) ··· 106 113 err_data->err_addr = 107 114 kcalloc(adev->umc.max_ras_err_cnt_per_query, 108 115 sizeof(struct eeprom_table_record), GFP_KERNEL); 116 + 109 117 /* still call query_ras_error_address to clear error status 110 118 * even NOMEM error is encountered 111 119 */ ··· 126 132 err_data->err_addr_cnt)) 127 133 DRM_WARN("Failed to add ras bad page!\n"); 128 134 129 - amdgpu_ras_reset_gpu(adev, 0); 135 + amdgpu_ras_reset_gpu(adev); 130 136 } 131 137 132 138 kfree(err_data->err_addr);
-35
drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
··· 21 21 #ifndef __AMDGPU_UMC_H__ 22 22 #define __AMDGPU_UMC_H__ 23 23 24 - /* implement 64 bits REG operations via 32 bits interface */ 25 - #define RREG64_UMC(reg) (RREG32(reg) | \ 26 - ((uint64_t)RREG32((reg) + 1) << 32)) 27 - #define WREG64_UMC(reg, v) \ 28 - do { \ 29 - WREG32((reg), lower_32_bits(v)); \ 30 - WREG32((reg) + 1, upper_32_bits(v)); \ 31 - } while (0) 32 - 33 - /* 34 - * void (*func)(struct amdgpu_device *adev, struct ras_err_data *err_data, 35 - * uint32_t umc_reg_offset, uint32_t channel_index) 36 - */ 37 - #define amdgpu_umc_for_each_channel(func) \ 38 - struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; \ 39 - uint32_t umc_inst, channel_inst, umc_reg_offset, channel_index; \ 40 - for (umc_inst = 0; umc_inst < adev->umc.umc_inst_num; umc_inst++) { \ 41 - /* enable the index mode to query eror count per channel */ \ 42 - adev->umc.funcs->enable_umc_index_mode(adev, umc_inst); \ 43 - for (channel_inst = 0; \ 44 - channel_inst < adev->umc.channel_inst_num; \ 45 - channel_inst++) { \ 46 - /* calc the register offset according to channel instance */ \ 47 - umc_reg_offset = adev->umc.channel_offs * channel_inst; \ 48 - /* get channel index of interleaved memory */ \ 49 - channel_index = adev->umc.channel_idx_tbl[ \ 50 - umc_inst * adev->umc.channel_inst_num + channel_inst]; \ 51 - (func)(adev, err_data, umc_reg_offset, channel_index); \ 52 - } \ 53 - } \ 54 - adev->umc.funcs->disable_umc_index_mode(adev); 55 - 56 24 struct amdgpu_umc_funcs { 57 25 void (*err_cnt_init)(struct amdgpu_device *adev); 58 26 int (*ras_late_init)(struct amdgpu_device *adev); ··· 28 60 void *ras_error_status); 29 61 void (*query_ras_error_address)(struct amdgpu_device *adev, 30 62 void *ras_error_status); 31 - void (*enable_umc_index_mode)(struct amdgpu_device *adev, 32 - uint32_t umc_instance); 33 - void (*disable_umc_index_mode)(struct amdgpu_device *adev); 34 63 void (*init_registers)(struct amdgpu_device *adev); 35 64 }; 36 65
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 330 330 int amdgpu_uvd_entity_init(struct amdgpu_device *adev) 331 331 { 332 332 struct amdgpu_ring *ring; 333 - struct drm_sched_rq *rq; 333 + struct drm_gpu_scheduler *sched; 334 334 int r; 335 335 336 336 ring = &adev->uvd.inst[0].ring; 337 - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 338 - r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL); 337 + sched = &ring->sched; 338 + r = drm_sched_entity_init(&adev->uvd.entity, DRM_SCHED_PRIORITY_NORMAL, 339 + &sched, 1, NULL); 339 340 if (r) { 340 341 DRM_ERROR("Failed setting up UVD kernel entity.\n"); 341 342 return r;
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
··· 240 240 int amdgpu_vce_entity_init(struct amdgpu_device *adev) 241 241 { 242 242 struct amdgpu_ring *ring; 243 - struct drm_sched_rq *rq; 243 + struct drm_gpu_scheduler *sched; 244 244 int r; 245 245 246 246 ring = &adev->vce.ring[0]; 247 - rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 248 - r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL); 247 + sched = &ring->sched; 248 + r = drm_sched_entity_init(&adev->vce.entity, DRM_SCHED_PRIORITY_NORMAL, 249 + &sched, 1, NULL); 249 250 if (r != 0) { 250 251 DRM_ERROR("Failed setting up VCE run queue.\n"); 251 252 return r;
+15 -32
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 28 28 #include <linux/module.h> 29 29 #include <linux/pci.h> 30 30 31 - #include <drm/drm.h> 32 - 33 31 #include "amdgpu.h" 34 32 #include "amdgpu_pm.h" 35 33 #include "amdgpu_vcn.h" 36 34 #include "soc15d.h" 37 - #include "soc15_common.h" 38 - 39 - #include "vcn/vcn_1_0_offset.h" 40 - #include "vcn/vcn_1_0_sh_mask.h" 41 - 42 - /* 1 second timeout */ 43 - #define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) 44 35 45 36 /* Firmware Names */ 46 37 #define FIRMWARE_RAVEN "amdgpu/raven_vcn.bin" ··· 285 294 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { 286 295 if (adev->vcn.harvest_config & (1 << j)) 287 296 continue; 297 + 288 298 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 289 299 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_enc[i]); 290 300 } ··· 298 306 else 299 307 new_state.fw_based = VCN_DPG_STATE__UNPAUSE; 300 308 301 - if (amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec)) 302 - new_state.jpeg = VCN_DPG_STATE__PAUSE; 303 - else 304 - new_state.jpeg = VCN_DPG_STATE__UNPAUSE; 305 - 306 309 adev->vcn.pause_dpg_mode(adev, &new_state); 307 310 } 308 311 309 - fence[j] += amdgpu_fence_count_emitted(&adev->jpeg.inst[j].ring_dec); 310 312 fence[j] += amdgpu_fence_count_emitted(&adev->vcn.inst[j].ring_dec); 311 313 fences += fence[j]; 312 314 } 313 315 314 316 if (fences == 0) { 315 317 amdgpu_gfx_off_ctrl(adev, true); 316 - if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled) 317 - amdgpu_dpm_enable_uvd(adev, false); 318 - else 319 - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, 320 - AMD_PG_STATE_GATE); 318 + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, 319 + AMD_PG_STATE_GATE); 321 320 } else { 322 321 schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); 323 322 } ··· 321 338 322 339 if (set_clocks) { 323 340 amdgpu_gfx_off_ctrl(adev, false); 324 - if (adev->asic_type < CHIP_ARCTURUS && adev->pm.dpm_enabled) 325 - amdgpu_dpm_enable_uvd(adev, true); 326 - else 327 - 
amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, 328 - AMD_PG_STATE_UNGATE); 341 + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, 342 + AMD_PG_STATE_UNGATE); 329 343 } 330 344 331 345 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { ··· 338 358 else 339 359 new_state.fw_based = VCN_DPG_STATE__UNPAUSE; 340 360 341 - if (amdgpu_fence_count_emitted(&adev->jpeg.inst[ring->me].ring_dec)) 342 - new_state.jpeg = VCN_DPG_STATE__PAUSE; 343 - else 344 - new_state.jpeg = VCN_DPG_STATE__UNPAUSE; 345 - 346 361 if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) 347 362 new_state.fw_based = VCN_DPG_STATE__PAUSE; 348 - else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) 349 - new_state.jpeg = VCN_DPG_STATE__PAUSE; 350 363 351 364 adev->vcn.pause_dpg_mode(adev, &new_state); 352 365 } ··· 491 518 492 519 int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout) 493 520 { 521 + struct amdgpu_device *adev = ring->adev; 494 522 struct dma_fence *fence; 495 523 long r; 524 + 525 + /* temporarily disable ib test for sriov */ 526 + if (amdgpu_sriov_vf(adev)) 527 + return 0; 496 528 497 529 r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL); 498 530 if (r) ··· 654 676 655 677 int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout) 656 678 { 679 + struct amdgpu_device *adev = ring->adev; 657 680 struct dma_fence *fence = NULL; 658 681 struct amdgpu_bo *bo = NULL; 659 682 long r; 683 + 684 + /* temporarily disable ib test for sriov */ 685 + if (amdgpu_sriov_vf(adev)) 686 + return 0; 660 687 661 688 r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE, 662 689 AMDGPU_GEM_DOMAIN_VRAM,
+10 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
··· 31 31 #define AMDGPU_VCN_MAX_ENC_RINGS 3 32 32 33 33 #define AMDGPU_MAX_VCN_INSTANCES 2 34 + #define AMDGPU_MAX_VCN_ENC_RINGS AMDGPU_VCN_MAX_ENC_RINGS * AMDGPU_MAX_VCN_INSTANCES 34 35 35 36 #define AMDGPU_VCN_HARVEST_VCN0 (1 << 0) 36 37 #define AMDGPU_VCN_HARVEST_VCN1 (1 << 1) ··· 56 55 #define VCN_AON_SOC_ADDRESS_2_0 0x1f800 57 56 #define VCN_VID_IP_ADDRESS_2_0 0x0 58 57 #define VCN_AON_IP_ADDRESS_2_0 0x30000 58 + 59 + /* 1 second timeout */ 60 + #define VCN_IDLE_TIMEOUT msecs_to_jiffies(1000) 59 61 60 62 #define RREG32_SOC15_DPG_MODE(ip, inst, reg, mask, sram_sel) \ 61 63 ({ WREG32_SOC15(ip, inst, mmUVD_DPG_LMA_MASK, mask); \ ··· 190 186 uint32_t *dpg_sram_curr_addr; 191 187 192 188 uint8_t num_vcn_inst; 193 - struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; 194 - struct amdgpu_vcn_reg internal; 189 + struct amdgpu_vcn_inst inst[AMDGPU_MAX_VCN_INSTANCES]; 190 + struct amdgpu_vcn_reg internal; 191 + struct drm_gpu_scheduler *vcn_enc_sched[AMDGPU_MAX_VCN_ENC_RINGS]; 192 + struct drm_gpu_scheduler *vcn_dec_sched[AMDGPU_MAX_VCN_INSTANCES]; 193 + uint32_t num_vcn_enc_sched; 194 + uint32_t num_vcn_dec_sched; 195 195 196 196 unsigned harvest_config; 197 197 int (*pause_dpg_mode)(struct amdgpu_device *adev,
+7 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 2753 2753 spin_lock_init(&vm->invalidated_lock); 2754 2754 INIT_LIST_HEAD(&vm->freed); 2755 2755 2756 + 2756 2757 /* create scheduler entities for page table updates */ 2757 - r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs, 2758 - adev->vm_manager.vm_pte_num_rqs, NULL); 2758 + r = drm_sched_entity_init(&vm->direct, DRM_SCHED_PRIORITY_NORMAL, 2759 + adev->vm_manager.vm_pte_scheds, 2760 + adev->vm_manager.vm_pte_num_scheds, NULL); 2759 2761 if (r) 2760 2762 return r; 2761 2763 2762 - r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs, 2763 - adev->vm_manager.vm_pte_num_rqs, NULL); 2764 + r = drm_sched_entity_init(&vm->delayed, DRM_SCHED_PRIORITY_NORMAL, 2765 + adev->vm_manager.vm_pte_scheds, 2766 + adev->vm_manager.vm_pte_num_scheds, NULL); 2764 2767 if (r) 2765 2768 goto error_free_direct; 2766 2769
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 327 327 u64 vram_base_offset; 328 328 /* vm pte handling */ 329 329 const struct amdgpu_vm_pte_funcs *vm_pte_funcs; 330 - struct drm_sched_rq *vm_pte_rqs[AMDGPU_MAX_RINGS]; 331 - unsigned vm_pte_num_rqs; 330 + struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS]; 331 + unsigned vm_pte_num_scheds; 332 332 struct amdgpu_ring *page_fault; 333 333 334 334 /* partial resident texture handling */
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
··· 261 261 INIT_LIST_HEAD(&tmp->device_list); 262 262 mutex_init(&tmp->hive_lock); 263 263 mutex_init(&tmp->reset_lock); 264 + task_barrier_init(&tmp->tb); 264 265 265 266 if (lock) 266 267 mutex_lock(&tmp->hive_lock); ··· 409 408 top_info->num_nodes = count; 410 409 hive->number_devices = count; 411 410 411 + task_barrier_add_task(&hive->tb); 412 + 412 413 if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) { 413 414 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { 414 415 /* update node list for other device in the hive */ ··· 473 470 mutex_destroy(&hive->hive_lock); 474 471 mutex_destroy(&hive->reset_lock); 475 472 } else { 473 + task_barrier_rem_task(&hive->tb); 476 474 amdgpu_xgmi_sysfs_rem_dev_info(adev, hive); 477 475 mutex_unlock(&hive->hive_lock); 478 476 }
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
··· 22 22 #ifndef __AMDGPU_XGMI_H__ 23 23 #define __AMDGPU_XGMI_H__ 24 24 25 + #include <drm/task_barrier.h> 25 26 #include "amdgpu_psp.h" 26 27 27 28 struct amdgpu_hive_info { ··· 34 33 struct device_attribute dev_attr; 35 34 struct amdgpu_device *adev; 36 35 int pstate; /*0 -- low , 1 -- high , -1 unknown*/ 36 + struct task_barrier tb; 37 37 }; 38 38 39 39 struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
+5 -7
drivers/gpu/drm/amd/amdgpu/cik_sdma.c
··· 228 228 u32 extra_bits = vmid & 0xf; 229 229 230 230 /* IB packet must end on a 8 DW boundary */ 231 - cik_sdma_ring_insert_nop(ring, (12 - (lower_32_bits(ring->wptr) & 7)) % 8); 231 + cik_sdma_ring_insert_nop(ring, (4 - lower_32_bits(ring->wptr)) & 7); 232 232 233 233 amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_INDIRECT_BUFFER, 0, extra_bits)); 234 234 amdgpu_ring_write(ring, ib->gpu_addr & 0xffffffe0); /* base must be 32 byte aligned */ ··· 811 811 u32 pad_count; 812 812 int i; 813 813 814 - pad_count = (8 - (ib->length_dw & 0x7)) % 8; 814 + pad_count = (-ib->length_dw) & 7; 815 815 for (i = 0; i < pad_count; i++) 816 816 if (sdma && sdma->burst_nop && (i == 0)) 817 817 ib->ptr[ib->length_dw++] = ··· 1372 1372 1373 1373 static void cik_sdma_set_vm_pte_funcs(struct amdgpu_device *adev) 1374 1374 { 1375 - struct drm_gpu_scheduler *sched; 1376 1375 unsigned i; 1377 1376 1378 1377 adev->vm_manager.vm_pte_funcs = &cik_sdma_vm_pte_funcs; 1379 1378 for (i = 0; i < adev->sdma.num_instances; i++) { 1380 - sched = &adev->sdma.instance[i].ring.sched; 1381 - adev->vm_manager.vm_pte_rqs[i] = 1382 - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 1379 + adev->vm_manager.vm_pte_scheds[i] = 1380 + &adev->sdma.instance[i].ring.sched; 1383 1381 } 1384 - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; 1382 + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; 1385 1383 } 1386 1384 1387 1385 const struct amdgpu_ip_block_version cik_sdma_ip_block =
+129 -22
drivers/gpu/drm/amd/amdgpu/df_v3_6.c
··· 183 183 spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 184 184 } 185 185 186 + /* same as perfmon_wreg but return status on write value check */ 187 + static int df_v3_6_perfmon_arm_with_status(struct amdgpu_device *adev, 188 + uint32_t lo_addr, uint32_t lo_val, 189 + uint32_t hi_addr, uint32_t hi_val) 190 + { 191 + unsigned long flags, address, data; 192 + uint32_t lo_val_rb, hi_val_rb; 193 + 194 + address = adev->nbio.funcs->get_pcie_index_offset(adev); 195 + data = adev->nbio.funcs->get_pcie_data_offset(adev); 196 + 197 + spin_lock_irqsave(&adev->pcie_idx_lock, flags); 198 + WREG32(address, lo_addr); 199 + WREG32(data, lo_val); 200 + WREG32(address, hi_addr); 201 + WREG32(data, hi_val); 202 + 203 + WREG32(address, lo_addr); 204 + lo_val_rb = RREG32(data); 205 + WREG32(address, hi_addr); 206 + hi_val_rb = RREG32(data); 207 + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); 208 + 209 + if (!(lo_val == lo_val_rb && hi_val == hi_val_rb)) 210 + return -EBUSY; 211 + 212 + return 0; 213 + } 214 + 215 + 216 + /* 217 + * retry arming counters every 100 usecs within 1 millisecond interval. 218 + * if retry fails after time out, return error. 219 + */ 220 + #define ARM_RETRY_USEC_TIMEOUT 1000 221 + #define ARM_RETRY_USEC_INTERVAL 100 222 + static int df_v3_6_perfmon_arm_with_retry(struct amdgpu_device *adev, 223 + uint32_t lo_addr, uint32_t lo_val, 224 + uint32_t hi_addr, uint32_t hi_val) 225 + { 226 + int countdown = ARM_RETRY_USEC_TIMEOUT; 227 + 228 + while (countdown) { 229 + 230 + if (!df_v3_6_perfmon_arm_with_status(adev, lo_addr, lo_val, 231 + hi_addr, hi_val)) 232 + break; 233 + 234 + countdown -= ARM_RETRY_USEC_INTERVAL; 235 + udelay(ARM_RETRY_USEC_INTERVAL); 236 + } 237 + 238 + return countdown > 0 ? 
0 : -ETIME; 239 + } 240 + 186 241 /* get the number of df counters available */ 187 242 static ssize_t df_v3_6_get_df_cntr_avail(struct device *dev, 188 243 struct device_attribute *attr, ··· 389 334 switch (target_cntr) { 390 335 391 336 case 0: 392 - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo0 : smnPerfMonCtrLo0; 393 - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi0 : smnPerfMonCtrHi0; 337 + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo4 : smnPerfMonCtrLo4; 338 + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi4 : smnPerfMonCtrHi4; 394 339 break; 395 340 case 1: 396 - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo1 : smnPerfMonCtrLo1; 397 - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi1 : smnPerfMonCtrHi1; 341 + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo5 : smnPerfMonCtrLo5; 342 + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi5 : smnPerfMonCtrHi5; 398 343 break; 399 344 case 2: 400 - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo2 : smnPerfMonCtrLo2; 401 - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi2 : smnPerfMonCtrHi2; 345 + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo6 : smnPerfMonCtrLo6; 346 + *hi_base_addr = is_ctrl ? smnPerfMonCtlHi6 : smnPerfMonCtrHi6; 402 347 break; 403 348 case 3: 404 - *lo_base_addr = is_ctrl ? smnPerfMonCtlLo3 : smnPerfMonCtrLo3; 405 - *hi_base_addr = is_ctrl ? smnPerfMonCtlHi3 : smnPerfMonCtrHi3; 349 + *lo_base_addr = is_ctrl ? smnPerfMonCtlLo7 : smnPerfMonCtrLo7; 350 + *hi_base_addr = is_ctrl ? 
smnPerfMonCtlHi7 : smnPerfMonCtrHi7; 406 351 break; 407 352 408 353 } ··· 477 422 return -ENOSPC; 478 423 } 479 424 425 + #define DEFERRED_ARM_MASK (1 << 31) 426 + static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev, 427 + uint64_t config, bool is_deferred) 428 + { 429 + int target_cntr; 430 + 431 + target_cntr = df_v3_6_pmc_config_2_cntr(adev, config); 432 + 433 + if (target_cntr < 0) 434 + return -EINVAL; 435 + 436 + if (is_deferred) 437 + adev->df_perfmon_config_assign_mask[target_cntr] |= 438 + DEFERRED_ARM_MASK; 439 + else 440 + adev->df_perfmon_config_assign_mask[target_cntr] &= 441 + ~DEFERRED_ARM_MASK; 442 + 443 + return 0; 444 + } 445 + 446 + static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev, 447 + uint64_t config) 448 + { 449 + int target_cntr; 450 + 451 + target_cntr = df_v3_6_pmc_config_2_cntr(adev, config); 452 + 453 + /* 454 + * we never get target_cntr < 0 since this funciton is only called in 455 + * pmc_count for now but we should check anyways. 456 + */ 457 + return (target_cntr >= 0 && 458 + (adev->df_perfmon_config_assign_mask[target_cntr] 459 + & DEFERRED_ARM_MASK)); 460 + 461 + } 462 + 480 463 /* release performance counter */ 481 464 static void df_v3_6_pmc_release_cntr(struct amdgpu_device *adev, 482 465 uint64_t config) ··· 544 451 int is_enable) 545 452 { 546 453 uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; 547 - int ret = 0; 454 + int err = 0, ret = 0; 548 455 549 456 switch (adev->asic_type) { 550 457 case CHIP_VEGA20: 458 + if (is_enable) 459 + return df_v3_6_pmc_add_cntr(adev, config); 551 460 552 461 df_v3_6_reset_perfmon_cntr(adev, config); 553 462 554 - if (is_enable) { 555 - ret = df_v3_6_pmc_add_cntr(adev, config); 556 - } else { 557 - ret = df_v3_6_pmc_get_ctrl_settings(adev, 463 + ret = df_v3_6_pmc_get_ctrl_settings(adev, 558 464 config, 559 465 &lo_base_addr, 560 466 &hi_base_addr, 561 467 &lo_val, 562 468 &hi_val); 563 469 564 - if (ret) 565 - return ret; 470 + if (ret) 471 + return ret; 566 472 
567 - df_v3_6_perfmon_wreg(adev, lo_base_addr, lo_val, 568 - hi_base_addr, hi_val); 569 - } 473 + err = df_v3_6_perfmon_arm_with_retry(adev, 474 + lo_base_addr, 475 + lo_val, 476 + hi_base_addr, 477 + hi_val); 478 + 479 + if (err) 480 + ret = df_v3_6_pmc_set_deferred(adev, config, true); 570 481 571 482 break; 572 483 default: ··· 598 501 if (ret) 599 502 return ret; 600 503 601 - df_v3_6_perfmon_wreg(adev, lo_base_addr, 0, hi_base_addr, 0); 504 + df_v3_6_reset_perfmon_cntr(adev, config); 602 505 603 506 if (is_disable) 604 507 df_v3_6_pmc_release_cntr(adev, config); ··· 615 518 uint64_t config, 616 519 uint64_t *count) 617 520 { 618 - uint32_t lo_base_addr, hi_base_addr, lo_val, hi_val; 521 + uint32_t lo_base_addr, hi_base_addr, lo_val = 0, hi_val = 0; 619 522 *count = 0; 620 523 621 524 switch (adev->asic_type) { 622 525 case CHIP_VEGA20: 623 - 624 526 df_v3_6_pmc_get_read_settings(adev, config, &lo_base_addr, 625 527 &hi_base_addr); 626 528 627 529 if ((lo_base_addr == 0) || (hi_base_addr == 0)) 628 530 return; 531 + 532 + /* rearm the counter or throw away count value on failure */ 533 + if (df_v3_6_pmc_is_deferred(adev, config)) { 534 + int rearm_err = df_v3_6_perfmon_arm_with_status(adev, 535 + lo_base_addr, lo_val, 536 + hi_base_addr, hi_val); 537 + 538 + if (rearm_err) 539 + return; 540 + 541 + df_v3_6_pmc_set_deferred(adev, config, false); 542 + } 629 543 630 544 df_v3_6_perfmon_rreg(adev, lo_base_addr, &lo_val, 631 545 hi_base_addr, &hi_val); ··· 650 542 config, lo_base_addr, hi_base_addr, lo_val, hi_val); 651 543 652 544 break; 653 - 654 545 default: 655 546 break; 656 547 }
+12 -32
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 471 471 else 472 472 udelay(1); 473 473 } 474 - if (i < adev->usec_timeout) { 475 - if (amdgpu_emu_mode == 1) 476 - DRM_INFO("ring test on %d succeeded in %d msecs\n", 477 - ring->idx, i); 478 - else 479 - DRM_INFO("ring test on %d succeeded in %d usecs\n", 480 - ring->idx, i); 481 - } else { 482 - DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n", 483 - ring->idx, scratch, tmp); 484 - r = -EINVAL; 485 - } 474 + 475 + if (i >= adev->usec_timeout) 476 + r = -ETIMEDOUT; 477 + 486 478 amdgpu_gfx_scratch_free(adev, scratch); 487 479 488 480 return r; ··· 524 532 } 525 533 526 534 tmp = RREG32(scratch); 527 - if (tmp == 0xDEADBEEF) { 528 - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 535 + if (tmp == 0xDEADBEEF) 529 536 r = 0; 530 - } else { 531 - DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n", 532 - scratch, tmp); 537 + else 533 538 r = -EINVAL; 534 - } 535 539 err2: 536 540 amdgpu_ib_free(adev, &ib, NULL); 537 541 dma_fence_put(f); ··· 576 588 } 577 589 578 590 if (adev->gfx.cp_fw_write_wait == false) 579 - DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \ 580 - GRBM requires 1-cycle delay in cp firmware\n"); 591 + DRM_WARN_ONCE("CP firmware version too old, please update!"); 581 592 } 582 593 583 594 ··· 1950 1963 rlc_autoload_info[rlc_toc->id].size = rlc_toc->size * 4; 1951 1964 1952 1965 rlc_toc++; 1953 - }; 1966 + } 1954 1967 1955 1968 return 0; 1956 1969 } ··· 3593 3606 3594 3607 for (i = 0; i < adev->gfx.num_gfx_rings; i++) { 3595 3608 ring = &adev->gfx.gfx_ring[i]; 3596 - DRM_INFO("gfx %d ring me %d pipe %d q %d\n", 3597 - i, ring->me, ring->pipe, ring->queue); 3598 - r = amdgpu_ring_test_ring(ring); 3599 - if (r) { 3600 - ring->sched.ready = false; 3609 + r = amdgpu_ring_test_helper(ring); 3610 + if (r) 3601 3611 return r; 3602 - } 3603 3612 } 3604 3613 3605 3614 for (i = 0; i < adev->gfx.num_compute_rings; i++) { 3606 3615 ring = &adev->gfx.compute_ring[i]; 3607 - ring->sched.ready = true; 
3608 - DRM_INFO("compute ring %d mec %d pipe %d q %d\n", 3609 - i, ring->me, ring->pipe, ring->queue); 3610 - r = amdgpu_ring_test_ring(ring); 3616 + r = amdgpu_ring_test_helper(ring); 3611 3617 if (r) 3612 - ring->sched.ready = false; 3618 + return r; 3613 3619 } 3614 3620 3615 3621 return 0;
+66 -39
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 48 48 49 49 #include "amdgpu_ras.h" 50 50 51 - #include "sdma0/sdma0_4_0_offset.h" 52 - #include "sdma1/sdma1_4_0_offset.h" 51 + #include "sdma0/sdma0_4_2_offset.h" 52 + #include "sdma1/sdma1_4_2_offset.h" 53 + #include "sdma2/sdma2_4_2_2_offset.h" 54 + #include "sdma3/sdma3_4_2_2_offset.h" 55 + #include "sdma4/sdma4_4_2_2_offset.h" 56 + #include "sdma5/sdma5_4_2_2_offset.h" 57 + #include "sdma6/sdma6_4_2_2_offset.h" 58 + #include "sdma7/sdma7_4_2_2_offset.h" 59 + 53 60 #define GFX9_NUM_GFX_RINGS 1 54 61 #define GFX9_MEC_HPD_SIZE 4096 55 62 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L ··· 988 981 (adev->gfx.mec_feature_version < 46) || 989 982 (adev->gfx.pfp_fw_version < 0x000000b7) || 990 983 (adev->gfx.pfp_feature_version < 46)) 991 - DRM_WARN_ONCE("Warning: check cp_fw_version and update it to realize \ 992 - GRBM requires 1-cycle delay in cp firmware\n"); 984 + DRM_WARN_ONCE("CP firmware version too old, please update!"); 993 985 994 986 switch (adev->asic_type) { 995 987 case CHIP_VEGA10: ··· 1048 1042 case CHIP_VEGA20: 1049 1043 break; 1050 1044 case CHIP_RAVEN: 1051 - /* Disable GFXOFF on original raven. There are combinations 1052 - * of sbios and platforms that are not stable. 
1053 - */ 1054 - if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8)) 1055 - adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 1056 - else if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) 1057 - &&((adev->gfx.rlc_fw_version != 106 && 1058 - adev->gfx.rlc_fw_version < 531) || 1059 - (adev->gfx.rlc_fw_version == 53815) || 1060 - (adev->gfx.rlc_feature_version < 1) || 1061 - !adev->gfx.rlc.is_rlc_v2_1)) 1045 + if (!(adev->rev_id >= 0x8 || 1046 + adev->pdev->device == 0x15d8) && 1047 + (adev->pm.fw_version < 0x41e2b || /* not raven1 fresh */ 1048 + !adev->gfx.rlc.is_rlc_v2_1)) /* without rlc save restore ucodes */ 1062 1049 adev->pm.pp_feature &= ~PP_GFXOFF_MASK; 1063 1050 1064 1051 if (adev->pm.pp_feature & PP_GFXOFF_MASK) ··· 3932 3933 0xbe800080, 0xbf810000, 3933 3934 }; 3934 3935 3936 + /* When below register arrays changed, please update gpr_reg_size, 3937 + and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds, 3938 + to cover all gfx9 ASICs */ 3935 3939 static const struct soc15_reg_entry vgpr_init_regs[] = { 3936 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, 3937 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, 3938 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, 3939 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, 3940 3940 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, 3941 3941 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, 3942 3942 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 }, 3943 3943 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, 3944 3944 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x3f }, 3945 3945 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */ 3946 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff }, 3947 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff }, 3948 + { SOC15_REG_ENTRY(GC, 0, 
mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff }, 3949 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff }, 3950 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff }, 3951 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff }, 3952 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff }, 3953 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff }, 3946 3954 }; 3947 3955 3948 3956 static const struct soc15_reg_entry sgpr1_init_regs[] = { 3957 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, 3958 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, 3959 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, 3960 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, 3961 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ 3962 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, 3949 3963 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x000000ff }, 3950 3964 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x000000ff }, 3951 3965 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x000000ff }, 3952 3966 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x000000ff }, 3967 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x000000ff }, 3968 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x000000ff }, 3969 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x000000ff }, 3970 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x000000ff }, 3971 + }; 3972 + 3973 + static const struct soc15_reg_entry sgpr2_init_regs[] = { 3953 3974 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, 3954 3975 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, 3955 3976 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, 3956 3977 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, 3957 3978 { SOC15_REG_ENTRY(GC, 0, 
mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ 3958 3979 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, 3959 - }; 3960 - 3961 - static const struct soc15_reg_entry sgpr2_init_regs[] = { 3962 3980 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0x0000ff00 }, 3963 3981 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0x0000ff00 }, 3964 3982 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0x0000ff00 }, 3965 3983 { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0x0000ff00 }, 3966 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 }, 3967 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 }, 3968 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 8 }, 3969 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 }, 3970 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x240 }, /* (80 GPRS) */ 3971 - { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x0 }, 3984 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0x0000ff00 }, 3985 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0x0000ff00 }, 3986 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0x0000ff00 }, 3987 + { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0x0000ff00 }, 3972 3988 }; 3973 3989 3974 3990 static const struct soc15_reg_entry sec_ded_counter_registers[] = { ··· 4020 4006 { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16}, 4021 4007 { SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2}, 4022 4008 { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6}, 4009 + { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1}, 4023 4010 { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_EDC_COUNTER), 0, 1, 1}, 4024 4011 { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_EDC_COUNTER), 0, 1, 1}, 4025 - { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1}, 4012 + { SOC15_REG_ENTRY(SDMA2, 0, mmSDMA2_EDC_COUNTER), 0, 1, 1}, 4013 + { SOC15_REG_ENTRY(SDMA3, 0, mmSDMA3_EDC_COUNTER), 0, 1, 1}, 4014 + { SOC15_REG_ENTRY(SDMA4, 0, mmSDMA4_EDC_COUNTER), 0, 
1, 1}, 4015 + { SOC15_REG_ENTRY(SDMA5, 0, mmSDMA5_EDC_COUNTER), 0, 1, 1}, 4016 + { SOC15_REG_ENTRY(SDMA6, 0, mmSDMA6_EDC_COUNTER), 0, 1, 1}, 4017 + { SOC15_REG_ENTRY(SDMA7, 0, mmSDMA7_EDC_COUNTER), 0, 1, 1}, 4026 4018 }; 4027 4019 4028 4020 static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev) ··· 4087 4067 unsigned total_size, vgpr_offset, sgpr_offset; 4088 4068 u64 gpu_addr; 4089 4069 4070 + int compute_dim_x = adev->gfx.config.max_shader_engines * 4071 + adev->gfx.config.max_cu_per_sh * 4072 + adev->gfx.config.max_sh_per_se; 4073 + int sgpr_work_group_size = 5; 4074 + int gpr_reg_size = compute_dim_x / 16 + 6; 4075 + int sec_ded_counter_reg_size = adev->sdma.num_instances + 34; 4076 + 4090 4077 /* only support when RAS is enabled */ 4091 4078 if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) 4092 4079 return 0; ··· 4103 4076 return 0; 4104 4077 4105 4078 total_size = 4106 - ((ARRAY_SIZE(vgpr_init_regs) * 3) + 4 + 5 + 2) * 4; 4079 + (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */ 4107 4080 total_size += 4108 - ((ARRAY_SIZE(sgpr1_init_regs) * 3) + 4 + 5 + 2) * 4; 4081 + (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS1 */ 4109 4082 total_size += 4110 - ((ARRAY_SIZE(sgpr2_init_regs) * 3) + 4 + 5 + 2) * 4; 4083 + (gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */ 4111 4084 total_size = ALIGN(total_size, 256); 4112 4085 vgpr_offset = total_size; 4113 4086 total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256); ··· 4134 4107 4135 4108 /* VGPR */ 4136 4109 /* write the register state for the compute dispatch */ 4137 - for (i = 0; i < ARRAY_SIZE(vgpr_init_regs); i++) { 4110 + for (i = 0; i < gpr_reg_size; i++) { 4138 4111 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); 4139 4112 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i]) 4140 4113 - PACKET3_SET_SH_REG_START; ··· 4150 4123 4151 4124 /* write dispatch packet */ 4152 4125 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); 4153 - ib.ptr[ib.length_dw++] = 
0x40*2; /* x */ 4126 + ib.ptr[ib.length_dw++] = compute_dim_x; /* x */ 4154 4127 ib.ptr[ib.length_dw++] = 1; /* y */ 4155 4128 ib.ptr[ib.length_dw++] = 1; /* z */ 4156 4129 ib.ptr[ib.length_dw++] = ··· 4162 4135 4163 4136 /* SGPR1 */ 4164 4137 /* write the register state for the compute dispatch */ 4165 - for (i = 0; i < ARRAY_SIZE(sgpr1_init_regs); i++) { 4138 + for (i = 0; i < gpr_reg_size; i++) { 4166 4139 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); 4167 4140 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr1_init_regs[i]) 4168 4141 - PACKET3_SET_SH_REG_START; ··· 4178 4151 4179 4152 /* write dispatch packet */ 4180 4153 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); 4181 - ib.ptr[ib.length_dw++] = 0xA0*2; /* x */ 4154 + ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */ 4182 4155 ib.ptr[ib.length_dw++] = 1; /* y */ 4183 4156 ib.ptr[ib.length_dw++] = 1; /* z */ 4184 4157 ib.ptr[ib.length_dw++] = ··· 4190 4163 4191 4164 /* SGPR2 */ 4192 4165 /* write the register state for the compute dispatch */ 4193 - for (i = 0; i < ARRAY_SIZE(sgpr2_init_regs); i++) { 4166 + for (i = 0; i < gpr_reg_size; i++) { 4194 4167 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1); 4195 4168 ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(sgpr2_init_regs[i]) 4196 4169 - PACKET3_SET_SH_REG_START; ··· 4206 4179 4207 4180 /* write dispatch packet */ 4208 4181 ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3); 4209 - ib.ptr[ib.length_dw++] = 0xA0*2; /* x */ 4182 + ib.ptr[ib.length_dw++] = compute_dim_x / 2 * sgpr_work_group_size; /* x */ 4210 4183 ib.ptr[ib.length_dw++] = 1; /* y */ 4211 4184 ib.ptr[ib.length_dw++] = 1; /* z */ 4212 4185 ib.ptr[ib.length_dw++] = ··· 4232 4205 4233 4206 /* read back registers to clear the counters */ 4234 4207 mutex_lock(&adev->grbm_idx_mutex); 4235 - for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { 4208 + for (i = 0; i < sec_ded_counter_reg_size; i++) { 4236 4209 for (j = 0; 
j < sec_ded_counter_registers[i].se_num; j++) { 4237 4210 for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { 4238 4211 gfx_v9_0_select_se_sh(adev, j, 0x0, k);
+40 -33
drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c
··· 75 75 WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 24); 76 76 WREG32_SOC15_RLC(GC, 0, mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 24); 77 77 78 - /* Program the system aperture low logical page number. */ 79 - WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 80 - min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); 78 + if (!amdgpu_sriov_vf(adev) || adev->asic_type <= CHIP_VEGA10) { 79 + /* Program the system aperture low logical page number. */ 80 + WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR, 81 + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); 81 82 82 - if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) 83 - /* 84 - * Raven2 has a HW issue that it is unable to use the vram which 85 - * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the 86 - * workaround that increase system aperture high address (add 1) 87 - * to get rid of the VM fault and hardware hang. 88 - */ 89 - WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 90 - max((adev->gmc.fb_end >> 18) + 0x1, 91 - adev->gmc.agp_end >> 18)); 92 - else 93 - WREG32_SOC15_RLC(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 94 - max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 83 + if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8) 84 + /* 85 + * Raven2 has a HW issue that it is unable to use the 86 + * vram which is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. 87 + * So here is the workaround that increase system 88 + * aperture high address (add 1) to get rid of the VM 89 + * fault and hardware hang. 90 + */ 91 + WREG32_SOC15_RLC(GC, 0, 92 + mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 93 + max((adev->gmc.fb_end >> 18) + 0x1, 94 + adev->gmc.agp_end >> 18)); 95 + else 96 + WREG32_SOC15_RLC( 97 + GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR, 98 + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 95 99 96 - /* Set default page address. 
*/ 97 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start 98 - + adev->vm_manager.vram_base_offset; 99 - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 100 - (u32)(value >> 12)); 101 - WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, 102 - (u32)(value >> 44)); 100 + /* Set default page address. */ 101 + value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 102 + adev->vm_manager.vram_base_offset; 103 + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 104 + (u32)(value >> 12)); 105 + WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, 106 + (u32)(value >> 44)); 103 107 104 - /* Program "protection fault". */ 105 - WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, 106 - (u32)(adev->dummy_page_addr >> 12)); 107 - WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, 108 - (u32)((u64)adev->dummy_page_addr >> 44)); 108 + /* Program "protection fault". */ 109 + WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, 110 + (u32)(adev->dummy_page_addr >> 12)); 111 + WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, 112 + (u32)((u64)adev->dummy_page_addr >> 44)); 109 113 110 - WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2, 111 - ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); 114 + WREG32_FIELD15(GC, 0, VM_L2_PROTECTION_FAULT_CNTL2, 115 + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); 116 + } 112 117 } 113 118 114 119 static void gfxhub_v1_0_init_tlb_regs(struct amdgpu_device *adev) ··· 269 264 270 265 int gfxhub_v1_0_gart_enable(struct amdgpu_device *adev) 271 266 { 272 - if (amdgpu_sriov_vf(adev)) { 267 + if (amdgpu_sriov_vf(adev) && adev->asic_type != CHIP_ARCTURUS) { 273 268 /* 274 269 * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are 275 270 * VF copy registers so vbios post doesn't program them, for ··· 285 280 gfxhub_v1_0_init_gart_aperture_regs(adev); 286 281 gfxhub_v1_0_init_system_aperture_regs(adev); 287 282 gfxhub_v1_0_init_tlb_regs(adev); 288 - 
gfxhub_v1_0_init_cache_regs(adev); 283 + if (!amdgpu_sriov_vf(adev)) 284 + gfxhub_v1_0_init_cache_regs(adev); 289 285 290 286 gfxhub_v1_0_enable_system_domain(adev); 291 - gfxhub_v1_0_disable_identity_aperture(adev); 287 + if (!amdgpu_sriov_vf(adev)) 288 + gfxhub_v1_0_disable_identity_aperture(adev); 292 289 gfxhub_v1_0_setup_vmid_config(adev); 293 290 gfxhub_v1_0_program_invalidation(adev); 294 291
+4 -15
drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
··· 564 564 static int gmc_v10_0_late_init(void *handle) 565 565 { 566 566 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 567 - unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 }; 568 - unsigned i; 567 + int r; 569 568 570 - for(i = 0; i < adev->num_rings; ++i) { 571 - struct amdgpu_ring *ring = adev->rings[i]; 572 - unsigned vmhub = ring->funcs->vmhub; 573 - 574 - ring->vm_inv_eng = vm_inv_eng[vmhub]++; 575 - dev_info(adev->dev, "ring %u(%s) uses VM inv eng %u on hub %u\n", 576 - ring->idx, ring->name, ring->vm_inv_eng, 577 - ring->funcs->vmhub); 578 - } 579 - 580 - /* Engine 17 is used for GART flushes */ 581 - for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i) 582 - BUG_ON(vm_inv_eng[i] > 17); 569 + r = amdgpu_gmc_allocate_vm_inv_eng(adev); 570 + if (r) 571 + return r; 583 572 584 573 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); 585 574 }
+24 -44
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 207 207 { 208 208 u32 bits, i, tmp, reg; 209 209 210 + /* Devices newer then VEGA10/12 shall have these programming 211 + sequences performed by PSP BL */ 212 + if (adev->asic_type >= CHIP_VEGA20) 213 + return 0; 214 + 210 215 bits = 0x7f; 211 216 212 217 switch (state) { ··· 398 393 adev->gmc.vm_fault.num_types = 1; 399 394 adev->gmc.vm_fault.funcs = &gmc_v9_0_irq_funcs; 400 395 401 - adev->gmc.ecc_irq.num_types = 1; 402 - adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; 396 + if (!amdgpu_sriov_vf(adev)) { 397 + adev->gmc.ecc_irq.num_types = 1; 398 + adev->gmc.ecc_irq.funcs = &gmc_v9_0_ecc_funcs; 399 + } 403 400 } 404 401 405 402 static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid, ··· 797 790 } 798 791 } 799 792 800 - static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) 801 - { 802 - struct amdgpu_ring *ring; 803 - unsigned vm_inv_engs[AMDGPU_MAX_VMHUBS] = 804 - {GFXHUB_FREE_VM_INV_ENGS_BITMAP, MMHUB_FREE_VM_INV_ENGS_BITMAP, 805 - GFXHUB_FREE_VM_INV_ENGS_BITMAP}; 806 - unsigned i; 807 - unsigned vmhub, inv_eng; 808 - 809 - for (i = 0; i < adev->num_rings; ++i) { 810 - ring = adev->rings[i]; 811 - vmhub = ring->funcs->vmhub; 812 - 813 - inv_eng = ffs(vm_inv_engs[vmhub]); 814 - if (!inv_eng) { 815 - dev_err(adev->dev, "no VM inv eng for ring %s\n", 816 - ring->name); 817 - return -EINVAL; 818 - } 819 - 820 - ring->vm_inv_eng = inv_eng - 1; 821 - vm_inv_engs[vmhub] &= ~(1 << ring->vm_inv_eng); 822 - 823 - dev_info(adev->dev, "ring %s uses VM inv eng %u on hub %u\n", 824 - ring->name, ring->vm_inv_eng, ring->funcs->vmhub); 825 - } 826 - 827 - return 0; 828 - } 829 - 830 793 static int gmc_v9_0_late_init(void *handle) 831 794 { 832 795 struct amdgpu_device *adev = (struct amdgpu_device *)handle; ··· 805 828 if (!gmc_v9_0_keep_stolen_memory(adev)) 806 829 amdgpu_bo_late_init(adev); 807 830 808 - r = gmc_v9_0_allocate_vm_inv_eng(adev); 831 + r = amdgpu_gmc_allocate_vm_inv_eng(adev); 809 832 if (r) 810 833 return r; 811 834 /* Check if ecc is 
available */ ··· 1089 1112 if (r) 1090 1113 return r; 1091 1114 1092 - /* interrupt sent to DF. */ 1093 - r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, 1094 - &adev->gmc.ecc_irq); 1095 - if (r) 1096 - return r; 1115 + if (!amdgpu_sriov_vf(adev)) { 1116 + /* interrupt sent to DF. */ 1117 + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DF, 0, 1118 + &adev->gmc.ecc_irq); 1119 + if (r) 1120 + return r; 1121 + } 1097 1122 1098 1123 /* Set the internal MC address mask 1099 1124 * This is the max address of the GPU's ··· 1281 1302 else 1282 1303 value = true; 1283 1304 1284 - gfxhub_v1_0_set_fault_enable_default(adev, value); 1285 - if (adev->asic_type == CHIP_ARCTURUS) 1286 - mmhub_v9_4_set_fault_enable_default(adev, value); 1287 - else 1288 - mmhub_v1_0_set_fault_enable_default(adev, value); 1289 - 1305 + if (!amdgpu_sriov_vf(adev)) { 1306 + gfxhub_v1_0_set_fault_enable_default(adev, value); 1307 + if (adev->asic_type == CHIP_ARCTURUS) 1308 + mmhub_v9_4_set_fault_enable_default(adev, value); 1309 + else 1310 + mmhub_v1_0_set_fault_enable_default(adev, value); 1311 + } 1290 1312 for (i = 0; i < adev->num_vmhubs; ++i) 1291 1313 gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); 1292 1314
-10
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.h
··· 24 24 #ifndef __GMC_V9_0_H__ 25 25 #define __GMC_V9_0_H__ 26 26 27 - /* 28 - * The latest engine allocation on gfx9 is: 29 - * Engine 2, 3: firmware 30 - * Engine 0, 1, 4~16: amdgpu ring, 31 - * subject to change when ring number changes 32 - * Engine 17: Gart flushes 33 - */ 34 - #define GFXHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 35 - #define MMHUB_FREE_VM_INV_ENGS_BITMAP 0x1FFF3 36 - 37 27 extern const struct amd_ip_funcs gmc_v9_0_ip_funcs; 38 28 extern const struct amdgpu_ip_block_version gmc_v9_0_ip_block; 39 29 #endif
+2 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
··· 25 25 #include "amdgpu_jpeg.h" 26 26 #include "soc15.h" 27 27 #include "soc15d.h" 28 + #include "vcn_v1_0.h" 28 29 29 30 #include "vcn/vcn_1_0_offset.h" 30 31 #include "vcn/vcn_1_0_sh_mask.h" ··· 562 561 .insert_start = jpeg_v1_0_decode_ring_insert_start, 563 562 .insert_end = jpeg_v1_0_decode_ring_insert_end, 564 563 .pad_ib = amdgpu_ring_generic_pad_ib, 565 - .begin_use = amdgpu_vcn_ring_begin_use, 564 + .begin_use = vcn_v1_0_ring_begin_use, 566 565 .end_use = amdgpu_vcn_ring_end_use, 567 566 .emit_wreg = jpeg_v1_0_decode_ring_emit_wreg, 568 567 .emit_reg_wait = jpeg_v1_0_decode_ring_emit_reg_wait,
+42 -48
drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
··· 128 128 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 129 129 adev->gmc.agp_start >> 24); 130 130 131 - /* Program the system aperture low logical page number. */ 132 - WREG32_SOC15_OFFSET(MMHUB, 0, 133 - mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR, 134 - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 135 - min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); 136 - WREG32_SOC15_OFFSET(MMHUB, 0, 137 - mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 138 - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 139 - max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 131 + if (!amdgpu_sriov_vf(adev)) { 132 + /* Program the system aperture low logical page number. */ 133 + WREG32_SOC15_OFFSET( 134 + MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_LOW_ADDR, 135 + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 136 + min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18); 137 + WREG32_SOC15_OFFSET( 138 + MMHUB, 0, mmVMSHAREDVC0_MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 139 + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 140 + max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18); 140 141 141 - /* Set default page address. */ 142 - value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 143 - adev->vm_manager.vram_base_offset; 144 - WREG32_SOC15_OFFSET(MMHUB, 0, 142 + /* Set default page address. */ 143 + value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start + 144 + adev->vm_manager.vram_base_offset; 145 + WREG32_SOC15_OFFSET( 146 + MMHUB, 0, 145 147 mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB, 146 148 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 147 149 (u32)(value >> 12)); 148 - WREG32_SOC15_OFFSET(MMHUB, 0, 150 + WREG32_SOC15_OFFSET( 151 + MMHUB, 0, 149 152 mmVMSHAREDPF0_MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB, 150 153 hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 151 154 (u32)(value >> 44)); 152 155 153 - /* Program "protection fault". 
*/ 154 - WREG32_SOC15_OFFSET(MMHUB, 0, 155 - mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, 156 - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 157 - (u32)(adev->dummy_page_addr >> 12)); 158 - WREG32_SOC15_OFFSET(MMHUB, 0, 159 - mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, 160 - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 161 - (u32)((u64)adev->dummy_page_addr >> 44)); 156 + /* Program "protection fault". */ 157 + WREG32_SOC15_OFFSET( 158 + MMHUB, 0, 159 + mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32, 160 + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 161 + (u32)(adev->dummy_page_addr >> 12)); 162 + WREG32_SOC15_OFFSET( 163 + MMHUB, 0, 164 + mmVML2PF0_VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32, 165 + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 166 + (u32)((u64)adev->dummy_page_addr >> 44)); 162 167 163 - tmp = RREG32_SOC15_OFFSET(MMHUB, 0, 164 - mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, 165 - hubid * MMHUB_INSTANCE_REGISTER_OFFSET); 166 - tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, 167 - ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); 168 - WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, 169 - hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); 168 + tmp = RREG32_SOC15_OFFSET( 169 + MMHUB, 0, mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, 170 + hubid * MMHUB_INSTANCE_REGISTER_OFFSET); 171 + tmp = REG_SET_FIELD(tmp, VML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, 172 + ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1); 173 + WREG32_SOC15_OFFSET(MMHUB, 0, 174 + mmVML2PF0_VM_L2_PROTECTION_FAULT_CNTL2, 175 + hubid * MMHUB_INSTANCE_REGISTER_OFFSET, 176 + tmp); 177 + } 170 178 } 171 179 172 180 static void mmhub_v9_4_init_tlb_regs(struct amdgpu_device *adev, int hubid) ··· 376 368 int i; 377 369 378 370 for (i = 0; i < MMHUB_NUM_INSTANCES; i++) { 379 - if (amdgpu_sriov_vf(adev)) { 380 - /* 381 - * MC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase 382 - * they are VF copy registers so vbios post doesn't 383 - * program them, for SRIOV driver need to program them 384 - */ 
385 - WREG32_SOC15_OFFSET(MMHUB, 0, 386 - mmVMSHAREDVC0_MC_VM_FB_LOCATION_BASE, 387 - i * MMHUB_INSTANCE_REGISTER_OFFSET, 388 - adev->gmc.vram_start >> 24); 389 - WREG32_SOC15_OFFSET(MMHUB, 0, 390 - mmVMSHAREDVC0_MC_VM_FB_LOCATION_TOP, 391 - i * MMHUB_INSTANCE_REGISTER_OFFSET, 392 - adev->gmc.vram_end >> 24); 393 - } 394 - 395 371 /* GART Enable. */ 396 372 mmhub_v9_4_init_gart_aperture_regs(adev, i); 397 373 mmhub_v9_4_init_system_aperture_regs(adev, i); 398 374 mmhub_v9_4_init_tlb_regs(adev, i); 399 - mmhub_v9_4_init_cache_regs(adev, i); 375 + if (!amdgpu_sriov_vf(adev)) 376 + mmhub_v9_4_init_cache_regs(adev, i); 400 377 401 378 mmhub_v9_4_enable_system_domain(adev, i); 402 - mmhub_v9_4_disable_identity_aperture(adev, i); 379 + if (!amdgpu_sriov_vf(adev)) 380 + mmhub_v9_4_disable_identity_aperture(adev, i); 403 381 mmhub_v9_4_setup_vmid_config(adev, i); 404 382 mmhub_v9_4_program_invalidation(adev, i); 405 383 }
+12
drivers/gpu/drm/amd/amdgpu/mmsch_v1_0.h
··· 47 47 uint32_t uvd_table_size; 48 48 }; 49 49 50 + struct mmsch_vf_eng_init_header { 51 + uint32_t init_status; 52 + uint32_t table_offset; 53 + uint32_t table_size; 54 + }; 55 + 56 + struct mmsch_v1_1_init_header { 57 + uint32_t version; 58 + uint32_t total_size; 59 + struct mmsch_vf_eng_init_header eng[2]; 60 + }; 61 + 50 62 struct mmsch_v1_0_cmd_direct_reg_header { 51 63 uint32_t reg_offset : 28; 52 64 uint32_t command_type : 4;
+2 -2
drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
··· 250 250 */ 251 251 locked = mutex_trylock(&adev->lock_reset); 252 252 if (locked) 253 - adev->in_gpu_reset = 1; 253 + adev->in_gpu_reset = true; 254 254 255 255 do { 256 256 if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) ··· 262 262 263 263 flr_done: 264 264 if (locked) { 265 - adev->in_gpu_reset = 0; 265 + adev->in_gpu_reset = false; 266 266 mutex_unlock(&adev->lock_reset); 267 267 } 268 268
+7 -3
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
··· 252 252 */ 253 253 locked = mutex_trylock(&adev->lock_reset); 254 254 if (locked) 255 - adev->in_gpu_reset = 1; 255 + adev->in_gpu_reset = true; 256 256 257 257 do { 258 258 if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) ··· 264 264 265 265 flr_done: 266 266 if (locked) { 267 - adev->in_gpu_reset = 0; 267 + adev->in_gpu_reset = false; 268 268 mutex_unlock(&adev->lock_reset); 269 269 } 270 270 271 271 /* Trigger recovery for world switch failure if no TDR */ 272 - if (amdgpu_device_should_recover_gpu(adev)) 272 + if (amdgpu_device_should_recover_gpu(adev) 273 + && (adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT || 274 + adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT || 275 + adev->compute_timeout == MAX_SCHEDULE_TIMEOUT || 276 + adev->video_timeout == MAX_SCHEDULE_TIMEOUT)) 273 277 amdgpu_device_gpu_recover(adev, NULL); 274 278 } 275 279
+1 -2
drivers/gpu/drm/amd/amdgpu/navi10_ih.c
··· 110 110 static int navi10_ih_irq_init(struct amdgpu_device *adev) 111 111 { 112 112 struct amdgpu_ih_ring *ih = &adev->irq.ih; 113 - int ret = 0; 114 113 u32 ih_rb_cntl, ih_doorbell_rtpr, ih_chicken; 115 114 u32 tmp; 116 115 ··· 178 179 /* enable interrupts */ 179 180 navi10_ih_enable_interrupts(adev); 180 181 181 - return ret; 182 + return 0; 182 183 } 183 184 184 185 /**
+3 -7
drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
··· 339 339 /* ras_controller_int is dedicated for nbif ras error, 340 340 * not the global interrupt for sync flood 341 341 */ 342 - amdgpu_ras_reset_gpu(adev, true); 342 + amdgpu_ras_reset_gpu(adev); 343 343 } 344 344 } 345 345 ··· 456 456 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 457 457 NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT, 458 458 &adev->nbio.ras_controller_irq); 459 - if (r) 460 - return r; 461 459 462 - return 0; 460 + return r; 463 461 } 464 462 465 463 static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev) ··· 474 476 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 475 477 NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, 476 478 &adev->nbio.ras_err_event_athub_irq); 477 - if (r) 478 - return r; 479 479 480 - return 0; 480 + return r; 481 481 } 482 482 483 483 #define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030
+1
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
··· 242 242 GFX_FW_TYPE_SDMA5 = 55, /* SDMA5 MI */ 243 243 GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */ 244 244 GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */ 245 + GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */ 245 246 GFX_FW_TYPE_MAX 246 247 }; 247 248
+28 -14
drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
··· 233 233 return err; 234 234 } 235 235 236 + int psp_v11_0_wait_for_bootloader(struct psp_context *psp) 237 + { 238 + struct amdgpu_device *adev = psp->adev; 239 + 240 + int ret; 241 + int retry_loop; 242 + 243 + for (retry_loop = 0; retry_loop < 10; retry_loop++) { 244 + /* Wait for bootloader to signify that is 245 + ready having bit 31 of C2PMSG_35 set to 1 */ 246 + ret = psp_wait_for(psp, 247 + SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), 248 + 0x80000000, 249 + 0x80000000, 250 + false); 251 + 252 + if (ret == 0) 253 + return 0; 254 + } 255 + 256 + return ret; 257 + } 258 + 236 259 static bool psp_v11_0_is_sos_alive(struct psp_context *psp) 237 260 { 238 261 struct amdgpu_device *adev = psp->adev; ··· 281 258 return 0; 282 259 } 283 260 284 - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ 285 - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), 286 - 0x80000000, 0x80000000, false); 261 + ret = psp_v11_0_wait_for_bootloader(psp); 287 262 if (ret) 288 263 return ret; 289 264 ··· 297 276 WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 298 277 psp_gfxdrv_command_reg); 299 278 300 - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1*/ 301 - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), 302 - 0x80000000, 0x80000000, false); 279 + ret = psp_v11_0_wait_for_bootloader(psp); 303 280 304 281 return ret; 305 282 } ··· 317 298 return 0; 318 299 } 319 300 320 - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ 321 - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), 322 - 0x80000000, 0x80000000, false); 301 + ret = psp_v11_0_wait_for_bootloader(psp); 323 302 if (ret) 324 303 return ret; 325 304 ··· 336 319 /* there might be handshake issue with hardware which needs delay */ 337 320 mdelay(20); 338 321 339 - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), 340 - 0x80000000, 
0x80000000, false); 322 + ret = psp_v11_0_wait_for_bootloader(psp); 341 323 342 324 return ret; 343 325 } ··· 353 337 if (psp_v11_0_is_sos_alive(psp)) 354 338 return 0; 355 339 356 - /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ 357 - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), 358 - 0x80000000, 0x80000000, false); 340 + ret = psp_v11_0_wait_for_bootloader(psp); 359 341 if (ret) 360 342 return ret; 361 343
+5 -7
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
··· 255 255 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 256 256 257 257 /* IB packet must end on a 8 DW boundary */ 258 - sdma_v2_4_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); 258 + sdma_v2_4_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); 259 259 260 260 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | 261 261 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); ··· 750 750 u32 pad_count; 751 751 int i; 752 752 753 - pad_count = (8 - (ib->length_dw & 0x7)) % 8; 753 + pad_count = (-ib->length_dw) & 7; 754 754 for (i = 0; i < pad_count; i++) 755 755 if (sdma && sdma->burst_nop && (i == 0)) 756 756 ib->ptr[ib->length_dw++] = ··· 1260 1260 1261 1261 static void sdma_v2_4_set_vm_pte_funcs(struct amdgpu_device *adev) 1262 1262 { 1263 - struct drm_gpu_scheduler *sched; 1264 1263 unsigned i; 1265 1264 1266 1265 adev->vm_manager.vm_pte_funcs = &sdma_v2_4_vm_pte_funcs; 1267 1266 for (i = 0; i < adev->sdma.num_instances; i++) { 1268 - sched = &adev->sdma.instance[i].ring.sched; 1269 - adev->vm_manager.vm_pte_rqs[i] = 1270 - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 1267 + adev->vm_manager.vm_pte_scheds[i] = 1268 + &adev->sdma.instance[i].ring.sched; 1271 1269 } 1272 - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; 1270 + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; 1273 1271 } 1274 1272 1275 1273 const struct amdgpu_ip_block_version sdma_v2_4_ip_block =
+5 -7
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
··· 429 429 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 430 430 431 431 /* IB packet must end on a 8 DW boundary */ 432 - sdma_v3_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); 432 + sdma_v3_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); 433 433 434 434 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | 435 435 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); ··· 1021 1021 u32 pad_count; 1022 1022 int i; 1023 1023 1024 - pad_count = (8 - (ib->length_dw & 0x7)) % 8; 1024 + pad_count = (-ib->length_dw) & 7; 1025 1025 for (i = 0; i < pad_count; i++) 1026 1026 if (sdma && sdma->burst_nop && (i == 0)) 1027 1027 ib->ptr[ib->length_dw++] = ··· 1698 1698 1699 1699 static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev) 1700 1700 { 1701 - struct drm_gpu_scheduler *sched; 1702 1701 unsigned i; 1703 1702 1704 1703 adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs; 1705 1704 for (i = 0; i < adev->sdma.num_instances; i++) { 1706 - sched = &adev->sdma.instance[i].ring.sched; 1707 - adev->vm_manager.vm_pte_rqs[i] = 1708 - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 1705 + adev->vm_manager.vm_pte_scheds[i] = 1706 + &adev->sdma.instance[i].ring.sched; 1709 1707 } 1710 - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; 1708 + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; 1711 1709 } 1712 1710 1713 1711 const struct amdgpu_ip_block_version sdma_v3_0_ip_block =
+4 -5
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
··· 698 698 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 699 699 700 700 /* IB packet must end on a 8 DW boundary */ 701 - sdma_v4_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); 701 + sdma_v4_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); 702 702 703 703 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | 704 704 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); ··· 1579 1579 u32 pad_count; 1580 1580 int i; 1581 1581 1582 - pad_count = (8 - (ib->length_dw & 0x7)) % 8; 1582 + pad_count = (-ib->length_dw) & 7; 1583 1583 for (i = 0; i < pad_count; i++) 1584 1584 if (sdma && sdma->burst_nop && (i == 0)) 1585 1585 ib->ptr[ib->length_dw++] = ··· 2409 2409 sched = &adev->sdma.instance[i].page.sched; 2410 2410 else 2411 2411 sched = &adev->sdma.instance[i].ring.sched; 2412 - adev->vm_manager.vm_pte_rqs[i] = 2413 - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 2412 + adev->vm_manager.vm_pte_scheds[i] = sched; 2414 2413 } 2415 - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; 2414 + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; 2416 2415 } 2417 2416 2418 2417 const struct amdgpu_ip_block_version sdma_v4_0_ip_block = {
+20 -25
drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
··· 382 382 unsigned vmid = AMDGPU_JOB_GET_VMID(job); 383 383 uint64_t csa_mc_addr = amdgpu_sdma_get_csa_mc_addr(ring, vmid); 384 384 385 - /* IB packet must end on a 8 DW boundary */ 386 - sdma_v5_0_ring_insert_nop(ring, (10 - (lower_32_bits(ring->wptr) & 7)) % 8); 385 + /* An IB packet must end on a 8 DW boundary--the next dword 386 + * must be on a 8-dword boundary. Our IB packet below is 6 387 + * dwords long, thus add x number of NOPs, such that, in 388 + * modular arithmetic, 389 + * wptr + 6 + x = 8k, k >= 0, which in C is, 390 + * (wptr + 6 + x) % 8 = 0. 391 + * The expression below, is a solution of x. 392 + */ 393 + sdma_v5_0_ring_insert_nop(ring, (2 - lower_32_bits(ring->wptr)) & 7); 387 394 388 395 amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) | 389 396 SDMA_PKT_INDIRECT_HEADER_VMID(vmid & 0xf)); ··· 914 907 udelay(1); 915 908 } 916 909 917 - if (i < adev->usec_timeout) { 918 - if (amdgpu_emu_mode == 1) 919 - DRM_INFO("ring test on %d succeeded in %d msecs\n", ring->idx, i); 920 - else 921 - DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i); 922 - } else { 923 - DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n", 924 - ring->idx, tmp); 925 - r = -EINVAL; 926 - } 910 + if (i >= adev->usec_timeout) 911 + r = -ETIMEDOUT; 912 + 927 913 amdgpu_device_wb_free(adev, index); 928 914 929 915 return r; ··· 981 981 goto err1; 982 982 } 983 983 tmp = le32_to_cpu(adev->wb.wb[index]); 984 - if (tmp == 0xDEADBEEF) { 985 - DRM_INFO("ib test on ring %d succeeded\n", ring->idx); 984 + if (tmp == 0xDEADBEEF) 986 985 r = 0; 987 - } else { 988 - DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp); 986 + else 989 987 r = -EINVAL; 990 - } 991 988 992 989 err1: 993 990 amdgpu_ib_free(adev, &ib, NULL); ··· 1083 1086 } 1084 1087 1085 1088 /** 1086 - * sdma_v5_0_ring_pad_ib - pad the IB to the required number of dw 1087 - * 1089 + * sdma_v5_0_ring_pad_ib - pad the IB 1088 1090 * @ib: indirect buffer to fill with padding 1089 1091 * 1092 + * Pad the 
IB with NOPs to a boundary multiple of 8. 1090 1093 */ 1091 1094 static void sdma_v5_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib) 1092 1095 { ··· 1094 1097 u32 pad_count; 1095 1098 int i; 1096 1099 1097 - pad_count = (8 - (ib->length_dw & 0x7)) % 8; 1100 + pad_count = (-ib->length_dw) & 0x7; 1098 1101 for (i = 0; i < pad_count; i++) 1099 1102 if (sdma && sdma->burst_nop && (i == 0)) 1100 1103 ib->ptr[ib->length_dw++] = ··· 1718 1721 1719 1722 static void sdma_v5_0_set_vm_pte_funcs(struct amdgpu_device *adev) 1720 1723 { 1721 - struct drm_gpu_scheduler *sched; 1722 1724 unsigned i; 1723 1725 1724 1726 if (adev->vm_manager.vm_pte_funcs == NULL) { 1725 1727 adev->vm_manager.vm_pte_funcs = &sdma_v5_0_vm_pte_funcs; 1726 1728 for (i = 0; i < adev->sdma.num_instances; i++) { 1727 - sched = &adev->sdma.instance[i].ring.sched; 1728 - adev->vm_manager.vm_pte_rqs[i] = 1729 - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 1729 + adev->vm_manager.vm_pte_scheds[i] = 1730 + &adev->sdma.instance[i].ring.sched; 1730 1731 } 1731 - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; 1732 + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; 1732 1733 } 1733 1734 } 1734 1735
+3 -5
drivers/gpu/drm/amd/amdgpu/si_dma.c
··· 834 834 835 835 static void si_dma_set_vm_pte_funcs(struct amdgpu_device *adev) 836 836 { 837 - struct drm_gpu_scheduler *sched; 838 837 unsigned i; 839 838 840 839 adev->vm_manager.vm_pte_funcs = &si_dma_vm_pte_funcs; 841 840 for (i = 0; i < adev->sdma.num_instances; i++) { 842 - sched = &adev->sdma.instance[i].ring.sched; 843 - adev->vm_manager.vm_pte_rqs[i] = 844 - &sched->sched_rq[DRM_SCHED_PRIORITY_KERNEL]; 841 + adev->vm_manager.vm_pte_scheds[i] = 842 + &adev->sdma.instance[i].ring.sched; 845 843 } 846 - adev->vm_manager.vm_pte_num_rqs = adev->sdma.num_instances; 844 + adev->vm_manager.vm_pte_num_scheds = adev->sdma.num_instances; 847 845 } 848 846 849 847 const struct amdgpu_ip_block_version si_dma_ip_block =
+9 -4
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 613 613 switch (adev->asic_type) { 614 614 case CHIP_VEGA10: 615 615 case CHIP_VEGA12: 616 + case CHIP_ARCTURUS: 616 617 soc15_asic_get_baco_capability(adev, &baco_support); 617 618 break; 618 619 case CHIP_VEGA20: ··· 828 827 amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); 829 828 amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); 830 829 amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); 831 - if (!amdgpu_sriov_vf(adev)) 832 - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 830 + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); 833 831 834 - if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) 835 - amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 832 + if (amdgpu_sriov_vf(adev)) { 833 + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) 834 + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 835 + } else { 836 + if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) 837 + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); 838 + } 836 839 if (!amdgpu_sriov_vf(adev)) 837 840 amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block); 838 841 break;
+82 -81
drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
··· 28 28 #include "rsmu/rsmu_0_0_2_sh_mask.h" 29 29 #include "umc/umc_6_1_1_offset.h" 30 30 #include "umc/umc_6_1_1_sh_mask.h" 31 + #include "umc/umc_6_1_2_offset.h" 31 32 32 33 #define smnMCA_UMC0_MCUMC_ADDRT0 0x50f10 33 34 34 - /* UMC 6_1_2 register offsets */ 35 - #define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360 36 - #define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1 37 - #define mmUMCCH0_0_EccErrCnt_ARCT 0x0361 38 - #define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1 39 - #define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2 40 - #define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1 35 + #define UMC_6_INST_DIST 0x40000 41 36 42 37 /* 43 38 * (addr / 256) * 8192, the higher 26 bits in ErrorAddr 44 39 * is the index of 8KB block 45 40 */ 46 - #define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) 41 + #define ADDR_OF_8KB_BLOCK(addr) (((addr) & ~0xffULL) << 5) 47 42 /* channel index is the index of 256B block */ 48 43 #define ADDR_OF_256B_BLOCK(channel_index) ((channel_index) << 8) 49 44 /* offset in 256B block */ 50 45 #define OFFSET_IN_256B_BLOCK(addr) ((addr) & 0xffULL) 46 + 47 + #define LOOP_UMC_INST(umc_inst) for ((umc_inst) = 0; (umc_inst) < adev->umc.umc_inst_num; (umc_inst)++) 48 + #define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++) 49 + #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst)) 51 50 52 51 const uint32_t 53 52 umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM] = { ··· 56 57 {9, 25, 0, 16}, {15, 31, 6, 22} 57 58 }; 58 59 59 - static void umc_v6_1_enable_umc_index_mode(struct amdgpu_device *adev, 60 - uint32_t umc_instance) 61 - { 62 - uint32_t rsmu_umc_index; 63 - 64 - rsmu_umc_index = RREG32_SOC15(RSMU, 0, 65 - mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); 66 - rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, 67 - RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, 68 - RSMU_UMC_INDEX_MODE_EN, 1); 69 - rsmu_umc_index = 
REG_SET_FIELD(rsmu_umc_index, 70 - RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, 71 - RSMU_UMC_INDEX_INSTANCE, umc_instance); 72 - rsmu_umc_index = REG_SET_FIELD(rsmu_umc_index, 73 - RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, 74 - RSMU_UMC_INDEX_WREN, 1 << umc_instance); 75 - WREG32_SOC15(RSMU, 0, mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, 76 - rsmu_umc_index); 77 - } 78 - 79 60 static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev) 80 61 { 81 62 WREG32_FIELD15(RSMU, 0, RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, 82 63 RSMU_UMC_INDEX_MODE_EN, 0); 83 64 } 84 65 85 - static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev) 66 + static inline uint32_t get_umc_6_reg_offset(struct amdgpu_device *adev, 67 + uint32_t umc_inst, 68 + uint32_t ch_inst) 86 69 { 87 - uint32_t rsmu_umc_index; 88 - 89 - rsmu_umc_index = RREG32_SOC15(RSMU, 0, 90 - mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); 91 - return REG_GET_FIELD(rsmu_umc_index, 92 - RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, 93 - RSMU_UMC_INDEX_INSTANCE); 70 + return adev->umc.channel_offs*ch_inst + UMC_6_INST_DIST*umc_inst; 94 71 } 95 72 96 73 static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, ··· 80 105 81 106 if (adev->asic_type == CHIP_ARCTURUS) { 82 107 /* UMC 6_1_2 registers */ 83 - 84 108 ecc_err_cnt_sel_addr = 85 109 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); 86 110 ecc_err_cnt_addr = ··· 88 114 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); 89 115 } else { 90 116 /* UMC 6_1_1 registers */ 91 - 92 117 ecc_err_cnt_sel_addr = 93 118 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); 94 119 ecc_err_cnt_addr = ··· 97 124 } 98 125 99 126 /* select the lower chip and check the error count */ 100 - ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); 127 + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4); 101 128 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, 102 129 EccErrCntCsSel, 0); 103 - 
WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); 104 - ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); 130 + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); 131 + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); 105 132 *error_count += 106 133 (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - 107 134 UMC_V6_1_CE_CNT_INIT); 108 135 /* clear the lower chip err count */ 109 - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); 136 + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); 110 137 111 138 /* select the higher chip and check the err counter */ 112 139 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, 113 140 EccErrCntCsSel, 1); 114 - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); 115 - ecc_err_cnt = RREG32(ecc_err_cnt_addr + umc_reg_offset); 141 + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); 142 + ecc_err_cnt = RREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4); 116 143 *error_count += 117 144 (REG_GET_FIELD(ecc_err_cnt, UMCCH0_0_EccErrCnt, EccErrCnt) - 118 145 UMC_V6_1_CE_CNT_INIT); 119 146 /* clear the higher chip err count */ 120 - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); 147 + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); 121 148 122 149 /* check for SRAM correctable error 123 150 MCUMC_STATUS is a 64 bit register */ 124 - mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); 151 + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); 125 152 if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, ErrorCodeExt) == 6 && 126 153 REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && 127 154 REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) ··· 137 164 138 165 if (adev->asic_type == CHIP_ARCTURUS) { 139 166 /* UMC 6_1_2 registers */ 140 - 141 167 
mc_umc_status_addr = 142 168 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); 143 169 } else { 144 170 /* UMC 6_1_1 registers */ 145 - 146 171 mc_umc_status_addr = 147 172 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); 148 173 } 149 174 150 175 /* check the MCUMC_STATUS */ 151 - mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); 176 + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); 152 177 if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && 153 178 (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || 154 179 REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || ··· 156 185 *error_count += 1; 157 186 } 158 187 159 - static void umc_v6_1_query_error_count(struct amdgpu_device *adev, 160 - struct ras_err_data *err_data, uint32_t umc_reg_offset, 161 - uint32_t channel_index) 162 - { 163 - umc_v6_1_query_correctable_error_count(adev, umc_reg_offset, 164 - &(err_data->ce_count)); 165 - umc_v6_1_querry_uncorrectable_error_count(adev, umc_reg_offset, 166 - &(err_data->ue_count)); 167 - } 168 - 169 188 static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev, 170 189 void *ras_error_status) 171 190 { 172 - amdgpu_umc_for_each_channel(umc_v6_1_query_error_count); 191 + struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status; 192 + 193 + uint32_t umc_inst = 0; 194 + uint32_t ch_inst = 0; 195 + uint32_t umc_reg_offset = 0; 196 + 197 + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { 198 + umc_reg_offset = get_umc_6_reg_offset(adev, 199 + umc_inst, 200 + ch_inst); 201 + 202 + umc_v6_1_query_correctable_error_count(adev, 203 + umc_reg_offset, 204 + &(err_data->ce_count)); 205 + umc_v6_1_querry_uncorrectable_error_count(adev, 206 + umc_reg_offset, 207 + &(err_data->ue_count)); 208 + } 173 209 } 174 210 175 211 static void umc_v6_1_query_error_address(struct amdgpu_device *adev, 176 212 struct ras_err_data *err_data, 177 - uint32_t 
umc_reg_offset, uint32_t channel_index) 213 + uint32_t umc_reg_offset, 214 + uint32_t ch_inst, 215 + uint32_t umc_inst) 178 216 { 179 217 uint32_t lsb, mc_umc_status_addr; 180 218 uint64_t mc_umc_status, err_addr, retired_page; 181 219 struct eeprom_table_record *err_rec; 220 + uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; 182 221 183 222 if (adev->asic_type == CHIP_ARCTURUS) { 184 223 /* UMC 6_1_2 registers */ 185 - 186 224 mc_umc_status_addr = 187 225 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT); 188 226 } else { 189 227 /* UMC 6_1_1 registers */ 190 - 191 228 mc_umc_status_addr = 192 229 SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); 193 230 } ··· 203 224 /* skip error address process if -ENOMEM */ 204 225 if (!err_data->err_addr) { 205 226 /* clear umc status */ 206 - WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL); 227 + WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); 207 228 return; 208 229 } 209 230 210 231 err_rec = &err_data->err_addr[err_data->err_addr_cnt]; 211 - mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); 232 + mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4); 212 233 213 234 /* calculate error address if ue/ce error is detected */ 214 235 if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && ··· 236 257 err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; 237 258 err_rec->cu = 0; 238 259 err_rec->mem_channel = channel_index; 239 - err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev); 260 + err_rec->mcumc_id = umc_inst; 240 261 241 262 err_data->err_addr_cnt++; 242 263 } 243 264 } 244 265 245 266 /* clear umc status */ 246 - WREG64_UMC(mc_umc_status_addr + umc_reg_offset, 0x0ULL); 267 + WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL); 247 268 } 248 269 249 270 static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, 250 271 void *ras_error_status) 251 272 
{ 252 - amdgpu_umc_for_each_channel(umc_v6_1_query_error_address); 273 + struct ras_err_data* err_data = (struct ras_err_data*)ras_error_status; 274 + 275 + uint32_t umc_inst = 0; 276 + uint32_t ch_inst = 0; 277 + uint32_t umc_reg_offset = 0; 278 + 279 + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { 280 + umc_reg_offset = get_umc_6_reg_offset(adev, 281 + umc_inst, 282 + ch_inst); 283 + 284 + umc_v6_1_query_error_address(adev, 285 + err_data, 286 + umc_reg_offset, 287 + ch_inst, 288 + umc_inst); 289 + } 290 + 253 291 } 254 292 255 293 static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev, 256 - struct ras_err_data *err_data, 257 - uint32_t umc_reg_offset, uint32_t channel_index) 294 + uint32_t umc_reg_offset) 258 295 { 259 296 uint32_t ecc_err_cnt_sel, ecc_err_cnt_sel_addr; 260 297 uint32_t ecc_err_cnt_addr; 261 298 262 299 if (adev->asic_type == CHIP_ARCTURUS) { 263 300 /* UMC 6_1_2 registers */ 264 - 265 301 ecc_err_cnt_sel_addr = 266 302 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel_ARCT); 267 303 ecc_err_cnt_addr = 268 304 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCnt_ARCT); 269 305 } else { 270 306 /* UMC 6_1_1 registers */ 271 - 272 307 ecc_err_cnt_sel_addr = 273 308 SOC15_REG_OFFSET(UMC, 0, mmUMCCH0_0_EccErrCntSel); 274 309 ecc_err_cnt_addr = ··· 290 297 } 291 298 292 299 /* select the lower chip and check the error count */ 293 - ecc_err_cnt_sel = RREG32(ecc_err_cnt_sel_addr + umc_reg_offset); 300 + ecc_err_cnt_sel = RREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4); 294 301 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, 295 302 EccErrCntCsSel, 0); 296 303 /* set ce error interrupt type to APIC based interrupt */ 297 304 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, 298 305 EccErrInt, 0x1); 299 - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); 306 + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); 300 307 /* set error count to initial value 
*/ 301 - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); 308 + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); 302 309 303 310 /* select the higher chip and check the err counter */ 304 311 ecc_err_cnt_sel = REG_SET_FIELD(ecc_err_cnt_sel, UMCCH0_0_EccErrCntSel, 305 312 EccErrCntCsSel, 1); 306 - WREG32(ecc_err_cnt_sel_addr + umc_reg_offset, ecc_err_cnt_sel); 307 - WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); 313 + WREG32_PCIE((ecc_err_cnt_sel_addr + umc_reg_offset) * 4, ecc_err_cnt_sel); 314 + WREG32_PCIE((ecc_err_cnt_addr + umc_reg_offset) * 4, UMC_V6_1_CE_CNT_INIT); 308 315 } 309 316 310 317 static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev) 311 318 { 312 - void *ras_error_status = NULL; 319 + uint32_t umc_inst = 0; 320 + uint32_t ch_inst = 0; 321 + uint32_t umc_reg_offset = 0; 313 322 314 - amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel); 323 + umc_v6_1_disable_umc_index_mode(adev); 324 + 325 + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { 326 + umc_reg_offset = get_umc_6_reg_offset(adev, 327 + umc_inst, 328 + ch_inst); 329 + 330 + umc_v6_1_err_cnt_init_per_channel(adev, umc_reg_offset); 331 + } 315 332 } 316 333 317 334 const struct amdgpu_umc_funcs umc_v6_1_funcs = { ··· 329 326 .ras_late_init = amdgpu_umc_ras_late_init, 330 327 .query_ras_error_count = umc_v6_1_query_ras_error_count, 331 328 .query_ras_error_address = umc_v6_1_query_ras_error_address, 332 - .enable_umc_index_mode = umc_v6_1_enable_umc_index_mode, 333 - .disable_umc_index_mode = umc_v6_1_disable_umc_index_mode, 334 329 };
+88 -2
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
··· 25 25 26 26 #include "amdgpu.h" 27 27 #include "amdgpu_vcn.h" 28 + #include "amdgpu_pm.h" 28 29 #include "soc15.h" 29 30 #include "soc15d.h" 30 31 #include "soc15_common.h" ··· 51 50 static int vcn_v1_0_set_powergating_state(void *handle, enum amd_powergating_state state); 52 51 static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev, 53 52 struct dpg_pause_state *new_state); 53 + 54 + static void vcn_v1_0_idle_work_handler(struct work_struct *work); 54 55 55 56 /** 56 57 * vcn_v1_0_early_init - set function pointers ··· 107 104 r = amdgpu_vcn_sw_init(adev); 108 105 if (r) 109 106 return r; 107 + 108 + /* Override the work func */ 109 + adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler; 110 110 111 111 if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 112 112 const struct common_firmware_header *hdr; ··· 1764 1758 return ret; 1765 1759 } 1766 1760 1761 + static void vcn_v1_0_idle_work_handler(struct work_struct *work) 1762 + { 1763 + struct amdgpu_device *adev = 1764 + container_of(work, struct amdgpu_device, vcn.idle_work.work); 1765 + unsigned int fences = 0, i; 1766 + 1767 + for (i = 0; i < adev->vcn.num_enc_rings; ++i) 1768 + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); 1769 + 1770 + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 1771 + struct dpg_pause_state new_state; 1772 + 1773 + if (fences) 1774 + new_state.fw_based = VCN_DPG_STATE__PAUSE; 1775 + else 1776 + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; 1777 + 1778 + if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) 1779 + new_state.jpeg = VCN_DPG_STATE__PAUSE; 1780 + else 1781 + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; 1782 + 1783 + adev->vcn.pause_dpg_mode(adev, &new_state); 1784 + } 1785 + 1786 + fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec); 1787 + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec); 1788 + 1789 + if (fences == 0) { 1790 + amdgpu_gfx_off_ctrl(adev, true); 1791 + if (adev->pm.dpm_enabled) 1792 + 
amdgpu_dpm_enable_uvd(adev, false); 1793 + else 1794 + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, 1795 + AMD_PG_STATE_GATE); 1796 + } else { 1797 + schedule_delayed_work(&adev->vcn.idle_work, VCN_IDLE_TIMEOUT); 1798 + } 1799 + } 1800 + 1801 + void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring) 1802 + { 1803 + struct amdgpu_device *adev = ring->adev; 1804 + bool set_clocks = !cancel_delayed_work_sync(&adev->vcn.idle_work); 1805 + 1806 + if (set_clocks) { 1807 + amdgpu_gfx_off_ctrl(adev, false); 1808 + if (adev->pm.dpm_enabled) 1809 + amdgpu_dpm_enable_uvd(adev, true); 1810 + else 1811 + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCN, 1812 + AMD_PG_STATE_UNGATE); 1813 + } 1814 + 1815 + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 1816 + struct dpg_pause_state new_state; 1817 + unsigned int fences = 0, i; 1818 + 1819 + for (i = 0; i < adev->vcn.num_enc_rings; ++i) 1820 + fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]); 1821 + 1822 + if (fences) 1823 + new_state.fw_based = VCN_DPG_STATE__PAUSE; 1824 + else 1825 + new_state.fw_based = VCN_DPG_STATE__UNPAUSE; 1826 + 1827 + if (amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec)) 1828 + new_state.jpeg = VCN_DPG_STATE__PAUSE; 1829 + else 1830 + new_state.jpeg = VCN_DPG_STATE__UNPAUSE; 1831 + 1832 + if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_ENC) 1833 + new_state.fw_based = VCN_DPG_STATE__PAUSE; 1834 + else if (ring->funcs->type == AMDGPU_RING_TYPE_VCN_JPEG) 1835 + new_state.jpeg = VCN_DPG_STATE__PAUSE; 1836 + 1837 + adev->vcn.pause_dpg_mode(adev, &new_state); 1838 + } 1839 + } 1840 + 1767 1841 static const struct amd_ip_funcs vcn_v1_0_ip_funcs = { 1768 1842 .name = "vcn_v1_0", 1769 1843 .early_init = vcn_v1_0_early_init, ··· 1890 1804 .insert_start = vcn_v1_0_dec_ring_insert_start, 1891 1805 .insert_end = vcn_v1_0_dec_ring_insert_end, 1892 1806 .pad_ib = amdgpu_ring_generic_pad_ib, 1893 - .begin_use = amdgpu_vcn_ring_begin_use, 1807 + .begin_use = 
vcn_v1_0_ring_begin_use, 1894 1808 .end_use = amdgpu_vcn_ring_end_use, 1895 1809 .emit_wreg = vcn_v1_0_dec_ring_emit_wreg, 1896 1810 .emit_reg_wait = vcn_v1_0_dec_ring_emit_reg_wait, ··· 1922 1836 .insert_nop = amdgpu_ring_insert_nop, 1923 1837 .insert_end = vcn_v1_0_enc_ring_insert_end, 1924 1838 .pad_ib = amdgpu_ring_generic_pad_ib, 1925 - .begin_use = amdgpu_vcn_ring_begin_use, 1839 + .begin_use = vcn_v1_0_ring_begin_use, 1926 1840 .end_use = amdgpu_vcn_ring_end_use, 1927 1841 .emit_wreg = vcn_v1_0_enc_ring_emit_wreg, 1928 1842 .emit_reg_wait = vcn_v1_0_enc_ring_emit_reg_wait,
+2
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.h
··· 24 24 #ifndef __VCN_V1_0_H__ 25 25 #define __VCN_V1_0_H__ 26 26 27 + void vcn_v1_0_ring_begin_use(struct amdgpu_ring *ring); 28 + 27 29 extern const struct amdgpu_ip_block_version vcn_v1_0_ip_block; 28 30 29 31 #endif
+250 -12
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
··· 29 29 #include "soc15.h" 30 30 #include "soc15d.h" 31 31 #include "vcn_v2_0.h" 32 + #include "mmsch_v1_0.h" 32 33 33 34 #include "vcn/vcn_2_5_offset.h" 34 35 #include "vcn/vcn_2_5_sh_mask.h" ··· 55 54 static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev); 56 55 static int vcn_v2_5_set_powergating_state(void *handle, 57 56 enum amd_powergating_state state); 57 + static int vcn_v2_5_sriov_start(struct amdgpu_device *adev); 58 58 59 59 static int amdgpu_ih_clientid_vcns[] = { 60 60 SOC15_IH_CLIENTID_VCN, ··· 90 88 } else 91 89 adev->vcn.num_vcn_inst = 1; 92 90 93 - adev->vcn.num_enc_rings = 2; 91 + if (amdgpu_sriov_vf(adev)) { 92 + adev->vcn.num_vcn_inst = 2; 93 + adev->vcn.harvest_config = 0; 94 + adev->vcn.num_enc_rings = 1; 95 + } else { 96 + adev->vcn.num_enc_rings = 2; 97 + } 94 98 95 99 vcn_v2_5_set_dec_ring_funcs(adev); 96 100 vcn_v2_5_set_enc_ring_funcs(adev); ··· 184 176 185 177 ring = &adev->vcn.inst[j].ring_dec; 186 178 ring->use_doorbell = true; 187 - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 8*j; 179 + 180 + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 181 + (amdgpu_sriov_vf(adev) ? 2*j : 8*j); 188 182 sprintf(ring->name, "vcn_dec_%d", j); 189 183 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); 190 184 if (r) ··· 195 185 for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 196 186 ring = &adev->vcn.inst[j].ring_enc[i]; 197 187 ring->use_doorbell = true; 198 - ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i + 8*j; 188 + 189 + ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 190 + (amdgpu_sriov_vf(adev) ? 
(1 + i + 2*j) : (2 + i + 8*j)); 191 + 199 192 sprintf(ring->name, "vcn_enc_%d.%d", j, i); 200 193 r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[j].irq, 0); 201 194 if (r) 202 195 return r; 203 196 } 197 + } 198 + 199 + if (amdgpu_sriov_vf(adev)) { 200 + r = amdgpu_virt_alloc_mm_table(adev); 201 + if (r) 202 + return r; 204 203 } 205 204 206 205 return 0; ··· 226 207 { 227 208 int r; 228 209 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 210 + 211 + if (amdgpu_sriov_vf(adev)) 212 + amdgpu_virt_free_mm_table(adev); 229 213 230 214 r = amdgpu_vcn_suspend(adev); 231 215 if (r) ··· 250 228 { 251 229 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 252 230 struct amdgpu_ring *ring; 253 - int i, j, r; 231 + int i, j, r = 0; 232 + 233 + if (amdgpu_sriov_vf(adev)) 234 + r = vcn_v2_5_sriov_start(adev); 254 235 255 236 for (j = 0; j < adev->vcn.num_vcn_inst; ++j) { 256 237 if (adev->vcn.harvest_config & (1 << j)) 257 238 continue; 258 - ring = &adev->vcn.inst[j].ring_dec; 259 239 260 - adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, 240 + if (amdgpu_sriov_vf(adev)) { 241 + adev->vcn.inst[j].ring_enc[0].sched.ready = true; 242 + adev->vcn.inst[j].ring_enc[1].sched.ready = false; 243 + adev->vcn.inst[j].ring_enc[2].sched.ready = false; 244 + adev->vcn.inst[j].ring_dec.sched.ready = true; 245 + } else { 246 + 247 + ring = &adev->vcn.inst[j].ring_dec; 248 + 249 + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, 261 250 ring->doorbell_index, j); 262 251 263 - r = amdgpu_ring_test_helper(ring); 264 - if (r) 265 - goto done; 266 - 267 - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 268 - ring = &adev->vcn.inst[j].ring_enc[i]; 269 252 r = amdgpu_ring_test_helper(ring); 270 253 if (r) 271 254 goto done; 255 + 256 + for (i = 0; i < adev->vcn.num_enc_rings; ++i) { 257 + ring = &adev->vcn.inst[j].ring_enc[i]; 258 + r = amdgpu_ring_test_helper(ring); 259 + if (r) 260 + goto done; 261 + } 272 262 } 273 263 } 274 264 ··· 775 
741 return 0; 776 742 } 777 743 744 + static int vcn_v2_5_mmsch_start(struct amdgpu_device *adev, 745 + struct amdgpu_mm_table *table) 746 + { 747 + uint32_t data = 0, loop = 0, size = 0; 748 + uint64_t addr = table->gpu_addr; 749 + struct mmsch_v1_1_init_header *header = NULL;; 750 + 751 + header = (struct mmsch_v1_1_init_header *)table->cpu_addr; 752 + size = header->total_size; 753 + 754 + /* 755 + * 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr of 756 + * memory descriptor location 757 + */ 758 + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr)); 759 + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr)); 760 + 761 + /* 2, update vmid of descriptor */ 762 + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID); 763 + data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; 764 + /* use domain0 for MM scheduler */ 765 + data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); 766 + WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data); 767 + 768 + /* 3, notify mmsch about the size of this descriptor */ 769 + WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size); 770 + 771 + /* 4, set resp to zero */ 772 + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0); 773 + 774 + /* 775 + * 5, kick off the initialization and wait until 776 + * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero 777 + */ 778 + WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001); 779 + 780 + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); 781 + loop = 10; 782 + while ((data & 0x10000002) != 0x10000002) { 783 + udelay(100); 784 + data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP); 785 + loop--; 786 + if (!loop) 787 + break; 788 + } 789 + 790 + if (!loop) { 791 + dev_err(adev->dev, 792 + "failed to init MMSCH, mmMMSCH_VF_MAILBOX_RESP = %x\n", 793 + data); 794 + return -EBUSY; 795 + } 796 + 797 + return 0; 798 + } 799 + 800 + static int vcn_v2_5_sriov_start(struct amdgpu_device *adev) 801 + { 802 + struct amdgpu_ring *ring; 803 + uint32_t offset, size, tmp, i, rb_bufsz; 804 + uint32_t 
table_size = 0; 805 + struct mmsch_v1_0_cmd_direct_write direct_wt = { { 0 } }; 806 + struct mmsch_v1_0_cmd_direct_read_modify_write direct_rd_mod_wt = { { 0 } }; 807 + struct mmsch_v1_0_cmd_direct_polling direct_poll = { { 0 } }; 808 + struct mmsch_v1_0_cmd_end end = { { 0 } }; 809 + uint32_t *init_table = adev->virt.mm_table.cpu_addr; 810 + struct mmsch_v1_1_init_header *header = (struct mmsch_v1_1_init_header *)init_table; 811 + 812 + direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE; 813 + direct_rd_mod_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; 814 + direct_poll.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_POLLING; 815 + end.cmd_header.command_type = MMSCH_COMMAND__END; 816 + 817 + header->version = MMSCH_VERSION; 818 + header->total_size = sizeof(struct mmsch_v1_1_init_header) >> 2; 819 + init_table += header->total_size; 820 + 821 + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { 822 + header->eng[i].table_offset = header->total_size; 823 + header->eng[i].init_status = 0; 824 + header->eng[i].table_size = 0; 825 + 826 + table_size = 0; 827 + 828 + MMSCH_V1_0_INSERT_DIRECT_RD_MOD_WT( 829 + SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS), 830 + ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); 831 + 832 + size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4); 833 + /* mc resume*/ 834 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { 835 + MMSCH_V1_0_INSERT_DIRECT_WT( 836 + SOC15_REG_OFFSET(UVD, i, 837 + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 838 + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); 839 + MMSCH_V1_0_INSERT_DIRECT_WT( 840 + SOC15_REG_OFFSET(UVD, i, 841 + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 842 + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); 843 + offset = 0; 844 + MMSCH_V1_0_INSERT_DIRECT_WT( 845 + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 0); 846 + } else { 847 + MMSCH_V1_0_INSERT_DIRECT_WT( 848 + SOC15_REG_OFFSET(UVD, i, 849 + 
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 850 + lower_32_bits(adev->vcn.inst[i].gpu_addr)); 851 + MMSCH_V1_0_INSERT_DIRECT_WT( 852 + SOC15_REG_OFFSET(UVD, i, 853 + mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 854 + upper_32_bits(adev->vcn.inst[i].gpu_addr)); 855 + offset = size; 856 + MMSCH_V1_0_INSERT_DIRECT_WT( 857 + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0), 858 + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); 859 + } 860 + 861 + MMSCH_V1_0_INSERT_DIRECT_WT( 862 + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0), 863 + size); 864 + MMSCH_V1_0_INSERT_DIRECT_WT( 865 + SOC15_REG_OFFSET(UVD, i, 866 + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 867 + lower_32_bits(adev->vcn.inst[i].gpu_addr + offset)); 868 + MMSCH_V1_0_INSERT_DIRECT_WT( 869 + SOC15_REG_OFFSET(UVD, i, 870 + mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 871 + upper_32_bits(adev->vcn.inst[i].gpu_addr + offset)); 872 + MMSCH_V1_0_INSERT_DIRECT_WT( 873 + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1), 874 + 0); 875 + MMSCH_V1_0_INSERT_DIRECT_WT( 876 + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1), 877 + AMDGPU_VCN_STACK_SIZE); 878 + MMSCH_V1_0_INSERT_DIRECT_WT( 879 + SOC15_REG_OFFSET(UVD, i, 880 + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), 881 + lower_32_bits(adev->vcn.inst[i].gpu_addr + offset + 882 + AMDGPU_VCN_STACK_SIZE)); 883 + MMSCH_V1_0_INSERT_DIRECT_WT( 884 + SOC15_REG_OFFSET(UVD, i, 885 + mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), 886 + upper_32_bits(adev->vcn.inst[i].gpu_addr + offset + 887 + AMDGPU_VCN_STACK_SIZE)); 888 + MMSCH_V1_0_INSERT_DIRECT_WT( 889 + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2), 890 + 0); 891 + MMSCH_V1_0_INSERT_DIRECT_WT( 892 + SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2), 893 + AMDGPU_VCN_CONTEXT_SIZE); 894 + 895 + ring = &adev->vcn.inst[i].ring_enc[0]; 896 + ring->wptr = 0; 897 + 898 + MMSCH_V1_0_INSERT_DIRECT_WT( 899 + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO), 900 + lower_32_bits(ring->gpu_addr)); 901 + MMSCH_V1_0_INSERT_DIRECT_WT( 902 + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI), 903 + 
upper_32_bits(ring->gpu_addr)); 904 + MMSCH_V1_0_INSERT_DIRECT_WT( 905 + SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE), 906 + ring->ring_size / 4); 907 + 908 + ring = &adev->vcn.inst[i].ring_dec; 909 + ring->wptr = 0; 910 + MMSCH_V1_0_INSERT_DIRECT_WT( 911 + SOC15_REG_OFFSET(UVD, i, 912 + mmUVD_LMI_RBC_RB_64BIT_BAR_LOW), 913 + lower_32_bits(ring->gpu_addr)); 914 + MMSCH_V1_0_INSERT_DIRECT_WT( 915 + SOC15_REG_OFFSET(UVD, i, 916 + mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH), 917 + upper_32_bits(ring->gpu_addr)); 918 + 919 + /* force RBC into idle state */ 920 + rb_bufsz = order_base_2(ring->ring_size); 921 + tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz); 922 + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1); 923 + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1); 924 + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1); 925 + tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1); 926 + MMSCH_V1_0_INSERT_DIRECT_WT( 927 + SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp); 928 + 929 + /* add end packet */ 930 + memcpy((void *)init_table, &end, sizeof(struct mmsch_v1_0_cmd_end)); 931 + table_size += sizeof(struct mmsch_v1_0_cmd_end) / 4; 932 + init_table += sizeof(struct mmsch_v1_0_cmd_end) / 4; 933 + 934 + /* refine header */ 935 + header->eng[i].table_size = table_size; 936 + header->total_size += table_size; 937 + } 938 + 939 + return vcn_v2_5_mmsch_start(adev, &adev->virt.mm_table); 940 + } 941 + 778 942 static int vcn_v2_5_stop(struct amdgpu_device *adev) 779 943 { 780 944 uint32_t tmp; ··· 1280 1048 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1281 1049 bool enable = (state == AMD_CG_STATE_GATE) ? 
true : false; 1282 1050 1051 + if (amdgpu_sriov_vf(adev)) 1052 + return 0; 1053 + 1283 1054 if (enable) { 1284 1055 if (vcn_v2_5_is_idle(handle)) 1285 1056 return -EBUSY; ··· 1299 1064 { 1300 1065 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1301 1066 int ret; 1067 + 1068 + if (amdgpu_sriov_vf(adev)) 1069 + return 0; 1302 1070 1303 1071 if(state == adev->vcn.cur_state) 1304 1072 return 0;
+12 -10
drivers/gpu/drm/amd/amdgpu/vega10_ih.c
··· 234 234 WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE_HI, (ih->gpu_addr >> 40) & 0xff); 235 235 236 236 ih_rb_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL); 237 - ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); 238 237 ih_rb_cntl = vega10_ih_rb_cntl(ih, ih_rb_cntl); 239 - if (adev->irq.ih.use_bus_addr) { 240 - ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1); 241 - } else { 242 - ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, MC_SPACE_FBPA_ENABLE, 1); 243 - } 244 238 ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 245 239 !!adev->irq.msi_enabled); 246 - 247 240 if (amdgpu_sriov_vf(adev)) { 248 241 if (psp_reg_program(&adev->psp, PSP_REG_IH_RB_CNTL, ih_rb_cntl)) { 249 242 DRM_ERROR("PSP program IH_RB_CNTL failed!\n"); ··· 246 253 WREG32_SOC15(OSSSYS, 0, mmIH_RB_CNTL, ih_rb_cntl); 247 254 } 248 255 249 - if ((adev->asic_type == CHIP_ARCTURUS 250 - && adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) 251 - || adev->asic_type == CHIP_RENOIR) 256 + if ((adev->asic_type == CHIP_ARCTURUS && 257 + adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) || 258 + adev->asic_type == CHIP_RENOIR) { 259 + ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN); 260 + if (adev->irq.ih.use_bus_addr) { 261 + ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, 262 + MC_SPACE_GPA_ENABLE, 1); 263 + } else { 264 + ih_chicken = REG_SET_FIELD(ih_chicken, IH_CHICKEN, 265 + MC_SPACE_FBPA_ENABLE, 1); 266 + } 252 267 WREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN, ih_chicken); 268 + } 253 269 254 270 /* set the writeback address whether it's enabled or not */ 255 271 WREG32_SOC15(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO,
+26 -4
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 42 42 43 43 static long kfd_ioctl(struct file *, unsigned int, unsigned long); 44 44 static int kfd_open(struct inode *, struct file *); 45 + static int kfd_release(struct inode *, struct file *); 45 46 static int kfd_mmap(struct file *, struct vm_area_struct *); 46 47 47 48 static const char kfd_dev_name[] = "kfd"; ··· 52 51 .unlocked_ioctl = kfd_ioctl, 53 52 .compat_ioctl = compat_ptr_ioctl, 54 53 .open = kfd_open, 54 + .release = kfd_release, 55 55 .mmap = kfd_mmap, 56 56 }; 57 57 ··· 126 124 if (IS_ERR(process)) 127 125 return PTR_ERR(process); 128 126 129 - if (kfd_is_locked()) 127 + if (kfd_is_locked()) { 128 + kfd_unref_process(process); 130 129 return -EAGAIN; 130 + } 131 + 132 + /* filep now owns the reference returned by kfd_create_process */ 133 + filep->private_data = process; 131 134 132 135 dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n", 133 136 process->pasid, process->is_32bit_user_mode); 137 + 138 + return 0; 139 + } 140 + 141 + static int kfd_release(struct inode *inode, struct file *filep) 142 + { 143 + struct kfd_process *process = filep->private_data; 144 + 145 + if (process) 146 + kfd_unref_process(process); 134 147 135 148 return 0; 136 149 } ··· 1818 1801 1819 1802 dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg); 1820 1803 1821 - process = kfd_get_process(current); 1822 - if (IS_ERR(process)) { 1823 - dev_dbg(kfd_device, "no process\n"); 1804 + /* Get the process struct from the filep. Only the process 1805 + * that opened /dev/kfd can use the file descriptor. Child 1806 + * processes need to create their own KFD device context. 1807 + */ 1808 + process = filep->private_data; 1809 + if (process->lead_thread != current->group_leader) { 1810 + dev_dbg(kfd_device, "Using KFD FD in wrong process\n"); 1811 + retcode = -EBADF; 1824 1812 goto err_i1; 1825 1813 } 1826 1814
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
··· 93 93 kfd_debugfs_hqds_by_device, &kfd_debugfs_fops); 94 94 debugfs_create_file("rls", S_IFREG | 0444, debugfs_root, 95 95 kfd_debugfs_rls_by_device, &kfd_debugfs_fops); 96 - debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root, 96 + debugfs_create_file("hang_hws", S_IFREG | 0200, debugfs_root, 97 97 NULL, &kfd_debugfs_hang_hws_fops); 98 98 } 99 99
+19 -1
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 728 728 { 729 729 if (!kfd->init_complete) 730 730 return 0; 731 + 732 + kfd->dqm->ops.pre_reset(kfd->dqm); 733 + 731 734 kgd2kfd_suspend(kfd); 732 735 733 736 kfd_signal_reset_event(kfd); ··· 825 822 return err; 826 823 } 827 824 825 + static inline void kfd_queue_work(struct workqueue_struct *wq, 826 + struct work_struct *work) 827 + { 828 + int cpu, new_cpu; 829 + 830 + cpu = new_cpu = smp_processor_id(); 831 + do { 832 + new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids; 833 + if (cpu_to_node(new_cpu) == numa_node_id()) 834 + break; 835 + } while (cpu != new_cpu); 836 + 837 + queue_work_on(new_cpu, wq, work); 838 + } 839 + 828 840 /* This is called directly from KGD at ISR. */ 829 841 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) 830 842 { ··· 862 844 patched_ihre, &is_patched) 863 845 && enqueue_ih_ring_entry(kfd, 864 846 is_patched ? patched_ihre : ih_ring_entry)) 865 - queue_work(kfd->ih_wq, &kfd->interrupt_work); 847 + kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work); 866 848 867 849 spin_unlock_irqrestore(&kfd->interrupt_lock, flags); 868 850 }
+29 -11
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
··· 930 930 for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++) 931 931 kfree(dqm->mqd_mgrs[i]); 932 932 mutex_destroy(&dqm->lock_hidden); 933 - kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem); 934 933 } 935 934 936 935 static int start_nocpsch(struct device_queue_manager *dqm) ··· 946 947 static int stop_nocpsch(struct device_queue_manager *dqm) 947 948 { 948 949 if (dqm->dev->device_info->asic_family == CHIP_HAWAII) 949 - pm_uninit(&dqm->packets); 950 + pm_uninit(&dqm->packets, false); 950 951 dqm->sched_running = false; 951 952 952 953 return 0; 954 + } 955 + 956 + static void pre_reset(struct device_queue_manager *dqm) 957 + { 958 + dqm_lock(dqm); 959 + dqm->is_resetting = true; 960 + dqm_unlock(dqm); 953 961 } 954 962 955 963 static int allocate_sdma_queue(struct device_queue_manager *dqm, ··· 1106 1100 dqm_lock(dqm); 1107 1101 /* clear hang status when driver try to start the hw scheduler */ 1108 1102 dqm->is_hws_hang = false; 1103 + dqm->is_resetting = false; 1109 1104 dqm->sched_running = true; 1110 1105 execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); 1111 1106 dqm_unlock(dqm); ··· 1114 1107 return 0; 1115 1108 fail_allocate_vidmem: 1116 1109 fail_set_sched_resources: 1117 - pm_uninit(&dqm->packets); 1110 + pm_uninit(&dqm->packets, false); 1118 1111 fail_packet_manager_init: 1119 1112 return retval; 1120 1113 } 1121 1114 1122 1115 static int stop_cpsch(struct device_queue_manager *dqm) 1123 1116 { 1117 + bool hanging; 1118 + 1124 1119 dqm_lock(dqm); 1125 - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); 1120 + if (!dqm->is_hws_hang) 1121 + unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); 1122 + hanging = dqm->is_hws_hang || dqm->is_resetting; 1126 1123 dqm->sched_running = false; 1127 1124 dqm_unlock(dqm); 1128 1125 1129 1126 kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); 1130 - pm_uninit(&dqm->packets); 1127 + pm_uninit(&dqm->packets, hanging); 1131 1128 1132 1129 return 0; 1133 1130 } ··· 1363 1352 /* should be timed out 
*/ 1364 1353 retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED, 1365 1354 queue_preemption_timeout_ms); 1366 - if (retval) 1355 + if (retval) { 1356 + pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n"); 1357 + dqm->is_hws_hang = true; 1358 + /* It's possible we're detecting a HWS hang in the 1359 + * middle of a GPU reset. No need to schedule another 1360 + * reset in this case. 1361 + */ 1362 + if (!dqm->is_resetting) 1363 + schedule_work(&dqm->hw_exception_work); 1367 1364 return retval; 1365 + } 1368 1366 1369 1367 pm_release_ib(&dqm->packets); 1370 1368 dqm->active_runlist = false; ··· 1391 1371 if (dqm->is_hws_hang) 1392 1372 return -EIO; 1393 1373 retval = unmap_queues_cpsch(dqm, filter, filter_param); 1394 - if (retval) { 1395 - pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n"); 1396 - dqm->is_hws_hang = true; 1397 - schedule_work(&dqm->hw_exception_work); 1374 + if (retval) 1398 1375 return retval; 1399 - } 1400 1376 1401 1377 return map_queues_cpsch(dqm); 1402 1378 } ··· 1786 1770 dqm->ops.initialize = initialize_cpsch; 1787 1771 dqm->ops.start = start_cpsch; 1788 1772 dqm->ops.stop = stop_cpsch; 1773 + dqm->ops.pre_reset = pre_reset; 1789 1774 dqm->ops.destroy_queue = destroy_queue_cpsch; 1790 1775 dqm->ops.update_queue = update_queue; 1791 1776 dqm->ops.register_process = register_process; ··· 1805 1788 /* initialize dqm for no cp scheduling */ 1806 1789 dqm->ops.start = start_nocpsch; 1807 1790 dqm->ops.stop = stop_nocpsch; 1791 + dqm->ops.pre_reset = pre_reset; 1808 1792 dqm->ops.create_queue = create_queue_nocpsch; 1809 1793 dqm->ops.destroy_queue = destroy_queue_nocpsch; 1810 1794 dqm->ops.update_queue = update_queue;
+2 -1
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
··· 104 104 int (*initialize)(struct device_queue_manager *dqm); 105 105 int (*start)(struct device_queue_manager *dqm); 106 106 int (*stop)(struct device_queue_manager *dqm); 107 + void (*pre_reset)(struct device_queue_manager *dqm); 107 108 void (*uninitialize)(struct device_queue_manager *dqm); 108 109 int (*create_kernel_queue)(struct device_queue_manager *dqm, 109 110 struct kernel_queue *kq, ··· 191 190 /* the pasid mapping for each kfd vmid */ 192 191 uint16_t vmid_pasid[VMID_NUM]; 193 192 uint64_t pipelines_addr; 194 - struct kfd_mem_obj *pipeline_mem; 195 193 uint64_t fence_gpu_addr; 196 194 unsigned int *fence_addr; 197 195 struct kfd_mem_obj *fence_mem; ··· 199 199 200 200 /* hw exception */ 201 201 bool is_hws_hang; 202 + bool is_resetting; 202 203 struct work_struct hw_exception_work; 203 204 struct kfd_mem_obj hiq_sdma_mqd; 204 205 bool sched_running;
+4 -4
drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
··· 195 195 } 196 196 197 197 /* Uninitialize a kernel queue and free all its memory usages. */ 198 - static void kq_uninitialize(struct kernel_queue *kq) 198 + static void kq_uninitialize(struct kernel_queue *kq, bool hanging) 199 199 { 200 - if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ) 200 + if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ && !hanging) 201 201 kq->mqd_mgr->destroy_mqd(kq->mqd_mgr, 202 202 kq->queue->mqd, 203 203 KFD_PREEMPT_TYPE_WAVEFRONT_RESET, ··· 337 337 return NULL; 338 338 } 339 339 340 - void kernel_queue_uninit(struct kernel_queue *kq) 340 + void kernel_queue_uninit(struct kernel_queue *kq, bool hanging) 341 341 { 342 - kq_uninitialize(kq); 342 + kq_uninitialize(kq, hanging); 343 343 kfree(kq); 344 344 } 345 345
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
··· 264 264 return 0; 265 265 } 266 266 267 - void pm_uninit(struct packet_manager *pm) 267 + void pm_uninit(struct packet_manager *pm, bool hanging) 268 268 { 269 269 mutex_destroy(&pm->lock); 270 - kernel_queue_uninit(pm->priv_queue); 270 + kernel_queue_uninit(pm->priv_queue, hanging); 271 271 } 272 272 273 273 int pm_send_set_resources(struct packet_manager *pm,
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
··· 883 883 void device_queue_manager_uninit(struct device_queue_manager *dqm); 884 884 struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, 885 885 enum kfd_queue_type type); 886 - void kernel_queue_uninit(struct kernel_queue *kq); 886 + void kernel_queue_uninit(struct kernel_queue *kq, bool hanging); 887 887 int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid); 888 888 889 889 /* Process Queue Manager */ ··· 972 972 extern const struct packet_manager_funcs kfd_v9_pm_funcs; 973 973 974 974 int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm); 975 - void pm_uninit(struct packet_manager *pm); 975 + void pm_uninit(struct packet_manager *pm, bool hanging); 976 976 int pm_send_set_resources(struct packet_manager *pm, 977 977 struct scheduling_resources *res); 978 978 int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues);
+2
drivers/gpu/drm/amd/amdkfd/kfd_process.c
··· 324 324 (int)process->lead_thread->pid); 325 325 } 326 326 out: 327 + if (!IS_ERR(process)) 328 + kref_get(&process->ref); 327 329 mutex_unlock(&kfd_processes_mutex); 328 330 329 331 return process;
+1 -1
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
··· 374 374 /* destroy kernel queue (DIQ) */ 375 375 dqm = pqn->kq->dev->dqm; 376 376 dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd); 377 - kernel_queue_uninit(pqn->kq); 377 + kernel_queue_uninit(pqn->kq, false); 378 378 } 379 379 380 380 if (pqn->q) {
+7
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
··· 486 486 dev->node_props.num_sdma_engines); 487 487 sysfs_show_32bit_prop(buffer, "num_sdma_xgmi_engines", 488 488 dev->node_props.num_sdma_xgmi_engines); 489 + sysfs_show_32bit_prop(buffer, "num_sdma_queues_per_engine", 490 + dev->node_props.num_sdma_queues_per_engine); 491 + sysfs_show_32bit_prop(buffer, "num_cp_queues", 492 + dev->node_props.num_cp_queues); 489 493 490 494 if (dev->gpu) { 491 495 log_max_watch_addr = ··· 1313 1309 dev->node_props.num_sdma_engines = gpu->device_info->num_sdma_engines; 1314 1310 dev->node_props.num_sdma_xgmi_engines = 1315 1311 gpu->device_info->num_xgmi_sdma_engines; 1312 + dev->node_props.num_sdma_queues_per_engine = 1313 + gpu->device_info->num_sdma_queues_per_engine; 1316 1314 dev->node_props.num_gws = (hws_gws_support && 1317 1315 dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ? 1318 1316 amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0; 1317 + dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm); 1319 1318 1320 1319 kfd_fill_mem_clk_max_info(dev); 1321 1320 kfd_fill_iolink_non_crat_info(dev);
+2
drivers/gpu/drm/amd/amdkfd/kfd_topology.h
··· 81 81 int32_t drm_render_minor; 82 82 uint32_t num_sdma_engines; 83 83 uint32_t num_sdma_xgmi_engines; 84 + uint32_t num_sdma_queues_per_engine; 85 + uint32_t num_cp_queues; 84 86 char name[KFD_TOPOLOGY_PUBLIC_NAME_SIZE]; 85 87 }; 86 88
+1 -1
drivers/gpu/drm/amd/display/Kconfig
··· 6 6 bool "AMD DC - Enable new display engine" 7 7 default y 8 8 select SND_HDA_COMPONENT if SND_HDA_CORE 9 - select DRM_AMD_DC_DCN if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) 9 + select DRM_AMD_DC_DCN if (X86 || PPC64) && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS) 10 10 help 11 11 Choose this option if you want to use the new display engine 12 12 support for AMDGPU. This adds required support for Vega and
+153 -101
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 98 98 #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" 99 99 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); 100 100 101 + /* Number of bytes in PSP header for firmware. */ 102 + #define PSP_HEADER_BYTES 0x100 103 + 104 + /* Number of bytes in PSP footer for firmware. */ 105 + #define PSP_FOOTER_BYTES 0x100 106 + 101 107 /** 102 108 * DOC: overview 103 109 * ··· 747 741 748 742 static int dm_dmub_hw_init(struct amdgpu_device *adev) 749 743 { 750 - const unsigned int psp_header_bytes = 0x100; 751 - const unsigned int psp_footer_bytes = 0x100; 752 744 const struct dmcub_firmware_header_v1_0 *hdr; 753 745 struct dmub_srv *dmub_srv = adev->dm.dmub_srv; 746 + struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; 754 747 const struct firmware *dmub_fw = adev->dm.dmub_fw; 755 748 struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; 756 749 struct abm *abm = adev->dm.dc->res_pool->abm; 757 - struct dmub_srv_region_params region_params; 758 - struct dmub_srv_region_info region_info; 759 - struct dmub_srv_fb_params fb_params; 760 - struct dmub_srv_fb_info fb_info; 761 750 struct dmub_srv_hw_params hw_params; 762 751 enum dmub_status status; 763 752 const unsigned char *fw_inst_const, *fw_bss_data; 764 - uint32_t i; 765 - int r; 753 + uint32_t i, fw_inst_const_size, fw_bss_data_size; 766 754 bool has_hw_support; 767 755 768 756 if (!dmub_srv) 769 757 /* DMUB isn't supported on the ASIC. */ 770 758 return 0; 759 + 760 + if (!fb_info) { 761 + DRM_ERROR("No framebuffer info for DMUB service.\n"); 762 + return -EINVAL; 763 + } 771 764 772 765 if (!dmub_fw) { 773 766 /* Firmware required for DMUB support. */ ··· 787 782 788 783 hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; 789 784 790 - /* Calculate the size of all the regions for the DMUB service. 
*/ 791 - memset(&region_params, 0, sizeof(region_params)); 792 - 793 - region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 794 - psp_header_bytes - psp_footer_bytes; 795 - region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 796 - region_params.vbios_size = adev->dm.dc->ctx->dc_bios->bios_size; 797 - 798 - status = dmub_srv_calc_region_info(dmub_srv, &region_params, 799 - &region_info); 800 - 801 - if (status != DMUB_STATUS_OK) { 802 - DRM_ERROR("Error calculating DMUB region info: %d\n", status); 803 - return -EINVAL; 804 - } 805 - 806 - /* 807 - * Allocate a framebuffer based on the total size of all the regions. 808 - * TODO: Move this into GART. 809 - */ 810 - r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 811 - AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, 812 - &adev->dm.dmub_bo_gpu_addr, 813 - &adev->dm.dmub_bo_cpu_addr); 814 - if (r) 815 - return r; 816 - 817 - /* Rebase the regions on the framebuffer address. */ 818 - memset(&fb_params, 0, sizeof(fb_params)); 819 - fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; 820 - fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; 821 - fb_params.region_info = &region_info; 822 - 823 - status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, &fb_info); 824 - if (status != DMUB_STATUS_OK) { 825 - DRM_ERROR("Error calculating DMUB FB info: %d\n", status); 826 - return -EINVAL; 827 - } 828 - 829 785 fw_inst_const = dmub_fw->data + 830 786 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 831 - psp_header_bytes; 787 + PSP_HEADER_BYTES; 832 788 833 789 fw_bss_data = dmub_fw->data + 834 790 le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 835 791 le32_to_cpu(hdr->inst_const_bytes); 836 792 837 793 /* Copy firmware and bios info into FB memory. 
*/ 838 - memcpy(fb_info.fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, 839 - region_params.inst_const_size); 840 - memcpy(fb_info.fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, 841 - region_params.bss_data_size); 842 - memcpy(fb_info.fb[DMUB_WINDOW_3_VBIOS].cpu_addr, 843 - adev->dm.dc->ctx->dc_bios->bios, region_params.vbios_size); 794 + fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 795 + PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 796 + 797 + fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 798 + 799 + memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, 800 + fw_inst_const_size); 801 + memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data, 802 + fw_bss_data_size); 803 + memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, 804 + adev->bios_size); 805 + 806 + /* Reset regions that need to be reset. */ 807 + memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0, 808 + fb_info->fb[DMUB_WINDOW_4_MAILBOX].size); 809 + 810 + memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0, 811 + fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size); 812 + 813 + memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, 814 + fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); 844 815 845 816 /* Initialize hardware. 
*/ 846 817 memset(&hw_params, 0, sizeof(hw_params)); ··· 826 845 if (dmcu) 827 846 hw_params.psp_version = dmcu->psp_version; 828 847 829 - for (i = 0; i < fb_info.num_fb; ++i) 830 - hw_params.fb[i] = &fb_info.fb[i]; 848 + for (i = 0; i < fb_info->num_fb; ++i) 849 + hw_params.fb[i] = &fb_info->fb[i]; 831 850 832 851 status = dmub_srv_hw_init(dmub_srv, &hw_params); 833 852 if (status != DMUB_STATUS_OK) { ··· 1155 1174 static int dm_dmub_sw_init(struct amdgpu_device *adev) 1156 1175 { 1157 1176 struct dmub_srv_create_params create_params; 1177 + struct dmub_srv_region_params region_params; 1178 + struct dmub_srv_region_info region_info; 1179 + struct dmub_srv_fb_params fb_params; 1180 + struct dmub_srv_fb_info *fb_info; 1181 + struct dmub_srv *dmub_srv; 1158 1182 const struct dmcub_firmware_header_v1_0 *hdr; 1159 1183 const char *fw_name_dmub; 1160 1184 enum dmub_asic dmub_asic; ··· 1175 1189 default: 1176 1190 /* ASIC doesn't support DMUB. */ 1177 1191 return 0; 1178 - } 1179 - 1180 - adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 1181 - if (!adev->dm.dmub_srv) { 1182 - DRM_ERROR("Failed to allocate DMUB service!\n"); 1183 - return -ENOMEM; 1184 - } 1185 - 1186 - memset(&create_params, 0, sizeof(create_params)); 1187 - create_params.user_ctx = adev; 1188 - create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 1189 - create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 1190 - create_params.asic = dmub_asic; 1191 - 1192 - status = dmub_srv_create(adev->dm.dmub_srv, &create_params); 1193 - if (status != DMUB_STATUS_OK) { 1194 - DRM_ERROR("Error creating DMUB service: %d\n", status); 1195 - return -EINVAL; 1196 1192 } 1197 1193 1198 1194 r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); ··· 1206 1238 DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", 1207 1239 adev->dm.dmcub_fw_version); 1208 1240 1241 + adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); 1242 + dmub_srv = adev->dm.dmub_srv; 
1243 + 1244 + if (!dmub_srv) { 1245 + DRM_ERROR("Failed to allocate DMUB service!\n"); 1246 + return -ENOMEM; 1247 + } 1248 + 1249 + memset(&create_params, 0, sizeof(create_params)); 1250 + create_params.user_ctx = adev; 1251 + create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; 1252 + create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; 1253 + create_params.asic = dmub_asic; 1254 + 1255 + /* Create the DMUB service. */ 1256 + status = dmub_srv_create(dmub_srv, &create_params); 1257 + if (status != DMUB_STATUS_OK) { 1258 + DRM_ERROR("Error creating DMUB service: %d\n", status); 1259 + return -EINVAL; 1260 + } 1261 + 1262 + /* Calculate the size of all the regions for the DMUB service. */ 1263 + memset(&region_params, 0, sizeof(region_params)); 1264 + 1265 + region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - 1266 + PSP_HEADER_BYTES - PSP_FOOTER_BYTES; 1267 + region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); 1268 + region_params.vbios_size = adev->bios_size; 1269 + region_params.fw_bss_data = 1270 + adev->dm.dmub_fw->data + 1271 + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + 1272 + le32_to_cpu(hdr->inst_const_bytes); 1273 + 1274 + status = dmub_srv_calc_region_info(dmub_srv, &region_params, 1275 + &region_info); 1276 + 1277 + if (status != DMUB_STATUS_OK) { 1278 + DRM_ERROR("Error calculating DMUB region info: %d\n", status); 1279 + return -EINVAL; 1280 + } 1281 + 1282 + /* 1283 + * Allocate a framebuffer based on the total size of all the regions. 1284 + * TODO: Move this into GART. 1285 + */ 1286 + r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, 1287 + AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, 1288 + &adev->dm.dmub_bo_gpu_addr, 1289 + &adev->dm.dmub_bo_cpu_addr); 1290 + if (r) 1291 + return r; 1292 + 1293 + /* Rebase the regions on the framebuffer address. 
*/ 1294 + memset(&fb_params, 0, sizeof(fb_params)); 1295 + fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; 1296 + fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; 1297 + fb_params.region_info = &region_info; 1298 + 1299 + adev->dm.dmub_fb_info = 1300 + kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); 1301 + fb_info = adev->dm.dmub_fb_info; 1302 + 1303 + if (!fb_info) { 1304 + DRM_ERROR( 1305 + "Failed to allocate framebuffer info for DMUB service!\n"); 1306 + return -ENOMEM; 1307 + } 1308 + 1309 + status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); 1310 + if (status != DMUB_STATUS_OK) { 1311 + DRM_ERROR("Error calculating DMUB FB info: %d\n", status); 1312 + return -EINVAL; 1313 + } 1314 + 1209 1315 return 0; 1210 1316 } 1211 1317 ··· 1298 1256 static int dm_sw_fini(void *handle) 1299 1257 { 1300 1258 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1259 + 1260 + kfree(adev->dm.dmub_fb_info); 1261 + adev->dm.dmub_fb_info = NULL; 1301 1262 1302 1263 if (adev->dm.dmub_srv) { 1303 1264 dmub_srv_destroy(adev->dm.dmub_srv); ··· 1604 1559 struct dm_plane_state *dm_new_plane_state; 1605 1560 struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); 1606 1561 enum dc_connection_type new_connection_type = dc_connection_none; 1607 - int i; 1562 + int i, r; 1608 1563 1609 1564 /* Recreate dc_state - DC invalidates it when setting power state to S3. */ 1610 1565 dc_release_state(dm_state->context); 1611 1566 dm_state->context = dc_create_state(dm->dc); 1612 1567 /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ 1613 1568 dc_resource_state_construct(dm->dc, dm_state->context); 1569 + 1570 + /* Before powering on DC we need to re-initialize DMUB. 
*/ 1571 + r = dm_dmub_hw_init(adev); 1572 + if (r) 1573 + DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); 1614 1574 1615 1575 /* power on hardware */ 1616 1576 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); ··· 3704 3654 return color_space; 3705 3655 } 3706 3656 3707 - static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out) 3657 + static bool adjust_colour_depth_from_display_info( 3658 + struct dc_crtc_timing *timing_out, 3659 + const struct drm_display_info *info) 3708 3660 { 3709 - if (timing_out->display_color_depth <= COLOR_DEPTH_888) 3710 - return; 3711 - 3712 - timing_out->display_color_depth--; 3713 - } 3714 - 3715 - static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out, 3716 - const struct drm_display_info *info) 3717 - { 3661 + enum dc_color_depth depth = timing_out->display_color_depth; 3718 3662 int normalized_clk; 3719 - if (timing_out->display_color_depth <= COLOR_DEPTH_888) 3720 - return; 3721 3663 do { 3722 3664 normalized_clk = timing_out->pix_clk_100hz / 10; 3723 3665 /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ 3724 3666 if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) 3725 3667 normalized_clk /= 2; 3726 3668 /* Adjusting pix clock following on HDMI spec based on colour depth */ 3727 - switch (timing_out->display_color_depth) { 3669 + switch (depth) { 3670 + case COLOR_DEPTH_888: 3671 + break; 3728 3672 case COLOR_DEPTH_101010: 3729 3673 normalized_clk = (normalized_clk * 30) / 24; 3730 3674 break; ··· 3729 3685 normalized_clk = (normalized_clk * 48) / 24; 3730 3686 break; 3731 3687 default: 3732 - return; 3688 + /* The above depths are the only ones valid for HDMI. 
*/ 3689 + return false; 3733 3690 } 3734 - if (normalized_clk <= info->max_tmds_clock) 3735 - return; 3736 - reduce_mode_colour_depth(timing_out); 3737 - 3738 - } while (timing_out->display_color_depth > COLOR_DEPTH_888); 3739 - 3691 + if (normalized_clk <= info->max_tmds_clock) { 3692 + timing_out->display_color_depth = depth; 3693 + return true; 3694 + } 3695 + } while (--depth > COLOR_DEPTH_666); 3696 + return false; 3740 3697 } 3741 3698 3742 3699 static void fill_stream_properties_from_drm_display_mode( ··· 3818 3773 3819 3774 stream->out_transfer_func->type = TF_TYPE_PREDEFINED; 3820 3775 stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; 3821 - if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 3822 - adjust_colour_depth_from_display_info(timing_out, info); 3776 + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { 3777 + if (!adjust_colour_depth_from_display_info(timing_out, info) && 3778 + drm_mode_is_420_also(info, mode_in) && 3779 + timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { 3780 + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 3781 + adjust_colour_depth_from_display_info(timing_out, info); 3782 + } 3783 + } 3823 3784 } 3824 3785 3825 3786 static void fill_audio_info(struct audio_info *audio_info, ··· 4076 4025 4077 4026 if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { 4078 4027 #if defined(CONFIG_DRM_AMD_DC_DCN) 4079 - dc_dsc_parse_dsc_dpcd(aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 4028 + dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, 4029 + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, 4080 4030 aconnector->dc_link->dpcd_caps.dsc_caps.dsc_ext_caps.raw, 4081 4031 &dsc_caps); 4082 4032 #endif ··· 5613 5561 5614 5562 drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); 5615 5563 5616 - /* This defaults to the max in the range, but we want 8bpc. 
*/ 5617 - aconnector->base.state->max_bpc = 8; 5618 - aconnector->base.state->max_requested_bpc = 8; 5564 + /* This defaults to the max in the range, but we want 8bpc for non-edp. */ 5565 + aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8; 5566 + aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; 5619 5567 5620 5568 if (connector_type == DRM_MODE_CONNECTOR_eDP && 5621 5569 dc_is_dmcu_initialized(adev->dm.dc)) {
+7
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 133 133 struct dmub_srv *dmub_srv; 134 134 135 135 /** 136 + * @dmub_fb_info: 137 + * 138 + * Framebuffer regions for the DMUB. 139 + */ 140 + struct dmub_srv_fb_info *dmub_fb_info; 141 + 142 + /** 136 143 * @dmub_fw: 137 144 * 138 145 * DMUB firmware, required on hardware that has DMUB support.
+4 -15
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
··· 111 111 */ 112 112 static void dm_irq_work_func(struct work_struct *work) 113 113 { 114 - struct list_head *entry; 115 114 struct irq_list_head *irq_list_head = 116 115 container_of(work, struct irq_list_head, work); 117 116 struct list_head *handler_list = &irq_list_head->head; 118 117 struct amdgpu_dm_irq_handler_data *handler_data; 119 118 120 - list_for_each(entry, handler_list) { 121 - handler_data = list_entry(entry, 122 - struct amdgpu_dm_irq_handler_data, 123 - list); 124 - 119 + list_for_each_entry(handler_data, handler_list, list) { 125 120 DRM_DEBUG_KMS("DM_IRQ: work_func: for dal_src=%d\n", 126 121 handler_data->irq_source); 127 122 ··· 523 528 enum dc_irq_source irq_source) 524 529 { 525 530 struct amdgpu_dm_irq_handler_data *handler_data; 526 - struct list_head *entry; 527 531 unsigned long irq_table_flags; 528 532 529 533 DM_IRQ_TABLE_LOCK(adev, irq_table_flags); 530 534 531 - list_for_each( 532 - entry, 533 - &adev->dm.irq_handler_list_high_tab[irq_source]) { 534 - 535 - handler_data = list_entry(entry, 536 - struct amdgpu_dm_irq_handler_data, 537 - list); 538 - 535 + list_for_each_entry(handler_data, 536 + &adev->dm.irq_handler_list_high_tab[irq_source], 537 + list) { 539 538 /* Call a subcomponent which registered for immediate 540 539 * interrupt notification */ 541 540 handler_data->handler(handler_data->handler_arg);
+9
drivers/gpu/drm/amd/display/dc/calcs/Makefile
··· 1 1 # 2 2 # Copyright 2017 Advanced Micro Devices, Inc. 3 + # Copyright 2019 Raptor Engineering, LLC 3 4 # 4 5 # Permission is hereby granted, free of charge, to any person obtaining a 5 6 # copy of this software and associated documentation files (the "Software"), ··· 25 24 # It calculates Bandwidth and Watermarks values for HW programming 26 25 # 27 26 27 + ifdef CONFIG_X86 28 28 calcs_ccflags := -mhard-float -msse 29 + endif 30 + 31 + ifdef CONFIG_PPC64 32 + calcs_ccflags := -mhard-float -maltivec 33 + endif 29 34 30 35 ifdef CONFIG_CC_IS_GCC 31 36 ifeq ($(call cc-ifversion, -lt, 0701, y), y) ··· 39 32 endif 40 33 endif 41 34 35 + ifdef CONFIG_X86 42 36 ifdef IS_OLD_GCC 43 37 # Stack alignment mismatch, proceed with caution. 44 38 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 ··· 47 39 calcs_ccflags += -mpreferred-stack-boundary=4 48 40 else 49 41 calcs_ccflags += -msse2 42 + endif 50 43 endif 51 44 52 45 CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags)
+12 -12
drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
··· 154 154 155 155 156 156 157 - if (data->d0_underlay_mode == bw_def_none) { d0_underlay_enable = 0; } 158 - else { 159 - d0_underlay_enable = 1; 160 - } 161 - if (data->d1_underlay_mode == bw_def_none) { d1_underlay_enable = 0; } 162 - else { 163 - d1_underlay_enable = 1; 164 - } 157 + if (data->d0_underlay_mode == bw_def_none) 158 + d0_underlay_enable = false; 159 + else 160 + d0_underlay_enable = true; 161 + if (data->d1_underlay_mode == bw_def_none) 162 + d1_underlay_enable = false; 163 + else 164 + d1_underlay_enable = true; 165 165 data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable; 166 166 switch (data->underlay_surface_type) { 167 167 case bw_def_420: ··· 286 286 data->cursor_width_pixels[2] = bw_int_to_fixed(0); 287 287 data->cursor_width_pixels[3] = bw_int_to_fixed(0); 288 288 /* graphics surface parameters from spreadsheet*/ 289 - fbc_enabled = 0; 290 - lpt_enabled = 0; 289 + fbc_enabled = false; 290 + lpt_enabled = false; 291 291 for (i = 4; i <= maximum_number_of_surfaces - 3; i++) { 292 292 if (i < data->number_of_displays + 4) { 293 293 if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) { ··· 338 338 data->access_one_channel_only[i] = 0; 339 339 } 340 340 if (data->fbc_en[i] == 1) { 341 - fbc_enabled = 1; 341 + fbc_enabled = true; 342 342 if (data->lpt_en[i] == 1) { 343 - lpt_enabled = 1; 343 + lpt_enabled = true; 344 344 } 345 345 } 346 346 data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width);
+13 -12
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
··· 1 1 /* 2 2 * Copyright 2017 Advanced Micro Devices, Inc. 3 + * Copyright 2019 Raptor Engineering, LLC 3 4 * 4 5 * Permission is hereby granted, free of charge, to any person obtaining a 5 6 * copy of this software and associated documentation files (the "Software"), ··· 623 622 { 624 623 bool updated = false; 625 624 626 - kernel_fpu_begin(); 625 + DC_FP_START(); 627 626 if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns 628 627 && dc->debug.sr_exit_time_ns) { 629 628 updated = true; ··· 659 658 dc->dcn_soc->dram_clock_change_latency = 660 659 dc->debug.dram_clock_change_latency_ns / 1000.0; 661 660 } 662 - kernel_fpu_end(); 661 + DC_FP_END(); 663 662 664 663 return updated; 665 664 } ··· 739 738 dcn_bw_sync_calcs_and_dml(dc); 740 739 741 740 memset(v, 0, sizeof(*v)); 742 - kernel_fpu_begin(); 741 + DC_FP_START(); 743 742 744 743 v->sr_exit_time = dc->dcn_soc->sr_exit_time; 745 744 v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time; ··· 1272 1271 bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9; 1273 1272 bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit; 1274 1273 1275 - kernel_fpu_end(); 1274 + DC_FP_END(); 1276 1275 1277 1276 PERFORMANCE_TRACE_END(); 1278 1277 BW_VAL_TRACE_FINISH(); ··· 1440 1439 res = dm_pp_get_clock_levels_by_type_with_voltage( 1441 1440 ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); 1442 1441 1443 - kernel_fpu_begin(); 1442 + DC_FP_START(); 1444 1443 1445 1444 if (res) 1446 1445 res = verify_clock_values(&fclks); ··· 1460 1459 } else 1461 1460 BREAK_TO_DEBUGGER(); 1462 1461 1463 - kernel_fpu_end(); 1462 + DC_FP_END(); 1464 1463 1465 1464 res = dm_pp_get_clock_levels_by_type_with_voltage( 1466 1465 ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); 1467 1466 1468 - kernel_fpu_begin(); 1467 + DC_FP_START(); 1469 1468 1470 1469 if (res) 1471 1470 res = verify_clock_values(&dcfclks); ··· 1478 1477 } else 1479 1478 BREAK_TO_DEBUGGER(); 1480 1479 1481 - kernel_fpu_end(); 1480 
+ DC_FP_END(); 1482 1481 } 1483 1482 1484 1483 void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) ··· 1493 1492 if (!pp || !pp->set_wm_ranges) 1494 1493 return; 1495 1494 1496 - kernel_fpu_begin(); 1495 + DC_FP_START(); 1497 1496 min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32; 1498 1497 min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000; 1499 1498 socclk_khz = dc->dcn_soc->socclk * 1000; 1500 - kernel_fpu_end(); 1499 + DC_FP_END(); 1501 1500 1502 1501 /* Now notify PPLib/SMU about which Watermarks sets they should select 1503 1502 * depending on DPM state they are in. And update BW MGR GFX Engine and ··· 1548 1547 1549 1548 void dcn_bw_sync_calcs_and_dml(struct dc *dc) 1550 1549 { 1551 - kernel_fpu_begin(); 1550 + DC_FP_START(); 1552 1551 DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n" 1553 1552 "sr_enter_plus_exit_time: %f ns\n" 1554 1553 "urgent_latency: %f ns\n" ··· 1737 1736 dc->dml.ip.bug_forcing_LC_req_same_size_fixed = 1738 1737 dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes; 1739 1738 dc->dml.ip.dcfclk_cstate_latency = dc->dcn_ip->dcfclk_cstate_latency; 1740 - kernel_fpu_end(); 1739 + DC_FP_END(); 1741 1740 }
+40 -6
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
··· 27 27 #include "clk_mgr_internal.h" 28 28 29 29 #include "dce100/dce_clk_mgr.h" 30 + #include "dcn20_clk_mgr.h" 30 31 #include "reg_helper.h" 31 32 #include "core_types.h" 32 33 #include "dm_helpers.h" ··· 101 100 } 102 101 103 102 void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, 104 - struct dc_state *context) 103 + struct dc_state *context, bool safe_to_lower) 105 104 { 106 105 int i; 107 106 108 107 clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; 109 108 for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { 110 - int dpp_inst, dppclk_khz; 109 + int dpp_inst, dppclk_khz, prev_dppclk_khz; 111 110 112 111 /* Loop index will match dpp->inst if resource exists, 113 112 * and we want to avoid dependency on dpp object ··· 115 114 dpp_inst = i; 116 115 dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; 117 116 118 - clk_mgr->dccg->funcs->update_dpp_dto( 119 - clk_mgr->dccg, dpp_inst, dppclk_khz); 117 + prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; 118 + 119 + if (safe_to_lower || prev_dppclk_khz < dppclk_khz) { 120 + clk_mgr->dccg->funcs->update_dpp_dto( 121 + clk_mgr->dccg, dpp_inst, dppclk_khz); 122 + } 120 123 } 121 124 } 122 125 ··· 166 161 dc->debug.force_clock_mode & 0x1) { 167 162 //this is from resume or boot up, if forced_clock cfg option used, we bypass program dispclk and DPPCLK, but need set them for S3. 168 163 force_reset = true; 164 + 165 + dcn2_read_clocks_from_hw_dentist(clk_mgr_base); 166 + 169 167 //force_clock_mode 0x1: force reset the clock even it is the same clock as long as it is in Passive level. 
170 168 } 171 169 display_count = clk_mgr_helper_get_active_display_cnt(dc, context); ··· 248 240 if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { 249 241 if (dpp_clock_lowered) { 250 242 // if clock is being lowered, increase DTO before lowering refclk 251 - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); 243 + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 252 244 dcn20_update_clocks_update_dentist(clk_mgr); 253 245 } else { 254 246 // if clock is being raised, increase refclk before lowering DTO ··· 256 248 dcn20_update_clocks_update_dentist(clk_mgr); 257 249 // always update dtos unless clock is lowered and not safe to lower 258 250 if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) 259 - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); 251 + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 260 252 } 261 253 } 262 254 ··· 345 337 if (pp_smu->set_pme_wa_enable) 346 338 pp_smu->set_pme_wa_enable(&pp_smu->pp_smu); 347 339 } 340 + } 341 + 342 + 343 + void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base) 344 + { 345 + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); 346 + uint32_t dispclk_wdivider; 347 + uint32_t dppclk_wdivider; 348 + int disp_divider; 349 + int dpp_divider; 350 + 351 + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, &dispclk_wdivider); 352 + REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, &dppclk_wdivider); 353 + 354 + disp_divider = dentist_get_divider_from_did(dispclk_wdivider); 355 + dpp_divider = dentist_get_divider_from_did(dispclk_wdivider); 356 + 357 + if (disp_divider && dpp_divider) { 358 + /* Calculate the current DFS clock, in kHz.*/ 359 + clk_mgr_base->clks.dispclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR 360 + * clk_mgr->base.dentist_vco_freq_khz) / disp_divider; 361 + 362 + clk_mgr_base->clks.dppclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR 363 + * 
clk_mgr->base.dentist_vco_freq_khz) / dpp_divider; 364 + } 365 + 348 366 } 349 367 350 368 void dcn2_get_clock(struct clk_mgr *clk_mgr,
+5 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h
··· 34 34 struct dc_state *context, 35 35 bool safe_to_lower); 36 36 void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, 37 - struct dc_state *context); 37 + struct dc_state *context, bool safe_to_lower); 38 38 39 39 void dcn2_init_clocks(struct clk_mgr *clk_mgr); 40 40 ··· 51 51 struct dc_clock_config *clock_cfg); 52 52 53 53 void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr); 54 + 55 + void dcn2_read_clocks_from_hw_dentist(struct clk_mgr *clk_mgr_base); 56 + 57 + 54 58 #endif //__DCN20_CLK_MGR_H__
+5 -5
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
··· 164 164 } 165 165 166 166 if (dpp_clock_lowered) { 167 - // if clock is being lowered, increase DTO before lowering refclk 168 - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); 167 + // increase per DPP DTO before lowering global dppclk 168 + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 169 169 rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); 170 170 } else { 171 - // if clock is being raised, increase refclk before lowering DTO 171 + // increase global DPPCLK before lowering per DPP DTO 172 172 if (update_dppclk || update_dispclk) 173 173 rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); 174 174 // always update dtos unless clock is lowered and not safe to lower 175 175 if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) 176 - dcn20_update_clocks_update_dpp_dto(clk_mgr, context); 176 + dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower); 177 177 } 178 178 179 179 if (update_dispclk && ··· 409 409 continue; 410 410 411 411 ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; 412 - ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;; 412 + ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type; 413 413 /* We will not select WM based on dcfclk, so leave it as unconstrained */ 414 414 ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; 415 415 ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+118 -59
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 66 66 67 67 #include "dce/dce_i2c.h" 68 68 69 + #define CTX \ 70 + dc->ctx 71 + 69 72 #define DC_LOGGER \ 70 73 dc->ctx->logger 71 74 ··· 582 579 583 580 } 584 581 582 + static bool dc_construct_ctx(struct dc *dc, 583 + const struct dc_init_data *init_params) 584 + { 585 + struct dc_context *dc_ctx; 586 + enum dce_version dc_version = DCE_VERSION_UNKNOWN; 587 + 588 + dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); 589 + if (!dc_ctx) 590 + return false; 591 + 592 + dc_ctx->cgs_device = init_params->cgs_device; 593 + dc_ctx->driver_context = init_params->driver; 594 + dc_ctx->dc = dc; 595 + dc_ctx->asic_id = init_params->asic_id; 596 + dc_ctx->dc_sink_id_count = 0; 597 + dc_ctx->dc_stream_id_count = 0; 598 + dc_ctx->dce_environment = init_params->dce_environment; 599 + 600 + /* Create logger */ 601 + 602 + dc_version = resource_parse_asic_id(init_params->asic_id); 603 + dc_ctx->dce_version = dc_version; 604 + 605 + dc_ctx->perf_trace = dc_perf_trace_create(); 606 + if (!dc_ctx->perf_trace) { 607 + ASSERT_CRITICAL(false); 608 + return false; 609 + } 610 + 611 + dc->ctx = dc_ctx; 612 + 613 + return true; 614 + } 615 + 585 616 static bool dc_construct(struct dc *dc, 586 617 const struct dc_init_data *init_params) 587 618 { ··· 627 590 struct dcn_ip_params *dcn_ip; 628 591 #endif 629 592 630 - enum dce_version dc_version = DCE_VERSION_UNKNOWN; 631 593 dc->config = init_params->flags; 632 594 633 595 // Allocate memory for the vm_helper ··· 672 636 dc->soc_bounding_box = init_params->soc_bounding_box; 673 637 #endif 674 638 675 - dc_ctx = kzalloc(sizeof(*dc_ctx), GFP_KERNEL); 676 - if (!dc_ctx) { 639 + if (!dc_construct_ctx(dc, init_params)) { 677 640 dm_error("%s: failed to create ctx\n", __func__); 678 641 goto fail; 679 642 } 680 643 681 - dc_ctx->cgs_device = init_params->cgs_device; 682 - dc_ctx->driver_context = init_params->driver; 683 - dc_ctx->dc = dc; 684 - dc_ctx->asic_id = init_params->asic_id; 685 - dc_ctx->dc_sink_id_count = 0; 686 - 
dc_ctx->dc_stream_id_count = 0; 687 - dc->ctx = dc_ctx; 688 - 689 - /* Create logger */ 690 - 691 - dc_ctx->dce_environment = init_params->dce_environment; 692 - 693 - dc_version = resource_parse_asic_id(init_params->asic_id); 694 - dc_ctx->dce_version = dc_version; 644 + dc_ctx = dc->ctx; 695 645 696 646 /* Resource should construct all asic specific resources. 697 647 * This should be the only place where we need to parse the asic id ··· 692 670 bp_init_data.bios = init_params->asic_id.atombios_base_address; 693 671 694 672 dc_ctx->dc_bios = dal_bios_parser_create( 695 - &bp_init_data, dc_version); 673 + &bp_init_data, dc_ctx->dce_version); 696 674 697 675 if (!dc_ctx->dc_bios) { 698 676 ASSERT_CRITICAL(false); ··· 700 678 } 701 679 702 680 dc_ctx->created_bios = true; 703 - } 704 - 705 - dc_ctx->perf_trace = dc_perf_trace_create(); 706 - if (!dc_ctx->perf_trace) { 707 - ASSERT_CRITICAL(false); 708 - goto fail; 709 681 } 682 + 683 + 710 684 711 685 /* Create GPIO service */ 712 686 dc_ctx->gpio_service = dal_gpio_service_create( 713 - dc_version, 687 + dc_ctx->dce_version, 714 688 dc_ctx->dce_environment, 715 689 dc_ctx); 716 690 ··· 715 697 goto fail; 716 698 } 717 699 718 - dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version); 700 + dc->res_pool = dc_create_resource_pool(dc, init_params, dc_ctx->dce_version); 719 701 if (!dc->res_pool) 720 702 goto fail; 721 703 ··· 746 728 return true; 747 729 748 730 fail: 749 - 750 - dc_destruct(dc); 751 731 return false; 752 732 } 753 733 ··· 799 783 dc_release_state(current_ctx); 800 784 } 801 785 786 + static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context) 787 + { 788 + int i; 789 + int count = 0; 790 + struct pipe_ctx *pipe; 791 + PERF_TRACE(); 792 + for (i = 0; i < MAX_PIPES; i++) { 793 + pipe = &context->res_ctx.pipe_ctx[i]; 794 + 795 + if (!pipe->plane_state) 796 + continue; 797 + 798 + /* Timeout 100 ms */ 799 + while (count < 100000) { 800 + /* Must set to false to start with, 
due to OR in update function */ 801 + pipe->plane_state->status.is_flip_pending = false; 802 + dc->hwss.update_pending_status(pipe); 803 + if (!pipe->plane_state->status.is_flip_pending) 804 + break; 805 + udelay(1); 806 + count++; 807 + } 808 + ASSERT(!pipe->plane_state->status.is_flip_pending); 809 + } 810 + PERF_TRACE(); 811 + } 812 + 802 813 /******************************************************************************* 803 814 * Public functions 804 815 ******************************************************************************/ ··· 838 795 if (NULL == dc) 839 796 goto alloc_fail; 840 797 841 - if (false == dc_construct(dc, init_params)) 842 - goto construct_fail; 798 + if (init_params->dce_environment == DCE_ENV_VIRTUAL_HW) { 799 + if (false == dc_construct_ctx(dc, init_params)) { 800 + dc_destruct(dc); 801 + goto construct_fail; 802 + } 803 + } else { 804 + if (false == dc_construct(dc, init_params)) { 805 + dc_destruct(dc); 806 + goto construct_fail; 807 + } 843 808 844 - full_pipe_count = dc->res_pool->pipe_count; 845 - if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) 846 - full_pipe_count--; 847 - dc->caps.max_streams = min( 848 - full_pipe_count, 849 - dc->res_pool->stream_enc_count); 809 + full_pipe_count = dc->res_pool->pipe_count; 810 + if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) 811 + full_pipe_count--; 812 + dc->caps.max_streams = min( 813 + full_pipe_count, 814 + dc->res_pool->stream_enc_count); 850 815 851 - dc->caps.max_links = dc->link_count; 852 - dc->caps.max_audios = dc->res_pool->audio_count; 853 - dc->caps.linear_pitch_alignment = 64; 816 + dc->optimize_seamless_boot_streams = 0; 817 + dc->caps.max_links = dc->link_count; 818 + dc->caps.max_audios = dc->res_pool->audio_count; 819 + dc->caps.linear_pitch_alignment = 64; 854 820 855 - dc->caps.max_dp_protocol_version = DP_VERSION_1_4; 821 + dc->caps.max_dp_protocol_version = DP_VERSION_1_4; 822 + 823 + if (dc->res_pool->dmcu != NULL) 824 + 
dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; 825 + } 856 826 857 827 /* Populate versioning information */ 858 828 dc->versions.dc_ver = DC_VER; 859 - 860 - if (dc->res_pool->dmcu != NULL) 861 - dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version; 862 829 863 830 dc->build_id = DC_BUILD_ID; 864 831 ··· 887 834 888 835 void dc_hardware_init(struct dc *dc) 889 836 { 890 - dc->hwss.init_hw(dc); 837 + if (dc->ctx->dce_environment != DCE_ENV_VIRTUAL_HW) 838 + dc->hwss.init_hw(dc); 891 839 } 892 840 893 841 void dc_init_callbacks(struct dc *dc, ··· 1202 1148 1203 1149 for (i = 0; i < context->stream_count; i++) { 1204 1150 if (context->streams[i]->apply_seamless_boot_optimization) 1205 - dc->optimize_seamless_boot = true; 1151 + dc->optimize_seamless_boot_streams++; 1206 1152 } 1207 1153 1208 - if (!dc->optimize_seamless_boot) 1154 + if (dc->optimize_seamless_boot_streams == 0) 1209 1155 dc->hwss.prepare_bandwidth(dc, context); 1210 1156 1211 1157 /* re-program planes for existing stream, in case we need to ··· 1278 1224 1279 1225 dc_enable_stereo(dc, context, dc_streams, context->stream_count); 1280 1226 1281 - if (!dc->optimize_seamless_boot) 1282 - /* pplib is notified if disp_num changed */ 1283 - dc->hwss.optimize_bandwidth(dc, context); 1227 + if (dc->optimize_seamless_boot_streams == 0) { 1228 + /* Must wait for no flips to be pending before doing optimize bw */ 1229 + wait_for_no_pipes_pending(dc, context); 1230 + /* pplib is notified if disp_num changed */ 1231 + dc->hwss.optimize_bandwidth(dc, context); 1232 + } 1284 1233 1285 1234 for (i = 0; i < context->stream_count; i++) 1286 1235 context->streams[i]->mode_changed = false; ··· 1324 1267 int i; 1325 1268 struct dc_state *context = dc->current_state; 1326 1269 1327 - if (!dc->optimized_required || dc->optimize_seamless_boot) 1270 + if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0) 1328 1271 return true; 1329 1272 1330 1273 post_surface_trace(dc); ··· 1600 1543 
1601 1544 update_flags->bits.scaling_change = 1; 1602 1545 if (u->scaling_info->src_rect.width > u->surface->src_rect.width 1603 - && u->scaling_info->src_rect.height > u->surface->src_rect.height) 1546 + || u->scaling_info->src_rect.height > u->surface->src_rect.height) 1604 1547 /* Making src rect bigger requires a bandwidth change */ 1605 1548 update_flags->bits.clock_change = 1; 1606 1549 } ··· 1614 1557 update_flags->bits.position_change = 1; 1615 1558 1616 1559 if (update_flags->bits.clock_change 1617 - || update_flags->bits.bandwidth_change) 1560 + || update_flags->bits.bandwidth_change 1561 + || update_flags->bits.scaling_change) 1618 1562 return UPDATE_TYPE_FULL; 1619 1563 1620 - if (update_flags->bits.scaling_change 1621 - || update_flags->bits.position_change) 1564 + if (update_flags->bits.position_change) 1622 1565 return UPDATE_TYPE_MED; 1623 1566 1624 1567 return UPDATE_TYPE_FAST; ··· 2108 2051 2109 2052 dc->hwss.optimize_bandwidth(dc, dc->current_state); 2110 2053 } else { 2111 - if (!dc->optimize_seamless_boot) 2054 + if (dc->optimize_seamless_boot_streams == 0) 2112 2055 dc->hwss.prepare_bandwidth(dc, dc->current_state); 2113 2056 2114 2057 core_link_enable_stream(dc->current_state, pipe_ctx); ··· 2149 2092 int i, j; 2150 2093 struct pipe_ctx *top_pipe_to_program = NULL; 2151 2094 2152 - if (dc->optimize_seamless_boot && surface_count > 0) { 2095 + if (dc->optimize_seamless_boot_streams > 0 && surface_count > 0) { 2153 2096 /* Optimize seamless boot flag keeps clocks and watermarks high until 2154 2097 * first flip. After first flip, optimization is required to lower 2155 2098 * bandwidth. 
Important to note that it is expected UEFI will ··· 2158 2101 */ 2159 2102 if (stream->apply_seamless_boot_optimization) { 2160 2103 stream->apply_seamless_boot_optimization = false; 2161 - dc->optimize_seamless_boot = false; 2162 - dc->optimized_required = true; 2104 + dc->optimize_seamless_boot_streams--; 2105 + 2106 + if (dc->optimize_seamless_boot_streams == 0) 2107 + dc->optimized_required = true; 2163 2108 } 2164 2109 } 2165 2110 2166 - if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) { 2111 + if (update_type == UPDATE_TYPE_FULL && dc->optimize_seamless_boot_streams == 0) { 2167 2112 dc->hwss.prepare_bandwidth(dc, context); 2168 2113 context_clock_trace(dc, context); 2169 2114 }
+117 -28
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 940 940 941 941 } 942 942 943 - static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx) 943 + /* 944 + * When handling 270 rotation in mixed SLS mode, we have 945 + * stream->timing.h_border_left that is non zero. If we are doing 946 + * pipe-splitting, this h_border_left value gets added to recout.x and when it 947 + * calls calculate_inits_and_adj_vp() and 948 + * adjust_vp_and_init_for_seamless_clip(), it can cause viewport.height for a 949 + * pipe to be incorrect. 950 + * 951 + * To fix this, instead of using stream->timing.h_border_left, we can use 952 + * stream->dst.x to represent the border instead. So we will set h_border_left 953 + * to 0 and shift the appropriate amount in stream->dst.x. We will then 954 + * perform all calculations in resource_build_scaling_params() based on this 955 + * and then restore the h_border_left and stream->dst.x to their original 956 + * values. 957 + * 958 + * shift_border_left_to_dst() will shift the amount of h_border_left to 959 + * stream->dst.x and set h_border_left to 0. restore_border_left_from_dst() 960 + * will restore h_border_left and stream->dst.x back to their original values 961 + * We also need to make sure pipe_ctx->plane_res.scl_data.h_active uses the 962 + * original h_border_left value in its calculation. 
963 + */ 964 + int shift_border_left_to_dst(struct pipe_ctx *pipe_ctx) 944 965 { 945 - unsigned int integer_multiple = 1; 966 + int store_h_border_left = pipe_ctx->stream->timing.h_border_left; 946 967 947 - if (pipe_ctx->plane_state->scaling_quality.integer_scaling) { 948 - // calculate maximum # of replication of src onto addressable 949 - integer_multiple = min( 950 - pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width, 951 - pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height); 952 - 953 - //scale dst 954 - pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width; 955 - pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height; 956 - 957 - //center dst onto addressable 958 - pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2; 959 - pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - pipe_ctx->stream->dst.height)/2; 960 - 961 - //We are guaranteed that we are scaling in integer ratio 962 - pipe_ctx->plane_state->scaling_quality.v_taps = 1; 963 - pipe_ctx->plane_state->scaling_quality.h_taps = 1; 964 - pipe_ctx->plane_state->scaling_quality.v_taps_c = 1; 965 - pipe_ctx->plane_state->scaling_quality.h_taps_c = 1; 968 + if (store_h_border_left) { 969 + pipe_ctx->stream->timing.h_border_left = 0; 970 + pipe_ctx->stream->dst.x += store_h_border_left; 966 971 } 972 + return store_h_border_left; 973 + } 974 + 975 + void restore_border_left_from_dst(struct pipe_ctx *pipe_ctx, 976 + int store_h_border_left) 977 + { 978 + pipe_ctx->stream->dst.x -= store_h_border_left; 979 + pipe_ctx->stream->timing.h_border_left = store_h_border_left; 967 980 } 968 981 969 982 bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) ··· 984 971 const struct dc_plane_state *plane_state = pipe_ctx->plane_state; 985 972 struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; 986 973 bool res = false; 974 + int store_h_border_left = 
shift_border_left_to_dst(pipe_ctx); 987 975 DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); 988 976 /* Important: scaling ratio calculation requires pixel format, 989 977 * lb depth calculation requires recout and taps require scaling ratios. ··· 993 979 pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( 994 980 pipe_ctx->plane_state->format); 995 981 996 - calculate_integer_scaling(pipe_ctx); 997 - 998 982 calculate_scaling_ratios(pipe_ctx); 999 983 1000 984 calculate_viewport(pipe_ctx); 1001 985 1002 - if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || pipe_ctx->plane_res.scl_data.viewport.width < 16) 986 + if (pipe_ctx->plane_res.scl_data.viewport.height < 16 || 987 + pipe_ctx->plane_res.scl_data.viewport.width < 16) { 988 + if (store_h_border_left) { 989 + restore_border_left_from_dst(pipe_ctx, 990 + store_h_border_left); 991 + } 1003 992 return false; 993 + } 1004 994 1005 995 calculate_recout(pipe_ctx); 1006 996 ··· 1017 999 pipe_ctx->plane_res.scl_data.recout.x += timing->h_border_left; 1018 1000 pipe_ctx->plane_res.scl_data.recout.y += timing->v_border_top; 1019 1001 1020 - pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; 1021 - pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; 1002 + pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + 1003 + store_h_border_left + timing->h_border_right; 1004 + pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + 1005 + timing->v_border_top + timing->v_border_bottom; 1022 1006 1023 1007 /* Taps calculations */ 1024 1008 if (pipe_ctx->plane_res.xfm != NULL) ··· 1066 1046 plane_state->dst_rect.width, 1067 1047 plane_state->dst_rect.x, 1068 1048 plane_state->dst_rect.y); 1049 + 1050 + if (store_h_border_left) 1051 + restore_border_left_from_dst(pipe_ctx, store_h_border_left); 1069 1052 1070 1053 return res; 1071 1054 } ··· 1917 1894 
pipe_ctx->plane_res.dpp = pool->dpps[tg_inst]; 1918 1895 pipe_ctx->stream_res.opp = pool->opps[tg_inst]; 1919 1896 1920 - if (pool->dpps[tg_inst]) 1897 + if (pool->dpps[tg_inst]) { 1921 1898 pipe_ctx->plane_res.mpcc_inst = pool->dpps[tg_inst]->inst; 1899 + 1900 + // Read DPP->MPCC->OPP Pipe from HW State 1901 + if (pool->mpc->funcs->read_mpcc_state) { 1902 + struct mpcc_state s = {0}; 1903 + 1904 + pool->mpc->funcs->read_mpcc_state(pool->mpc, pipe_ctx->plane_res.mpcc_inst, &s); 1905 + 1906 + if (s.dpp_id < MAX_MPCC) 1907 + pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].dpp_id = s.dpp_id; 1908 + 1909 + if (s.bot_mpcc_id < MAX_MPCC) 1910 + pool->mpc->mpcc_array[pipe_ctx->plane_res.mpcc_inst].mpcc_bot = 1911 + &pool->mpc->mpcc_array[s.bot_mpcc_id]; 1912 + 1913 + if (s.opp_id < MAX_OPP) 1914 + pipe_ctx->stream_res.opp->mpc_tree_params.opp_id = s.opp_id; 1915 + } 1916 + } 1922 1917 pipe_ctx->pipe_idx = tg_inst; 1923 1918 1924 1919 pipe_ctx->stream = stream; ··· 2322 2281 if (color_space == COLOR_SPACE_SRGB || 2323 2282 color_space == COLOR_SPACE_2020_RGB_FULLRANGE) { 2324 2283 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_FULL_RANGE; 2325 - hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_FULL_RANGE; 2284 + hdmi_info.bits.YQ0_YQ1 = YYC_QUANTIZATION_LIMITED_RANGE; 2326 2285 } else if (color_space == COLOR_SPACE_SRGB_LIMITED || 2327 2286 color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) { 2328 2287 hdmi_info.bits.Q0_Q1 = RGB_QUANTIZATION_LIMITED_RANGE; ··· 2852 2811 return -1; 2853 2812 } 2854 2813 } 2814 + static unsigned int get_max_audio_sample_rate(struct audio_mode *modes) 2815 + { 2816 + if (modes) { 2817 + if (modes->sample_rates.rate.RATE_192) 2818 + return 192000; 2819 + if (modes->sample_rates.rate.RATE_176_4) 2820 + return 176400; 2821 + if (modes->sample_rates.rate.RATE_96) 2822 + return 96000; 2823 + if (modes->sample_rates.rate.RATE_88_2) 2824 + return 88200; 2825 + if (modes->sample_rates.rate.RATE_48) 2826 + return 48000; 2827 + if 
(modes->sample_rates.rate.RATE_44_1) 2828 + return 44100; 2829 + if (modes->sample_rates.rate.RATE_32) 2830 + return 32000; 2831 + } 2832 + /*original logic when no audio info*/ 2833 + return 441000; 2834 + } 2835 + 2836 + void get_audio_check(struct audio_info *aud_modes, 2837 + struct audio_check *audio_chk) 2838 + { 2839 + unsigned int i; 2840 + unsigned int max_sample_rate = 0; 2841 + 2842 + if (aud_modes) { 2843 + audio_chk->audio_packet_type = 0x2;/*audio sample packet AP = .25 for layout0, 1 for layout1*/ 2844 + 2845 + audio_chk->max_audiosample_rate = 0; 2846 + for (i = 0; i < aud_modes->mode_count; i++) { 2847 + max_sample_rate = get_max_audio_sample_rate(&aud_modes->modes[i]); 2848 + if (audio_chk->max_audiosample_rate < max_sample_rate) 2849 + audio_chk->max_audiosample_rate = max_sample_rate; 2850 + /*dts takes the same as type 2: AP = 0.25*/ 2851 + } 2852 + /*check which one take more bandwidth*/ 2853 + if (audio_chk->max_audiosample_rate > 192000) 2854 + audio_chk->audio_packet_type = 0x9;/*AP =1*/ 2855 + audio_chk->acat = 0;/*not support*/ 2856 + } 2857 + } 2858 + 2859 + 2860 + 2861 +
+37 -22
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 406 406 stream->writeback_info[stream->num_wb_info++] = *wb_info; 407 407 } 408 408 409 - if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { 410 - dm_error("DC: update_bandwidth failed!\n"); 411 - return false; 412 - } 413 - 414 - /* enable writeback */ 415 409 if (dc->hwss.enable_writeback) { 416 410 struct dc_stream_status *stream_status = dc_stream_get_status(stream); 417 411 struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; 412 + dwb->otg_inst = stream_status->primary_otg_inst; 413 + } 414 + if (IS_DIAG_DC(dc->ctx->dce_environment)) { 415 + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { 416 + dm_error("DC: update_bandwidth failed!\n"); 417 + return false; 418 + } 418 419 419 - if (dwb->funcs->is_enabled(dwb)) { 420 - /* writeback pipe already enabled, only need to update */ 421 - dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state); 422 - } else { 423 - /* Enable writeback pipe from scratch*/ 424 - dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state); 420 + /* enable writeback */ 421 + if (dc->hwss.enable_writeback) { 422 + struct dwbc *dwb = dc->res_pool->dwbc[wb_info->dwb_pipe_inst]; 423 + 424 + if (dwb->funcs->is_enabled(dwb)) { 425 + /* writeback pipe already enabled, only need to update */ 426 + dc->hwss.update_writeback(dc, wb_info, dc->current_state); 427 + } else { 428 + /* Enable writeback pipe from scratch*/ 429 + dc->hwss.enable_writeback(dc, wb_info, dc->current_state); 430 + } 425 431 } 426 432 } 427 - 428 433 return true; 429 434 } 430 435 ··· 468 463 } 469 464 stream->num_wb_info = j; 470 465 471 - /* recalculate and apply DML parameters */ 472 - if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { 473 - dm_error("DC: update_bandwidth failed!\n"); 474 - return false; 466 + if (IS_DIAG_DC(dc->ctx->dce_environment)) { 467 + /* recalculate and apply DML parameters */ 468 + if (!dc->hwss.update_bandwidth(dc, dc->current_state)) { 469 + dm_error("DC: update_bandwidth 
failed!\n"); 470 + return false; 471 + } 472 + 473 + /* disable writeback */ 474 + if (dc->hwss.disable_writeback) 475 + dc->hwss.disable_writeback(dc, dwb_pipe_inst); 475 476 } 476 - 477 - /* disable writeback */ 478 - if (dc->hwss.disable_writeback) 479 - dc->hwss.disable_writeback(dc, dwb_pipe_inst); 480 - 481 477 return true; 482 478 } 483 479 480 + bool dc_stream_warmup_writeback(struct dc *dc, 481 + int num_dwb, 482 + struct dc_writeback_info *wb_info) 483 + { 484 + if (dc->hwss.mmhubbub_warmup) 485 + return dc->hwss.mmhubbub_warmup(dc, num_dwb, wb_info); 486 + else 487 + return false; 488 + } 484 489 uint32_t dc_stream_get_vblank_counter(const struct dc_stream_state *stream) 485 490 { 486 491 uint8_t i;
+3 -2
drivers/gpu/drm/amd/display/dc/dc.h
··· 39 39 #include "inc/hw/dmcu.h" 40 40 #include "dml/display_mode_lib.h" 41 41 42 - #define DC_VER "3.2.62" 42 + #define DC_VER "3.2.64" 43 43 44 44 #define MAX_SURFACES 3 45 45 #define MAX_PLANES 6 ··· 367 367 bool disable_hubp_power_gate; 368 368 bool disable_dsc_power_gate; 369 369 int dsc_min_slice_height_override; 370 + int dsc_bpp_increment_div; 370 371 bool native422_support; 371 372 bool disable_pplib_wm_range; 372 373 enum wm_report_mode pplib_wm_report_mode; ··· 514 513 bool optimized_required; 515 514 516 515 /* Require to maintain clocks and bandwidth for UEFI enabled HW */ 517 - bool optimize_seamless_boot; 516 + int optimize_seamless_boot_streams; 518 517 519 518 /* FBC compressor */ 520 519 struct compressor *fbc_compressor;
+4 -1
drivers/gpu/drm/amd/display/dc/dc_dsc.h
··· 53 53 uint32_t min_target_bpp; 54 54 }; 55 55 56 - bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, 56 + bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, 57 + const uint8_t *dpcd_dsc_basic_data, 57 58 const uint8_t *dpcd_dsc_ext_data, 58 59 struct dsc_dec_dpcd_caps *dsc_sink_caps); 59 60 ··· 77 76 78 77 void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing, 79 78 struct dc_dsc_policy *policy); 79 + 80 + void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit); 80 81 81 82 #endif
+1
drivers/gpu/drm/amd/display/dc/dc_link.h
··· 133 133 struct link_flags { 134 134 bool dp_keep_receiver_powered; 135 135 bool dp_skip_DID2; 136 + bool dp_skip_reset_segment; 136 137 } wa_flags; 137 138 struct link_mst_stream_allocation_table mst_stream_alloc_table; 138 139
+7
drivers/gpu/drm/amd/display/dc/dc_stream.h
··· 344 344 bool dc_stream_add_writeback(struct dc *dc, 345 345 struct dc_stream_state *stream, 346 346 struct dc_writeback_info *wb_info); 347 + 347 348 bool dc_stream_remove_writeback(struct dc *dc, 348 349 struct dc_stream_state *stream, 349 350 uint32_t dwb_pipe_inst); 351 + 352 + bool dc_stream_warmup_writeback(struct dc *dc, 353 + int num_dwb, 354 + struct dc_writeback_info *wb_info); 355 + 350 356 bool dc_stream_dmdata_status_done(struct dc *dc, struct dc_stream_state *stream); 357 + 351 358 bool dc_stream_set_dynamic_metadata(struct dc *dc, 352 359 struct dc_stream_state *stream, 353 360 struct dc_dmdata_attributes *dmdata_attr);
+11 -2
drivers/gpu/drm/amd/display/dc/dc_types.h
··· 60 60 DCE_ENV_FPGA_MAXIMUS, 61 61 /* Emulation on real HW or on FPGA. Used by Diagnostics, enforces 62 62 * requirements of Diagnostics team. */ 63 - DCE_ENV_DIAG 63 + DCE_ENV_DIAG, 64 + /* 65 + * Guest VM system, DC HW may exist but is not virtualized and 66 + * should not be used. SW support for VDI only. 67 + */ 68 + DCE_ENV_VIRTUAL_HW 64 69 }; 65 70 66 71 /* Note: use these macro definitions instead of direct comparison! */ ··· 603 598 /* this field must be last in this struct */ 604 599 struct audio_mode modes[DC_MAX_AUDIO_DESC_COUNT]; 605 600 }; 606 - 601 + struct audio_check { 602 + unsigned int audio_packet_type; 603 + unsigned int max_audiosample_rate; 604 + unsigned int acat; 605 + }; 607 606 enum dc_infoframe_type { 608 607 DC_HDMI_INFOFRAME_TYPE_VENDOR = 0x81, 609 608 DC_HDMI_INFOFRAME_TYPE_AVI = 0x82,
+1 -2
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
··· 810 810 void min_set_viewport( 811 811 struct hubp *hubp, 812 812 const struct rect *viewport, 813 - const struct rect *viewport_c, 814 - enum dc_rotation_angle rotation) 813 + const struct rect *viewport_c) 815 814 { 816 815 struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp); 817 816
+1 -3
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h
··· 749 749 750 750 void min_set_viewport(struct hubp *hubp, 751 751 const struct rect *viewport, 752 - const struct rect *viewport_c, 753 - enum dc_rotation_angle rotation); 754 - /* rotation angle added for use by hubp21_set_viewport */ 752 + const struct rect *viewport_c); 755 753 756 754 void hubp1_clk_cntl(struct hubp *hubp, bool enable); 757 755 void hubp1_vtg_sel(struct hubp *hubp, uint32_t otg_inst);
+50 -22
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
··· 479 479 struct dce_hwseq *hws, 480 480 bool enable) 481 481 { 482 - bool force_on = 1; /* disable power gating */ 482 + bool force_on = true; /* disable power gating */ 483 483 484 484 if (enable) 485 - force_on = 0; 485 + force_on = false; 486 486 487 487 /* DCHUBP0/1/2/3 */ 488 488 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); ··· 860 860 struct dc_state *context) 861 861 { 862 862 int i; 863 + struct dc_link *link; 863 864 DC_LOGGER_INIT(dc->ctx->logger); 864 865 if (pipe_ctx->stream_res.stream_enc == NULL) { 865 866 pipe_ctx->stream = NULL; ··· 868 867 } 869 868 870 869 if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { 871 - /* DPMS may already disable */ 872 - if (!pipe_ctx->stream->dpms_off) 870 + link = pipe_ctx->stream->link; 871 + /* DPMS may already disable or */ 872 + /* dpms_off status is incorrect due to fastboot 873 + * feature. When system resume from S4 with second 874 + * screen only, the dpms_off would be true but 875 + * VBIOS lit up eDP, so check link status too. 
876 + */ 877 + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) 873 878 core_link_disable_stream(pipe_ctx); 874 879 else if (pipe_ctx->stream_res.audio) 875 880 dc->hwss.disable_audio_stream(pipe_ctx); ··· 1163 1156 } 1164 1157 } 1165 1158 1166 - for (i = 0; i < dc->res_pool->pipe_count; i++) { 1159 + /* num_opp will be equal to number of mpcc */ 1160 + for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) { 1167 1161 struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; 1168 1162 1169 1163 /* Cannot reset the MPC mux if seamless boot */ ··· 2299 2291 hubp->funcs->mem_program_viewport( 2300 2292 hubp, 2301 2293 &pipe_ctx->plane_res.scl_data.viewport, 2302 - &pipe_ctx->plane_res.scl_data.viewport_c, 2303 - plane_state->rotation); 2294 + &pipe_ctx->plane_res.scl_data.viewport_c); 2304 2295 } 2305 2296 2306 2297 if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { ··· 2916 2909 .rotation = pipe_ctx->plane_state->rotation, 2917 2910 .mirror = pipe_ctx->plane_state->horizontal_mirror 2918 2911 }; 2912 + bool pipe_split_on = (pipe_ctx->top_pipe != NULL) || 2913 + (pipe_ctx->bottom_pipe != NULL); 2919 2914 2920 2915 int x_plane = pipe_ctx->plane_state->dst_rect.x; 2921 2916 int y_plane = pipe_ctx->plane_state->dst_rect.y; ··· 2950 2941 // Swap axis and mirror horizontally 2951 2942 if (param.rotation == ROTATION_ANGLE_90) { 2952 2943 uint32_t temp_x = pos_cpy.x; 2944 + 2953 2945 pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width - 2954 2946 (pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x; 2955 2947 pos_cpy.y = temp_x; ··· 2958 2948 // Swap axis and mirror vertically 2959 2949 else if (param.rotation == ROTATION_ANGLE_270) { 2960 2950 uint32_t temp_y = pos_cpy.y; 2961 - if (pos_cpy.x > pipe_ctx->plane_res.scl_data.viewport.height) { 2962 - pos_cpy.x = pos_cpy.x - pipe_ctx->plane_res.scl_data.viewport.height; 2963 - pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x; 
2964 - } else { 2965 - pos_cpy.y = 2 * pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.x; 2966 - } 2951 + int viewport_height = 2952 + pipe_ctx->plane_res.scl_data.viewport.height; 2953 + 2954 + if (pipe_split_on) { 2955 + if (pos_cpy.x > viewport_height) { 2956 + pos_cpy.x = pos_cpy.x - viewport_height; 2957 + pos_cpy.y = viewport_height - pos_cpy.x; 2958 + } else { 2959 + pos_cpy.y = 2 * viewport_height - pos_cpy.x; 2960 + } 2961 + } else 2962 + pos_cpy.y = viewport_height - pos_cpy.x; 2967 2963 pos_cpy.x = temp_y; 2968 2964 } 2969 2965 // Mirror horizontally and vertically 2970 2966 else if (param.rotation == ROTATION_ANGLE_180) { 2971 - if (pos_cpy.x >= pipe_ctx->plane_res.scl_data.viewport.width + pipe_ctx->plane_res.scl_data.viewport.x) { 2972 - pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.width 2973 - - pos_cpy.x + 2 * pipe_ctx->plane_res.scl_data.viewport.x; 2974 - } else { 2975 - uint32_t temp_x = pos_cpy.x; 2976 - pos_cpy.x = 2 * pipe_ctx->plane_res.scl_data.viewport.x - pos_cpy.x; 2977 - if (temp_x >= pipe_ctx->plane_res.scl_data.viewport.x + (int)hubp->curs_attr.width 2978 - || pos_cpy.x <= (int)hubp->curs_attr.width + pipe_ctx->plane_state->src_rect.x) { 2979 - pos_cpy.x = temp_x + pipe_ctx->plane_res.scl_data.viewport.width; 2967 + int viewport_width = 2968 + pipe_ctx->plane_res.scl_data.viewport.width; 2969 + int viewport_x = 2970 + pipe_ctx->plane_res.scl_data.viewport.x; 2971 + 2972 + if (pipe_split_on) { 2973 + if (pos_cpy.x >= viewport_width + viewport_x) { 2974 + pos_cpy.x = 2 * viewport_width 2975 + - pos_cpy.x + 2 * viewport_x; 2976 + } else { 2977 + uint32_t temp_x = pos_cpy.x; 2978 + 2979 + pos_cpy.x = 2 * viewport_x - pos_cpy.x; 2980 + if (temp_x >= viewport_x + 2981 + (int)hubp->curs_attr.width || pos_cpy.x 2982 + <= (int)hubp->curs_attr.width + 2983 + pipe_ctx->plane_state->src_rect.x) { 2984 + pos_cpy.x = temp_x + viewport_width; 2985 + } 2980 2986 } 2987 + } else { 2988 + pos_cpy.x = viewport_width - pos_cpy.x + 2 * 
viewport_x; 2981 2989 } 2982 2990 pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y; 2983 2991 }
+8
drivers/gpu/drm/amd/display/dc/dcn20/Makefile
··· 9 9 10 10 DCN20 += dcn20_dsc.o 11 11 12 + ifdef CONFIG_X86 12 13 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -msse 14 + endif 15 + 16 + ifdef CONFIG_PPC64 17 + CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o := -mhard-float -maltivec 18 + endif 13 19 14 20 ifdef CONFIG_CC_IS_GCC 15 21 ifeq ($(call cc-ifversion, -lt, 0701, y), y) ··· 23 17 endif 24 18 endif 25 19 20 + ifdef CONFIG_X86 26 21 ifdef IS_OLD_GCC 27 22 # Stack alignment mismatch, proceed with caution. 28 23 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 ··· 31 24 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -mpreferred-stack-boundary=4 32 25 else 33 26 CFLAGS_$(AMDDALPATH)/dc/dcn20/dcn20_resource.o += -msse2 27 + endif 34 28 endif 35 29 36 30 AMD_DAL_DCN20 = $(addprefix $(AMDDALPATH)/dc/dcn20/,$(DCN20))
+10 -10
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
··· 50 50 51 51 if (dccg->ref_dppclk && req_dppclk) { 52 52 int ref_dppclk = dccg->ref_dppclk; 53 + int modulo, phase; 53 54 54 - ASSERT(req_dppclk <= ref_dppclk); 55 - /* need to clamp to 8 bits */ 56 - if (ref_dppclk > 0xff) { 57 - int divider = (ref_dppclk + 0xfe) / 0xff; 55 + // phase / modulo = dpp pipe clk / dpp global clk 56 + modulo = 0xff; // use FF at the end 57 + phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk; 58 58 59 - ref_dppclk /= divider; 60 - req_dppclk = (req_dppclk + divider - 1) / divider; 61 - if (req_dppclk > ref_dppclk) 62 - req_dppclk = ref_dppclk; 59 + if (phase > 0xff) { 60 + ASSERT(false); 61 + phase = 0xff; 63 62 } 63 + 64 64 REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, 65 - DPPCLK0_DTO_PHASE, req_dppclk, 66 - DPPCLK0_DTO_MODULO, ref_dppclk); 65 + DPPCLK0_DTO_PHASE, phase, 66 + DPPCLK0_DTO_MODULO, modulo); 67 67 REG_UPDATE(DPPCLK_DTO_CTRL, 68 68 DPPCLK_DTO_ENABLE[dpp_inst], 1); 69 69 } else {
+27 -13
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
··· 183 183 struct dce_hwseq *hws, 184 184 bool enable) 185 185 { 186 - bool force_on = 1; /* disable power gating */ 186 + bool force_on = true; /* disable power gating */ 187 187 188 188 if (enable) 189 - force_on = 0; 189 + force_on = false; 190 190 191 191 /* DCHUBP0/1/2/3/4/5 */ 192 192 REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on); ··· 1305 1305 struct hubp *hubp = pipe_ctx->plane_res.hubp; 1306 1306 struct dpp *dpp = pipe_ctx->plane_res.dpp; 1307 1307 struct dc_plane_state *plane_state = pipe_ctx->plane_state; 1308 + bool viewport_changed = false; 1308 1309 1309 1310 if (pipe_ctx->update_flags.bits.dppclk) 1310 1311 dpp->funcs->dpp_dppclk_control(dpp, false, true); ··· 1356 1355 || plane_state->update_flags.bits.global_alpha_change 1357 1356 || plane_state->update_flags.bits.per_pixel_alpha_change) { 1358 1357 // MPCC inst is equal to pipe index in practice 1359 - int mpcc_inst = pipe_ctx->pipe_idx; 1358 + int mpcc_inst = hubp->inst; 1360 1359 int opp_inst; 1361 - int opp_count = dc->res_pool->res_cap->num_opp; 1360 + int opp_count = dc->res_pool->pipe_count; 1362 1361 1363 1362 for (opp_inst = 0; opp_inst < opp_count; opp_inst++) { 1364 1363 if (dc->res_pool->opps[opp_inst]->mpcc_disconnect_pending[mpcc_inst]) { ··· 1384 1383 1385 1384 if (pipe_ctx->update_flags.bits.viewport || 1386 1385 (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || 1387 - (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) 1386 + (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) { 1387 + 1388 1388 hubp->funcs->mem_program_viewport( 1389 1389 hubp, 1390 1390 &pipe_ctx->plane_res.scl_data.viewport, 1391 - &pipe_ctx->plane_res.scl_data.viewport_c, 1392 - plane_state->rotation); 1391 + &pipe_ctx->plane_res.scl_data.viewport_c); 1392 + viewport_changed = true; 1393 + } 1393 1394 1394 1395 /* Any updates are handled in dc interface, just need to apply existing for plane enable */ 1395 - 
if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) 1396 + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed || 1397 + pipe_ctx->update_flags.bits.scaler || pipe_ctx->update_flags.bits.viewport) 1396 1398 && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { 1397 1399 dc->hwss.set_cursor_position(pipe_ctx); 1398 1400 dc->hwss.set_cursor_attribute(pipe_ctx); ··· 1445 1441 hubp->power_gated = false; 1446 1442 } 1447 1443 1444 + if (hubp->funcs->apply_PLAT_54186_wa && viewport_changed) 1445 + hubp->funcs->apply_PLAT_54186_wa(hubp, &plane_state->address); 1446 + 1448 1447 if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) 1449 1448 hws->funcs.update_plane_addr(dc, pipe_ctx); 1449 + 1450 + 1450 1451 1451 1452 if (pipe_ctx->update_flags.bits.enable) 1452 1453 hubp->funcs->set_blank(hubp, false); ··· 1740 1731 1741 1732 void dcn20_enable_writeback( 1742 1733 struct dc *dc, 1743 - const struct dc_stream_status *stream_status, 1744 1734 struct dc_writeback_info *wb_info, 1745 1735 struct dc_state *context) 1746 1736 { ··· 1753 1745 mcif_wb = dc->res_pool->mcif_wb[wb_info->dwb_pipe_inst]; 1754 1746 1755 1747 /* set the OPTC source mux */ 1756 - ASSERT(stream_status->primary_otg_inst < MAX_PIPES); 1757 - optc = dc->res_pool->timing_generators[stream_status->primary_otg_inst]; 1748 + optc = dc->res_pool->timing_generators[dwb->otg_inst]; 1758 1749 optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); 1759 1750 /* set MCIF_WB buffer and arbitration configuration */ 1760 1751 mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); ··· 2002 1995 struct dc_state *context) 2003 1996 { 2004 1997 int i; 1998 + struct dc_link *link; 2005 1999 DC_LOGGER_INIT(dc->ctx->logger); 2006 2000 if (pipe_ctx->stream_res.stream_enc == NULL) { 2007 2001 pipe_ctx->stream = NULL; ··· 2010 2002 } 2011 2003 2012 2004 if 
(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { 2013 - /* DPMS may already disable */ 2014 - if (!pipe_ctx->stream->dpms_off) 2005 + link = pipe_ctx->stream->link; 2006 + /* DPMS may already disable or */ 2007 + /* dpms_off status is incorrect due to fastboot 2008 + * feature. When system resume from S4 with second 2009 + * screen only, the dpms_off would be true but 2010 + * VBIOS lit up eDP, so check link status too. 2011 + */ 2012 + if (!pipe_ctx->stream->dpms_off || link->link_status.link_active) 2015 2013 core_link_disable_stream(pipe_ctx); 2016 2014 else if (pipe_ctx->stream_res.audio) 2017 2015 dc->hwss.disable_audio_stream(pipe_ctx);
-1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
··· 104 104 bool enable_triple_buffer); 105 105 void dcn20_enable_writeback( 106 106 struct dc *dc, 107 - const struct dc_stream_status *stream_status, 108 107 struct dc_writeback_info *wb_info, 109 108 struct dc_state *context); 110 109 void dcn20_disable_writeback(
+13 -11
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
··· 236 236 struct dc_crtc_timing *timing) 237 237 { 238 238 struct optc *optc1 = DCN10TG_FROM_TG(optc); 239 - /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192 */ 240 239 int mpcc_hactive = (timing->h_addressable + timing->h_border_left + timing->h_border_right) 241 240 / opp_cnt; 242 - int memory_mask = mpcc_hactive <= 2560 ? 0x3 : 0xf; 241 + uint32_t memory_mask; 243 242 uint32_t data_fmt = 0; 243 + 244 + ASSERT(opp_cnt == 2); 244 245 245 246 /* TODO: In pseudocode but does not affect maximus, delete comment if we dont need on asic 246 247 * REG_SET(OTG_GLOBAL_CONTROL2, 0, GLOBAL_UPDATE_LOCK_EN, 1); ··· 250 249 * MASTER_UPDATE_LOCK_DB_X, 160, 251 250 * MASTER_UPDATE_LOCK_DB_Y, 240); 252 251 */ 252 + 253 + /* 2 pieces of memory required for up to 5120 displays, 4 for up to 8192, 254 + * however, for ODM combine we can simplify by always using 4. 255 + * To make sure there's no overlap, each instance "reserves" 2 memories and 256 + * they are uniquely combined here. 
257 + */ 258 + memory_mask = 0x3 << (opp_id[0] * 2) | 0x3 << (opp_id[1] * 2); 259 + 253 260 if (REG(OPTC_MEMORY_CONFIG)) 254 261 REG_SET(OPTC_MEMORY_CONFIG, 0, 255 - OPTC_MEM_SEL, memory_mask << (optc->inst * 4)); 262 + OPTC_MEM_SEL, memory_mask); 256 263 257 264 if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) 258 265 data_fmt = 1; ··· 269 260 270 261 REG_UPDATE(OPTC_DATA_FORMAT_CONTROL, OPTC_DATA_FORMAT, data_fmt); 271 262 272 - ASSERT(opp_cnt == 2); 273 263 REG_SET_3(OPTC_DATA_SOURCE_SELECT, 0, 274 264 OPTC_NUM_OF_INPUT_SEGMENT, 1, 275 265 OPTC_SEG0_SRC_SEL, opp_id[0], ··· 390 382 { 391 383 struct optc *optc1 = DCN10TG_FROM_TG(optc); 392 384 393 - REG_SET(OTG_MANUAL_FLOW_CONTROL, 0, 394 - MANUAL_FLOW_CONTROL, 1); 395 - 396 - REG_SET(OTG_GLOBAL_CONTROL2, 0, 397 - MANUAL_FLOW_CONTROL_SEL, optc->inst); 398 - 399 385 REG_SET_8(OTG_TRIGA_CNTL, 0, 400 - OTG_TRIGA_SOURCE_SELECT, 22, 386 + OTG_TRIGA_SOURCE_SELECT, 21, 401 387 OTG_TRIGA_SOURCE_PIPE_SELECT, optc->inst, 402 388 OTG_TRIGA_RISING_EDGE_DETECT_CNTL, 1, 403 389 OTG_TRIGA_FALLING_EDGE_DETECT_CNTL, 0,
+1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.h
··· 106 106 void optc2_triplebuffer_unlock(struct timing_generator *optc); 107 107 void optc2_lock_doublebuffer_disable(struct timing_generator *optc); 108 108 void optc2_lock_doublebuffer_enable(struct timing_generator *optc); 109 + void optc2_setup_manual_trigger(struct timing_generator *optc); 109 110 void optc2_program_manual_trigger(struct timing_generator *optc); 110 111 bool optc2_is_two_pixels_per_containter(const struct dc_crtc_timing *timing); 111 112 #endif /* __DC_OPTC_DCN20_H__ */
+24 -8
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 1 1 /* 2 2 * Copyright 2016 Advanced Micro Devices, Inc. 3 + * Copyright 2019 Raptor Engineering, LLC 3 4 * 4 5 * Permission is hereby granted, free of charge, to any person obtaining a 5 6 * copy of this software and associated documentation files (the "Software"), ··· 66 65 67 66 #include "dcn/dcn_2_0_0_offset.h" 68 67 #include "dcn/dcn_2_0_0_sh_mask.h" 68 + #include "dpcs/dpcs_2_0_0_offset.h" 69 + #include "dpcs/dpcs_2_0_0_sh_mask.h" 69 70 70 71 #include "nbio/nbio_2_3_offset.h" 71 72 ··· 551 548 [id] = {\ 552 549 LE_DCN10_REG_LIST(id), \ 553 550 UNIPHY_DCN2_REG_LIST(phyid), \ 551 + DPCS_DCN2_REG_LIST(id), \ 554 552 SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ 555 553 } 556 554 ··· 565 561 }; 566 562 567 563 static const struct dcn10_link_enc_shift le_shift = { 568 - LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) 564 + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ 565 + DPCS_DCN2_MASK_SH_LIST(__SHIFT) 569 566 }; 570 567 571 568 static const struct dcn10_link_enc_mask le_mask = { 572 - LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) 569 + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ 570 + DPCS_DCN2_MASK_SH_LIST(_MASK) 573 571 }; 574 572 575 573 #define ipp_regs(id)\ ··· 2892 2886 bool voltage_supported = false; 2893 2887 bool full_pstate_supported = false; 2894 2888 bool dummy_pstate_supported = false; 2895 - double p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; 2896 - context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = dc->debug.disable_dram_clock_change_vactive_support; 2889 + double p_state_latency_us; 2897 2890 2898 - if (fast_validate) 2899 - return dcn20_validate_bandwidth_internal(dc, context, true); 2891 + DC_FP_START(); 2892 + p_state_latency_us = context->bw_ctx.dml.soc.dram_clock_change_latency_us; 2893 + context->bw_ctx.dml.soc.disable_dram_clock_change_vactive_support = 2894 + dc->debug.disable_dram_clock_change_vactive_support; 2900 2895 2896 + if (fast_validate) { 2897 + voltage_supported = dcn20_validate_bandwidth_internal(dc, 
context, true); 2898 + 2899 + DC_FP_END(); 2900 + return voltage_supported; 2901 + } 2901 2902 2902 2903 // Best case, we support full UCLK switch latency 2903 2904 voltage_supported = dcn20_validate_bandwidth_internal(dc, context, false); ··· 2933 2920 restore_dml_state: 2934 2921 context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; 2935 2922 2923 + DC_FP_END(); 2936 2924 return voltage_supported; 2937 2925 } 2938 2926 ··· 3225 3211 3226 3212 void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) 3227 3213 { 3228 - kernel_fpu_begin(); 3229 3214 if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns 3230 3215 && dc->bb_overrides.sr_exit_time_ns) { 3231 3216 bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; ··· 3248 3235 bb->dram_clock_change_latency_us = 3249 3236 dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; 3250 3237 } 3251 - kernel_fpu_end(); 3252 3238 } 3253 3239 3254 3240 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb( ··· 3452 3440 get_asic_rev_ip_params(ctx->asic_id.hw_internal_rev); 3453 3441 enum dml_project dml_project_version = 3454 3442 get_dml_project_version(ctx->asic_id.hw_internal_rev); 3443 + 3444 + DC_FP_START(); 3455 3445 3456 3446 ctx->dc_bios->regs = &bios_regs; 3457 3447 pool->base.funcs = &dcn20_res_pool_funcs; ··· 3752 3738 pool->base.oem_device = NULL; 3753 3739 } 3754 3740 3741 + DC_FP_END(); 3755 3742 return true; 3756 3743 3757 3744 create_fail: 3758 3745 3746 + DC_FP_END(); 3759 3747 dcn20_resource_destruct(pool); 3760 3748 3761 3749 return false;
+8
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
··· 5 5 DCN21 = dcn21_init.o dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o \ 6 6 dcn21_hwseq.o dcn21_link_encoder.o 7 7 8 + ifdef CONFIG_X86 8 9 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse 10 + endif 11 + 12 + ifdef CONFIG_PPC64 13 + CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -maltivec 14 + endif 9 15 10 16 ifdef CONFIG_CC_IS_GCC 11 17 ifeq ($(call cc-ifversion, -lt, 0701, y), y) ··· 19 13 endif 20 14 endif 21 15 16 + ifdef CONFIG_X86 22 17 ifdef IS_OLD_GCC 23 18 # Stack alignment mismatch, proceed with caution. 24 19 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 ··· 27 20 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -mpreferred-stack-boundary=4 28 21 else 29 22 CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2 23 + endif 30 24 endif 31 25 32 26 AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21))
+286 -28
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
··· 169 169 void hubp21_set_viewport( 170 170 struct hubp *hubp, 171 171 const struct rect *viewport, 172 - const struct rect *viewport_c, 173 - enum dc_rotation_angle rotation) 172 + const struct rect *viewport_c) 174 173 { 175 174 struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); 176 - int patched_viewport_height = 0; 177 - struct dc_debug_options *debug = &hubp->ctx->dc->debug; 178 175 179 176 REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION, 0, 180 177 PRI_VIEWPORT_WIDTH, viewport->width, ··· 190 193 SEC_VIEWPORT_X_START, viewport->x, 191 194 SEC_VIEWPORT_Y_START, viewport->y); 192 195 193 - /* 194 - * Work around for underflow issue with NV12 + rIOMMU translation 195 - * + immediate flip. This will cause hubp underflow, but will not 196 - * be user visible since underflow is in blank region 197 - * Disable w/a when rotated 180 degrees, causes vertical chroma offset 198 - */ 199 - patched_viewport_height = viewport_c->height; 200 - if (debug->nv12_iflip_vm_wa && viewport_c->height > 512 && 201 - rotation != ROTATION_ANGLE_180) { 202 - int pte_row_height = 0; 203 - int pte_rows = 0; 204 - 205 - REG_GET(DCHUBP_REQ_SIZE_CONFIG_C, 206 - PTE_ROW_HEIGHT_LINEAR_C, &pte_row_height); 207 - 208 - pte_row_height = 1 << (pte_row_height + 3); 209 - pte_rows = (viewport_c->height / pte_row_height) + 1; 210 - patched_viewport_height = pte_rows * pte_row_height + 1; 211 - } 212 - 213 - 214 196 /* DC supports NV12 only at the moment */ 215 197 REG_SET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 0, 216 198 PRI_VIEWPORT_WIDTH_C, viewport_c->width, 217 - PRI_VIEWPORT_HEIGHT_C, patched_viewport_height); 199 + PRI_VIEWPORT_HEIGHT_C, viewport_c->height); 218 200 219 201 REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 0, 220 202 PRI_VIEWPORT_X_START_C, viewport_c->x, ··· 201 225 202 226 REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, 203 227 SEC_VIEWPORT_WIDTH_C, viewport_c->width, 204 - SEC_VIEWPORT_HEIGHT_C, patched_viewport_height); 228 + SEC_VIEWPORT_HEIGHT_C, viewport_c->height); 205 229 206 230 
REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, 207 231 SEC_VIEWPORT_X_START_C, viewport_c->x, 208 232 SEC_VIEWPORT_Y_START_C, viewport_c->y); 233 + } 234 + 235 + static void hubp21_apply_PLAT_54186_wa( 236 + struct hubp *hubp, 237 + const struct dc_plane_address *address) 238 + { 239 + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); 240 + struct dc_debug_options *debug = &hubp->ctx->dc->debug; 241 + unsigned int chroma_bpe = 2; 242 + unsigned int luma_addr_high_part = 0; 243 + unsigned int row_height = 0; 244 + unsigned int chroma_pitch = 0; 245 + unsigned int viewport_c_height = 0; 246 + unsigned int viewport_c_width = 0; 247 + unsigned int patched_viewport_height = 0; 248 + unsigned int patched_viewport_width = 0; 249 + unsigned int rotation_angle = 0; 250 + unsigned int pix_format = 0; 251 + unsigned int h_mirror_en = 0; 252 + unsigned int tile_blk_size = 64 * 1024; /* 64KB for 64KB SW, 4KB for 4KB SW */ 253 + 254 + 255 + if (!debug->nv12_iflip_vm_wa) 256 + return; 257 + 258 + REG_GET(DCHUBP_REQ_SIZE_CONFIG_C, 259 + PTE_ROW_HEIGHT_LINEAR_C, &row_height); 260 + 261 + REG_GET_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 262 + PRI_VIEWPORT_WIDTH_C, &viewport_c_width, 263 + PRI_VIEWPORT_HEIGHT_C, &viewport_c_height); 264 + 265 + REG_GET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 266 + PRIMARY_SURFACE_ADDRESS_HIGH_C, &luma_addr_high_part); 267 + 268 + REG_GET(DCSURF_SURFACE_PITCH_C, 269 + PITCH_C, &chroma_pitch); 270 + 271 + chroma_pitch += 1; 272 + 273 + REG_GET_3(DCSURF_SURFACE_CONFIG, 274 + SURFACE_PIXEL_FORMAT, &pix_format, 275 + ROTATION_ANGLE, &rotation_angle, 276 + H_MIRROR_EN, &h_mirror_en); 277 + 278 + /* apply wa only for NV12 surface with scatter gather enabled with view port > 512 */ 279 + if (address->type != PLN_ADDR_TYPE_VIDEO_PROGRESSIVE || 280 + address->video_progressive.luma_addr.high_part == 0xf4 281 + || viewport_c_height <= 512) 282 + return; 283 + 284 + switch (rotation_angle) { 285 + case 0: /* 0 degree rotation */ 286 + row_height = 128; 287 + 
patched_viewport_height = (viewport_c_height / row_height + 1) * row_height + 1; 288 + patched_viewport_width = viewport_c_width; 289 + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; 290 + break; 291 + case 2: /* 180 degree rotation */ 292 + row_height = 128; 293 + patched_viewport_height = viewport_c_height + row_height; 294 + patched_viewport_width = viewport_c_width; 295 + hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - chroma_pitch * row_height * chroma_bpe; 296 + break; 297 + case 1: /* 90 degree rotation */ 298 + row_height = 256; 299 + if (h_mirror_en) { 300 + patched_viewport_height = viewport_c_height; 301 + patched_viewport_width = viewport_c_width + row_height; 302 + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; 303 + } else { 304 + patched_viewport_height = viewport_c_height; 305 + patched_viewport_width = viewport_c_width + row_height; 306 + hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; 307 + } 308 + break; 309 + case 3: /* 270 degree rotation */ 310 + row_height = 256; 311 + if (h_mirror_en) { 312 + patched_viewport_height = viewport_c_height; 313 + patched_viewport_width = viewport_c_width + row_height; 314 + hubp21->PLAT_54186_wa_chroma_addr_offset = 0 - tile_blk_size; 315 + } else { 316 + patched_viewport_height = viewport_c_height; 317 + patched_viewport_width = viewport_c_width + row_height; 318 + hubp21->PLAT_54186_wa_chroma_addr_offset = 0; 319 + } 320 + break; 321 + default: 322 + ASSERT(0); 323 + break; 324 + } 325 + 326 + /* catch cases where viewport keep growing */ 327 + ASSERT(patched_viewport_height && patched_viewport_height < 5000); 328 + ASSERT(patched_viewport_width && patched_viewport_width < 5000); 329 + 330 + REG_UPDATE_2(DCSURF_PRI_VIEWPORT_DIMENSION_C, 331 + PRI_VIEWPORT_WIDTH_C, patched_viewport_width, 332 + PRI_VIEWPORT_HEIGHT_C, patched_viewport_height); 209 333 } 210 334 211 335 void hubp21_set_vm_system_aperture_settings(struct hubp *hubp, ··· 678 602 dml_dlg_attr->refcyc_per_meta_chunk_flip_l, 
dlg_attr.refcyc_per_meta_chunk_flip_l); 679 603 } 680 604 605 + bool hubp21_program_surface_flip_and_addr( 606 + struct hubp *hubp, 607 + const struct dc_plane_address *address, 608 + bool flip_immediate) 609 + { 610 + struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp); 611 + struct dc_debug_options *debug = &hubp->ctx->dc->debug; 612 + 613 + //program flip type 614 + REG_UPDATE(DCSURF_FLIP_CONTROL, 615 + SURFACE_FLIP_TYPE, flip_immediate); 616 + 617 + // Program VMID reg 618 + REG_UPDATE(VMID_SETTINGS_0, 619 + VMID, address->vmid); 620 + 621 + if (address->type == PLN_ADDR_TYPE_GRPH_STEREO) { 622 + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x1); 623 + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x1); 624 + 625 + } else { 626 + // turn off stereo if not in stereo 627 + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_MODE_FOR_STEREOSYNC, 0x0); 628 + REG_UPDATE(DCSURF_FLIP_CONTROL, SURFACE_FLIP_IN_STEREOSYNC, 0x0); 629 + } 630 + 631 + 632 + 633 + /* HW automatically latch rest of address register on write to 634 + * DCSURF_PRIMARY_SURFACE_ADDRESS if SURFACE_UPDATE_LOCK is not used 635 + * 636 + * program high first and then the low addr, order matters! 
637 + */ 638 + switch (address->type) { 639 + case PLN_ADDR_TYPE_GRAPHICS: 640 + /* DCN1.0 does not support const color 641 + * TODO: program DCHUBBUB_RET_PATH_DCC_CFGx_0/1 642 + * base on address->grph.dcc_const_color 643 + * x = 0, 2, 4, 6 for pipe 0, 1, 2, 3 for rgb and luma 644 + * x = 1, 3, 5, 7 for pipe 0, 1, 2, 3 for chroma 645 + */ 646 + 647 + if (address->grph.addr.quad_part == 0) 648 + break; 649 + 650 + REG_UPDATE_2(DCSURF_SURFACE_CONTROL, 651 + PRIMARY_SURFACE_TMZ, address->tmz_surface, 652 + PRIMARY_META_SURFACE_TMZ, address->tmz_surface); 653 + 654 + if (address->grph.meta_addr.quad_part != 0) { 655 + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0, 656 + PRIMARY_META_SURFACE_ADDRESS_HIGH, 657 + address->grph.meta_addr.high_part); 658 + 659 + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0, 660 + PRIMARY_META_SURFACE_ADDRESS, 661 + address->grph.meta_addr.low_part); 662 + } 663 + 664 + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0, 665 + PRIMARY_SURFACE_ADDRESS_HIGH, 666 + address->grph.addr.high_part); 667 + 668 + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0, 669 + PRIMARY_SURFACE_ADDRESS, 670 + address->grph.addr.low_part); 671 + break; 672 + case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE: 673 + if (address->video_progressive.luma_addr.quad_part == 0 674 + || address->video_progressive.chroma_addr.quad_part == 0) 675 + break; 676 + 677 + REG_UPDATE_4(DCSURF_SURFACE_CONTROL, 678 + PRIMARY_SURFACE_TMZ, address->tmz_surface, 679 + PRIMARY_SURFACE_TMZ_C, address->tmz_surface, 680 + PRIMARY_META_SURFACE_TMZ, address->tmz_surface, 681 + PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface); 682 + 683 + if (address->video_progressive.luma_meta_addr.quad_part != 0) { 684 + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 0, 685 + PRIMARY_META_SURFACE_ADDRESS_HIGH_C, 686 + address->video_progressive.chroma_meta_addr.high_part); 687 + 688 + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, 0, 689 + PRIMARY_META_SURFACE_ADDRESS_C, 690 + 
address->video_progressive.chroma_meta_addr.low_part); 691 + 692 + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0, 693 + PRIMARY_META_SURFACE_ADDRESS_HIGH, 694 + address->video_progressive.luma_meta_addr.high_part); 695 + 696 + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0, 697 + PRIMARY_META_SURFACE_ADDRESS, 698 + address->video_progressive.luma_meta_addr.low_part); 699 + } 700 + 701 + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, 0, 702 + PRIMARY_SURFACE_ADDRESS_HIGH_C, 703 + address->video_progressive.chroma_addr.high_part); 704 + 705 + if (debug->nv12_iflip_vm_wa) { 706 + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0, 707 + PRIMARY_SURFACE_ADDRESS_C, 708 + address->video_progressive.chroma_addr.low_part + hubp21->PLAT_54186_wa_chroma_addr_offset); 709 + } else { 710 + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_C, 0, 711 + PRIMARY_SURFACE_ADDRESS_C, 712 + address->video_progressive.chroma_addr.low_part); 713 + } 714 + 715 + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0, 716 + PRIMARY_SURFACE_ADDRESS_HIGH, 717 + address->video_progressive.luma_addr.high_part); 718 + 719 + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0, 720 + PRIMARY_SURFACE_ADDRESS, 721 + address->video_progressive.luma_addr.low_part); 722 + break; 723 + case PLN_ADDR_TYPE_GRPH_STEREO: 724 + if (address->grph_stereo.left_addr.quad_part == 0) 725 + break; 726 + if (address->grph_stereo.right_addr.quad_part == 0) 727 + break; 728 + 729 + REG_UPDATE_8(DCSURF_SURFACE_CONTROL, 730 + PRIMARY_SURFACE_TMZ, address->tmz_surface, 731 + PRIMARY_SURFACE_TMZ_C, address->tmz_surface, 732 + PRIMARY_META_SURFACE_TMZ, address->tmz_surface, 733 + PRIMARY_META_SURFACE_TMZ_C, address->tmz_surface, 734 + SECONDARY_SURFACE_TMZ, address->tmz_surface, 735 + SECONDARY_SURFACE_TMZ_C, address->tmz_surface, 736 + SECONDARY_META_SURFACE_TMZ, address->tmz_surface, 737 + SECONDARY_META_SURFACE_TMZ_C, address->tmz_surface); 738 + 739 + if (address->grph_stereo.right_meta_addr.quad_part != 0) { 740 + 741 + 
REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH, 0, 742 + SECONDARY_META_SURFACE_ADDRESS_HIGH, 743 + address->grph_stereo.right_meta_addr.high_part); 744 + 745 + REG_SET(DCSURF_SECONDARY_META_SURFACE_ADDRESS, 0, 746 + SECONDARY_META_SURFACE_ADDRESS, 747 + address->grph_stereo.right_meta_addr.low_part); 748 + } 749 + if (address->grph_stereo.left_meta_addr.quad_part != 0) { 750 + 751 + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH, 0, 752 + PRIMARY_META_SURFACE_ADDRESS_HIGH, 753 + address->grph_stereo.left_meta_addr.high_part); 754 + 755 + REG_SET(DCSURF_PRIMARY_META_SURFACE_ADDRESS, 0, 756 + PRIMARY_META_SURFACE_ADDRESS, 757 + address->grph_stereo.left_meta_addr.low_part); 758 + } 759 + 760 + REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, 0, 761 + SECONDARY_SURFACE_ADDRESS_HIGH, 762 + address->grph_stereo.right_addr.high_part); 763 + 764 + REG_SET(DCSURF_SECONDARY_SURFACE_ADDRESS, 0, 765 + SECONDARY_SURFACE_ADDRESS, 766 + address->grph_stereo.right_addr.low_part); 767 + 768 + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, 0, 769 + PRIMARY_SURFACE_ADDRESS_HIGH, 770 + address->grph_stereo.left_addr.high_part); 771 + 772 + REG_SET(DCSURF_PRIMARY_SURFACE_ADDRESS, 0, 773 + PRIMARY_SURFACE_ADDRESS, 774 + address->grph_stereo.left_addr.low_part); 775 + break; 776 + default: 777 + BREAK_TO_DEBUGGER(); 778 + break; 779 + } 780 + 781 + hubp->request_address = *address; 782 + 783 + return true; 784 + } 785 + 681 786 void hubp21_init(struct hubp *hubp) 682 787 { 683 788 // DEDCN21-133: Inconsistent row starting line for flip between DPTE and Meta ··· 871 614 static struct hubp_funcs dcn21_hubp_funcs = { 872 615 .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, 873 616 .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, 874 - .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr, 617 + .hubp_program_surface_flip_and_addr = hubp21_program_surface_flip_and_addr, 875 618 .hubp_program_surface_config = hubp1_program_surface_config, 876 619 
.hubp_is_flip_pending = hubp1_is_flip_pending, 877 620 .hubp_setup = hubp21_setup, ··· 880 623 .set_blank = hubp1_set_blank, 881 624 .dcc_control = hubp1_dcc_control, 882 625 .mem_program_viewport = hubp21_set_viewport, 626 + .apply_PLAT_54186_wa = hubp21_apply_PLAT_54186_wa, 883 627 .set_cursor_attributes = hubp2_cursor_set_attributes, 884 628 .set_cursor_position = hubp1_cursor_set_position, 885 629 .hubp_clk_cntl = hubp1_clk_cntl,
+1
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.h
··· 108 108 const struct dcn_hubp2_registers *hubp_regs; 109 109 const struct dcn_hubp2_shift *hubp_shift; 110 110 const struct dcn_hubp2_mask *hubp_mask; 111 + int PLAT_54186_wa_chroma_addr_offset; 111 112 }; 112 113 113 114 bool hubp21_construct(
+24 -24
drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
··· 1 1 /* 2 2 * Copyright 2018 Advanced Micro Devices, Inc. 3 + * Copyright 2019 Raptor Engineering, LLC 3 4 * 4 5 * Permission is hereby granted, free of charge, to any person obtaining a 5 6 * copy of this software and associated documentation files (the "Software"), ··· 63 62 64 63 #include "dcn20/dcn20_dwb.h" 65 64 #include "dcn20/dcn20_mmhubbub.h" 65 + #include "dpcs/dpcs_2_1_0_offset.h" 66 + #include "dpcs/dpcs_2_1_0_sh_mask.h" 66 67 67 68 #include "renoir_ip_offset.h" 68 69 #include "dcn/dcn_2_1_0_offset.h" ··· 996 993 { 997 994 int i; 998 995 999 - kernel_fpu_begin(); 996 + DC_FP_START(); 997 + 1000 998 if (dc->bb_overrides.sr_exit_time_ns) { 1001 999 for (i = 0; i < WM_SET_COUNT; i++) { 1002 1000 dc->clk_mgr->bw_params->wm_table.entries[i].sr_exit_time_us = ··· 1023 1019 } 1024 1020 } 1025 1021 1026 - kernel_fpu_end(); 1022 + DC_FP_END(); 1027 1023 } 1028 1024 1029 1025 void dcn21_calculate_wm( ··· 1323 1319 1324 1320 static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) 1325 1321 { 1326 - /* 1327 - TODO: Fix this function to calcualte correct values. 1328 - There are known issues with this function currently 1329 - that will need to be investigated. Use hardcoded known good values for now. 
1330 - 1331 - 1332 1322 struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); 1333 1323 struct clk_limit_table *clk_table = &bw_params->clk_table; 1334 1324 int i; ··· 1337 1339 dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; 1338 1340 dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; 1339 1341 dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; 1340 - dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000; 1342 + dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2; 1341 1343 } 1342 - dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i]; 1344 + dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1]; 1343 1345 dcn2_1_soc.num_states = i; 1344 - */ 1345 1346 } 1346 1347 1347 1348 /* Temporary Place holder until we can get them from fuse */ ··· 1494 1497 1495 1498 #define link_regs(id, phyid)\ 1496 1499 [id] = {\ 1497 - LE_DCN10_REG_LIST(id), \ 1500 + LE_DCN2_REG_LIST(id), \ 1498 1501 UNIPHY_DCN2_REG_LIST(phyid), \ 1502 + DPCS_DCN21_REG_LIST(id), \ 1499 1503 SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ 1500 1504 } 1501 1505 ··· 1535 1537 }; 1536 1538 1537 1539 static const struct dcn10_link_enc_shift le_shift = { 1538 - LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) 1540 + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT),\ 1541 + DPCS_DCN21_MASK_SH_LIST(__SHIFT) 1539 1542 }; 1540 1543 1541 1544 static const struct dcn10_link_enc_mask le_mask = { 1542 - LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) 1545 + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK),\ 1546 + DPCS_DCN21_MASK_SH_LIST(_MASK) 1543 1547 }; 1544 1548 1545 1549 static int map_transmitter_id_to_phy_instance( ··· 1776 1776 if ((pipe_fuses & (1 << i)) != 0) 1777 1777 continue; 1778 1778 1779 - pool->base.hubps[i] = dcn21_hubp_create(ctx, i); 1780 - if (pool->base.hubps[i] == NULL) { 1779 + pool->base.hubps[j] = dcn21_hubp_create(ctx, i); 1780 + if (pool->base.hubps[j] == NULL) { 1781 1781 
BREAK_TO_DEBUGGER(); 1782 1782 dm_error( 1783 1783 "DC: failed to create memory input!\n"); 1784 1784 goto create_fail; 1785 1785 } 1786 1786 1787 - pool->base.ipps[i] = dcn21_ipp_create(ctx, i); 1788 - if (pool->base.ipps[i] == NULL) { 1787 + pool->base.ipps[j] = dcn21_ipp_create(ctx, i); 1788 + if (pool->base.ipps[j] == NULL) { 1789 1789 BREAK_TO_DEBUGGER(); 1790 1790 dm_error( 1791 1791 "DC: failed to create input pixel processor!\n"); 1792 1792 goto create_fail; 1793 1793 } 1794 1794 1795 - pool->base.dpps[i] = dcn21_dpp_create(ctx, i); 1796 - if (pool->base.dpps[i] == NULL) { 1795 + pool->base.dpps[j] = dcn21_dpp_create(ctx, i); 1796 + if (pool->base.dpps[j] == NULL) { 1797 1797 BREAK_TO_DEBUGGER(); 1798 1798 dm_error( 1799 1799 "DC: failed to create dpps!\n"); 1800 1800 goto create_fail; 1801 1801 } 1802 1802 1803 - pool->base.opps[i] = dcn21_opp_create(ctx, i); 1804 - if (pool->base.opps[i] == NULL) { 1803 + pool->base.opps[j] = dcn21_opp_create(ctx, i); 1804 + if (pool->base.opps[j] == NULL) { 1805 1805 BREAK_TO_DEBUGGER(); 1806 1806 dm_error( 1807 1807 "DC: failed to create output pixel processor!\n"); 1808 1808 goto create_fail; 1809 1809 } 1810 1810 1811 - pool->base.timing_generators[i] = dcn21_timing_generator_create( 1811 + pool->base.timing_generators[j] = dcn21_timing_generator_create( 1812 1812 ctx, i); 1813 - if (pool->base.timing_generators[i] == NULL) { 1813 + if (pool->base.timing_generators[j] == NULL) { 1814 1814 BREAK_TO_DEBUGGER(); 1815 1815 dm_error("DC: failed to create tg!\n"); 1816 1816 goto create_fail;
+2 -1
drivers/gpu/drm/amd/display/dc/dm_services_types.h
··· 220 220 }; 221 221 222 222 /* Total size of the structure should not exceed 256 bytes */ 223 + #define BL_DATA_POINTS 99 223 224 struct dm_acpi_atif_backlight_caps { 224 225 uint16_t size; /* Bytes 0-1 (2 bytes) */ 225 226 uint16_t flags; /* Byted 2-3 (2 bytes) */ ··· 230 229 uint8_t min_input_signal; /* Byte 7 */ 231 230 uint8_t max_input_signal; /* Byte 8 */ 232 231 uint8_t num_data_points; /* Byte 9 */ 233 - struct dm_bl_data_point data_points[99]; /* Bytes 10-207 (198 bytes)*/ 232 + struct dm_bl_data_point data_points[BL_DATA_POINTS]; /* Bytes 10-207 (198 bytes)*/ 234 233 }; 235 234 236 235 enum dm_acpi_display_type {
+9
drivers/gpu/drm/amd/display/dc/dml/Makefile
··· 1 1 # 2 2 # Copyright 2017 Advanced Micro Devices, Inc. 3 + # Copyright 2019 Raptor Engineering, LLC 3 4 # 4 5 # Permission is hereby granted, free of charge, to any person obtaining a 5 6 # copy of this software and associated documentation files (the "Software"), ··· 25 24 # It provides the general basic services required by other DAL 26 25 # subcomponents. 27 26 27 + ifdef CONFIG_X86 28 28 dml_ccflags := -mhard-float -msse 29 + endif 30 + 31 + ifdef CONFIG_PPC64 32 + dml_ccflags := -mhard-float -maltivec 33 + endif 29 34 30 35 ifdef CONFIG_CC_IS_GCC 31 36 ifeq ($(call cc-ifversion, -lt, 0701, y), y) ··· 39 32 endif 40 33 endif 41 34 35 + ifdef CONFIG_X86 42 36 ifdef IS_OLD_GCC 43 37 # Stack alignment mismatch, proceed with caution. 44 38 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 ··· 47 39 dml_ccflags += -mpreferred-stack-boundary=4 48 40 else 49 41 dml_ccflags += -msse2 42 + endif 50 43 endif 51 44 52 45 CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
+12 -12
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
··· 107 107 108 108 static bool is_dual_plane(enum source_format_class source_format) 109 109 { 110 - bool ret_val = 0; 110 + bool ret_val = false; 111 111 112 112 if ((source_format == dm_420_8) || (source_format == dm_420_10)) 113 - ret_val = 1; 113 + ret_val = true; 114 114 115 115 return ret_val; 116 116 } ··· 240 240 unsigned int swath_bytes_c = 0; 241 241 unsigned int full_swath_bytes_packed_l = 0; 242 242 unsigned int full_swath_bytes_packed_c = 0; 243 - bool req128_l = 0; 244 - bool req128_c = 0; 243 + bool req128_l = false; 244 + bool req128_c = false; 245 245 bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); 246 246 bool surf_vert = (pipe_src_param.source_scan == dm_vert); 247 247 unsigned int log2_swath_height_l = 0; ··· 264 264 total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; 265 265 266 266 if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request 267 - req128_l = 0; 268 - req128_c = 0; 267 + req128_l = false; 268 + req128_c = false; 269 269 swath_bytes_l = full_swath_bytes_packed_l; 270 270 swath_bytes_c = full_swath_bytes_packed_c; 271 271 } else { //128b request (for luma only for yuv420 8bpc) 272 - req128_l = 1; 273 - req128_c = 0; 272 + req128_l = true; 273 + req128_c = false; 274 274 swath_bytes_l = full_swath_bytes_packed_l / 2; 275 275 swath_bytes_c = full_swath_bytes_packed_c; 276 276 } ··· 280 280 total_swath_bytes = 2 * full_swath_bytes_packed_l; 281 281 282 282 if (total_swath_bytes <= detile_buf_size_in_bytes) 283 - req128_l = 0; 283 + req128_l = false; 284 284 else 285 - req128_l = 1; 285 + req128_l = true; 286 286 287 287 swath_bytes_l = total_swath_bytes; 288 288 swath_bytes_c = 0; ··· 670 670 const display_pipe_source_params_st pipe_src_param, 671 671 bool is_chroma) 672 672 { 673 - bool mode_422 = 0; 673 + bool mode_422 = false; 674 674 unsigned int vp_width = 0; 675 675 unsigned int vp_height = 0; 676 676 unsigned int data_pitch = 0; ··· 958 958 // Source 959 959 // dcc_en = 
src.dcc; 960 960 dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); 961 - mode_422 = 0; // TODO 961 + mode_422 = false; // TODO 962 962 access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed 963 963 // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); 964 964 // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
+12 -12
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
··· 107 107 108 108 static bool is_dual_plane(enum source_format_class source_format) 109 109 { 110 - bool ret_val = 0; 110 + bool ret_val = false; 111 111 112 112 if ((source_format == dm_420_8) || (source_format == dm_420_10)) 113 - ret_val = 1; 113 + ret_val = true; 114 114 115 115 return ret_val; 116 116 } ··· 240 240 unsigned int swath_bytes_c = 0; 241 241 unsigned int full_swath_bytes_packed_l = 0; 242 242 unsigned int full_swath_bytes_packed_c = 0; 243 - bool req128_l = 0; 244 - bool req128_c = 0; 243 + bool req128_l = false; 244 + bool req128_c = false; 245 245 bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); 246 246 bool surf_vert = (pipe_src_param.source_scan == dm_vert); 247 247 unsigned int log2_swath_height_l = 0; ··· 264 264 total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; 265 265 266 266 if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request 267 - req128_l = 0; 268 - req128_c = 0; 267 + req128_l = false; 268 + req128_c = false; 269 269 swath_bytes_l = full_swath_bytes_packed_l; 270 270 swath_bytes_c = full_swath_bytes_packed_c; 271 271 } else { //128b request (for luma only for yuv420 8bpc) 272 - req128_l = 1; 273 - req128_c = 0; 272 + req128_l = true; 273 + req128_c = false; 274 274 swath_bytes_l = full_swath_bytes_packed_l / 2; 275 275 swath_bytes_c = full_swath_bytes_packed_c; 276 276 } ··· 280 280 total_swath_bytes = 2 * full_swath_bytes_packed_l; 281 281 282 282 if (total_swath_bytes <= detile_buf_size_in_bytes) 283 - req128_l = 0; 283 + req128_l = false; 284 284 else 285 - req128_l = 1; 285 + req128_l = true; 286 286 287 287 swath_bytes_l = total_swath_bytes; 288 288 swath_bytes_c = 0; ··· 670 670 const display_pipe_source_params_st pipe_src_param, 671 671 bool is_chroma) 672 672 { 673 - bool mode_422 = 0; 673 + bool mode_422 = false; 674 674 unsigned int vp_width = 0; 675 675 unsigned int vp_height = 0; 676 676 unsigned int data_pitch = 0; ··· 959 959 // Source 960 960 // dcc_en = 
src.dcc; 961 961 dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); 962 - mode_422 = 0; // TODO 962 + mode_422 = false; // TODO 963 963 access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed 964 964 // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); 965 965 // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
+3 -3
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
··· 4121 4121 } 4122 4122 for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { 4123 4123 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { 4124 - locals->RequiresDSC[i][k] = 0; 4124 + locals->RequiresDSC[i][k] = false; 4125 4125 locals->RequiresFEC[i][k] = 0; 4126 4126 if (mode_lib->vba.BlendingAndTiming[k] == k) { 4127 4127 if (mode_lib->vba.Output[k] == dm_hdmi) { 4128 - locals->RequiresDSC[i][k] = 0; 4128 + locals->RequiresDSC[i][k] = false; 4129 4129 locals->RequiresFEC[i][k] = 0; 4130 4130 locals->OutputBppPerState[i][k] = TruncToValidBPP( 4131 4131 dml_min(600.0, mode_lib->vba.PHYCLKPerState[i]) / mode_lib->vba.PixelClockBackEnd[k] * 24, ··· 5204 5204 mode_lib->vba.ODMCombineEnabled[k] = 5205 5205 locals->ODMCombineEnablePerState[mode_lib->vba.VoltageLevel][k]; 5206 5206 } else { 5207 - mode_lib->vba.ODMCombineEnabled[k] = 0; 5207 + mode_lib->vba.ODMCombineEnabled[k] = false; 5208 5208 } 5209 5209 mode_lib->vba.DSCEnabled[k] = 5210 5210 locals->RequiresDSC[mode_lib->vba.VoltageLevel][k];
+12 -12
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
··· 82 82 83 83 static bool is_dual_plane(enum source_format_class source_format) 84 84 { 85 - bool ret_val = 0; 85 + bool ret_val = false; 86 86 87 87 if ((source_format == dm_420_8) || (source_format == dm_420_10)) 88 - ret_val = 1; 88 + ret_val = true; 89 89 90 90 return ret_val; 91 91 } ··· 222 222 unsigned int swath_bytes_c = 0; 223 223 unsigned int full_swath_bytes_packed_l = 0; 224 224 unsigned int full_swath_bytes_packed_c = 0; 225 - bool req128_l = 0; 226 - bool req128_c = 0; 225 + bool req128_l = false; 226 + bool req128_c = false; 227 227 bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear); 228 228 bool surf_vert = (pipe_src_param.source_scan == dm_vert); 229 229 unsigned int log2_swath_height_l = 0; ··· 248 248 total_swath_bytes = 2 * full_swath_bytes_packed_l + 2 * full_swath_bytes_packed_c; 249 249 250 250 if (total_swath_bytes <= detile_buf_size_in_bytes) { //full 256b request 251 - req128_l = 0; 252 - req128_c = 0; 251 + req128_l = false; 252 + req128_c = false; 253 253 swath_bytes_l = full_swath_bytes_packed_l; 254 254 swath_bytes_c = full_swath_bytes_packed_c; 255 255 } else { //128b request (for luma only for yuv420 8bpc) 256 - req128_l = 1; 257 - req128_c = 0; 256 + req128_l = true; 257 + req128_c = false; 258 258 swath_bytes_l = full_swath_bytes_packed_l / 2; 259 259 swath_bytes_c = full_swath_bytes_packed_c; 260 260 } ··· 264 264 total_swath_bytes = 2 * full_swath_bytes_packed_l; 265 265 266 266 if (total_swath_bytes <= detile_buf_size_in_bytes) 267 - req128_l = 0; 267 + req128_l = false; 268 268 else 269 - req128_l = 1; 269 + req128_l = true; 270 270 271 271 swath_bytes_l = total_swath_bytes; 272 272 swath_bytes_c = 0; ··· 679 679 const display_pipe_params_st pipe_param, 680 680 bool is_chroma) 681 681 { 682 - bool mode_422 = 0; 682 + bool mode_422 = false; 683 683 unsigned int vp_width = 0; 684 684 unsigned int vp_height = 0; 685 685 unsigned int data_pitch = 0; ··· 1010 1010 // Source 1011 1011 // dcc_en = src.dcc; 1012 1012 
dual_plane = is_dual_plane((enum source_format_class) (src->source_format)); 1013 - mode_422 = 0; // FIXME 1013 + mode_422 = false; // FIXME 1014 1014 access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed 1015 1015 // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); 1016 1016 // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1);
+1
drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
··· 278 278 int output_type; 279 279 int output_format; 280 280 int dsc_slices; 281 + int max_audio_sample_rate; 281 282 struct writeback_st wb; 282 283 }; 283 284
+1 -1
drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
··· 454 454 dout->dp_lanes; 455 455 /* TODO: Needs to be set based on dout->audio.audio_sample_rate_khz/sample_layout */ 456 456 mode_lib->vba.AudioSampleRate[mode_lib->vba.NumberOfActivePlanes] = 457 - 44.1 * 1000; 457 + dout->max_audio_sample_rate; 458 458 mode_lib->vba.AudioSampleLayout[mode_lib->vba.NumberOfActivePlanes] = 459 459 1; 460 460 mode_lib->vba.DRAMClockChangeLatencyOverride = 0.0;
+8
drivers/gpu/drm/amd/display/dc/dsc/Makefile
··· 2 2 # 3 3 # Makefile for the 'dsc' sub-component of DAL. 4 4 5 + ifdef CONFIG_X86 5 6 dsc_ccflags := -mhard-float -msse 7 + endif 8 + 9 + ifdef CONFIG_PPC64 10 + dsc_ccflags := -mhard-float -maltivec 11 + endif 6 12 7 13 ifdef CONFIG_CC_IS_GCC 8 14 ifeq ($(call cc-ifversion, -lt, 0701, y), y) ··· 16 10 endif 17 11 endif 18 12 13 + ifdef CONFIG_X86 19 14 ifdef IS_OLD_GCC 20 15 # Stack alignment mismatch, proceed with caution. 21 16 # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 ··· 24 17 dsc_ccflags += -mpreferred-stack-boundary=4 25 18 else 26 19 dsc_ccflags += -msse2 20 + endif 27 21 endif 28 22 29 23 CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
+29 -4
drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
··· 29 29 30 30 /* This module's internal functions */ 31 31 32 + /* default DSC policy target bitrate limit is 16bpp */ 33 + static uint32_t dsc_policy_max_target_bpp_limit = 16; 34 + 32 35 static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( 33 36 const struct dc_crtc_timing *timing) 34 37 { ··· 760 757 return is_dsc_possible; 761 758 } 762 759 763 - bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps) 760 + bool dc_dsc_parse_dsc_dpcd(const struct dc *dc, const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps) 764 761 { 765 762 if (!dpcd_dsc_basic_data) 766 763 return false; ··· 812 809 813 810 if (!dsc_bpp_increment_div_from_dpcd(dpcd_dsc_basic_data[DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT], &dsc_sink_caps->bpp_increment_div)) 814 811 return false; 812 + 813 + if (dc->debug.dsc_bpp_increment_div) { 814 + /* dsc_bpp_increment_div should onl be 1, 2, 4, 8 or 16, but rather than rejecting invalid values, 815 + * we'll accept all and get it into range. This also makes the above check against 0 redundant, 816 + * but that one stresses out the override will be only used if it's not 0. 817 + */ 818 + if (dc->debug.dsc_bpp_increment_div >= 1) 819 + dsc_sink_caps->bpp_increment_div = 1; 820 + if (dc->debug.dsc_bpp_increment_div >= 2) 821 + dsc_sink_caps->bpp_increment_div = 2; 822 + if (dc->debug.dsc_bpp_increment_div >= 4) 823 + dsc_sink_caps->bpp_increment_div = 4; 824 + if (dc->debug.dsc_bpp_increment_div >= 8) 825 + dsc_sink_caps->bpp_increment_div = 8; 826 + if (dc->debug.dsc_bpp_increment_div >= 16) 827 + dsc_sink_caps->bpp_increment_div = 16; 828 + } 815 829 816 830 /* Extended caps */ 817 831 if (dpcd_dsc_ext_data == NULL) { // Extended DPCD DSC data can be null, e.g. 
because it doesn't apply to SST ··· 971 951 default: 972 952 return; 973 953 } 974 - /* internal upper limit to 16 bpp */ 975 - if (policy->max_target_bpp > 16) 976 - policy->max_target_bpp = 16; 954 + /* internal upper limit, default 16 bpp */ 955 + if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit) 956 + policy->max_target_bpp = dsc_policy_max_target_bpp_limit; 957 + } 958 + 959 + void dc_dsc_policy_set_max_target_bpp_limit(uint32_t limit) 960 + { 961 + dsc_policy_max_target_bpp_limit = limit; 977 962 }
+2 -1
drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
··· 113 113 int wb_src_plane_inst;/*hubp, mpcc, inst*/ 114 114 bool update_privacymask; 115 115 uint32_t mask_id; 116 - 116 + int otg_inst; 117 + bool mvc_cfg; 117 118 }; 118 119 119 120 struct dwbc_funcs {
+4 -3
drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
··· 82 82 void (*mem_program_viewport)( 83 83 struct hubp *hubp, 84 84 const struct rect *viewport, 85 - const struct rect *viewport_c, 86 - enum dc_rotation_angle rotation); 87 - /* rotation needed for Renoir workaround */ 85 + const struct rect *viewport_c); 86 + 87 + void (*apply_PLAT_54186_wa)(struct hubp *hubp, 88 + const struct dc_plane_address *address); 88 89 89 90 bool (*hubp_program_surface_flip_and_addr)( 90 91 struct hubp *hubp,
+4 -2
drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
··· 149 149 150 150 /* Writeback Related */ 151 151 void (*update_writeback)(struct dc *dc, 152 - const struct dc_stream_status *stream_status, 153 152 struct dc_writeback_info *wb_info, 154 153 struct dc_state *context); 155 154 void (*enable_writeback)(struct dc *dc, 156 - const struct dc_stream_status *stream_status, 157 155 struct dc_writeback_info *wb_info, 158 156 struct dc_state *context); 159 157 void (*disable_writeback)(struct dc *dc, 160 158 unsigned int dwb_pipe_inst); 159 + 160 + bool (*mmhubbub_warmup)(struct dc *dc, 161 + unsigned int num_dwb, 162 + struct dc_writeback_info *wb_info); 161 163 162 164 /* Clock Related */ 163 165 enum dc_status (*set_clock)(struct dc *dc,
+2
drivers/gpu/drm/amd/display/dc/inc/resource.h
··· 177 177 178 178 unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format); 179 179 180 + void get_audio_check(struct audio_info *aud_modes, 181 + struct audio_check *aud_chk); 180 182 #endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
+32
drivers/gpu/drm/amd/display/dc/os_types.h
··· 1 1 /* 2 2 * Copyright 2012-16 Advanced Micro Devices, Inc. 3 + * Copyright 2019 Raptor Engineering, LLC 3 4 * 4 5 * Permission is hereby granted, free of charge, to any person obtaining a 5 6 * copy of this software and associated documentation files (the "Software"), ··· 51 50 #define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__) 52 51 53 52 #if defined(CONFIG_DRM_AMD_DC_DCN) 53 + #if defined(CONFIG_X86) 54 54 #include <asm/fpu/api.h> 55 + #define DC_FP_START() kernel_fpu_begin() 56 + #define DC_FP_END() kernel_fpu_end() 57 + #elif defined(CONFIG_PPC64) 58 + #include <asm/switch_to.h> 59 + #include <asm/cputable.h> 60 + #define DC_FP_START() { \ 61 + if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ 62 + preempt_disable(); \ 63 + enable_kernel_vsx(); \ 64 + } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ 65 + preempt_disable(); \ 66 + enable_kernel_altivec(); \ 67 + } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ 68 + preempt_disable(); \ 69 + enable_kernel_fp(); \ 70 + } \ 71 + } 72 + #define DC_FP_END() { \ 73 + if (cpu_has_feature(CPU_FTR_VSX_COMP)) { \ 74 + disable_kernel_vsx(); \ 75 + preempt_enable(); \ 76 + } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) { \ 77 + disable_kernel_altivec(); \ 78 + preempt_enable(); \ 79 + } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) { \ 80 + disable_kernel_fp(); \ 81 + preempt_enable(); \ 82 + } \ 83 + } 84 + #endif 55 85 #endif 56 86 57 87 /*
+27 -37
drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_state.h drivers/gpu/drm/amd/display/dmub/inc/dmub_fw_meta.h
··· 22 22 * Authors: AMD 23 23 * 24 24 */ 25 - 26 - #ifndef _DMUB_FW_STATE_H_ 27 - #define _DMUB_FW_STATE_H_ 25 + #ifndef _DMUB_META_H_ 26 + #define _DMUB_META_H_ 28 27 29 28 #include "dmub_types.h" 30 29 31 30 #pragma pack(push, 1) 32 31 33 - struct dmub_fw_state { 34 - /** 35 - * @phy_initialized_during_fw_boot: 36 - * 37 - * Detects if VBIOS/VBL has ran before firmware boot. 38 - * A value of 1 will usually mean S0i3 boot. 39 - */ 40 - uint8_t phy_initialized_during_fw_boot; 32 + /* Magic value for identifying dmub_fw_meta_info */ 33 + #define DMUB_FW_META_MAGIC 0x444D5542 41 34 42 - /** 43 - * @intialized_phy: 44 - * 45 - * Bit vector of initialized PHY. 46 - */ 47 - uint8_t initialized_phy; 35 + /* Offset from the end of the file to the dmub_fw_meta_info */ 36 + #define DMUB_FW_META_OFFSET 0x24 48 37 49 - /** 50 - * @enabled_phy: 51 - * 52 - * Bit vector of enabled PHY for DP alt mode switch tracking. 53 - */ 54 - uint8_t enabled_phy; 38 + /** 39 + * struct dmub_fw_meta_info - metadata associated with fw binary 40 + * 41 + * NOTE: This should be considered a stable API. Fields should 42 + * not be repurposed or reordered. New fields should be 43 + * added instead to extend the structure. 44 + * 45 + * @magic_value: magic value identifying DMUB firmware meta info 46 + * @fw_region_size: size of the firmware state region 47 + * @trace_buffer_size: size of the tracebuffer region 48 + */ 49 + struct dmub_fw_meta_info { 50 + uint32_t magic_value; 51 + uint32_t fw_region_size; 52 + uint32_t trace_buffer_size; 53 + }; 55 54 56 - /** 57 - * @dmcu_fw_loaded: 58 - * 59 - * DMCU auto load state. 60 - */ 61 - uint8_t dmcu_fw_loaded; 62 - 63 - /** 64 - * @psr_state: 65 - * 66 - * PSR state tracking. 67 - */ 68 - uint8_t psr_state; 55 + /* Ensure that the structure remains 64 bytes. 
*/ 56 + union dmub_fw_meta { 57 + struct dmub_fw_meta_info info; 58 + uint8_t reserved[64]; 69 59 }; 70 60 71 61 #pragma pack(pop) 72 62 73 - #endif /* _DMUB_FW_STATE_H_ */ 63 + #endif /* _DMUB_META_H_ */
+5 -2
drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
··· 67 67 #include "dmub_types.h" 68 68 #include "dmub_cmd.h" 69 69 #include "dmub_rb.h" 70 - #include "dmub_fw_state.h" 71 70 72 71 #if defined(__cplusplus) 73 72 extern "C" { ··· 75 76 /* Forward declarations */ 76 77 struct dmub_srv; 77 78 struct dmub_cmd_header; 78 - struct dmcu; 79 + struct dmub_srv_common_regs; 79 80 80 81 /* enum dmub_status - return code for dmcub functions */ 81 82 enum dmub_status { ··· 144 145 * @inst_const_size: size of the fw inst const section 145 146 * @bss_data_size: size of the fw bss data section 146 147 * @vbios_size: size of the vbios data 148 + * @fw_bss_data: raw firmware bss data section 147 149 */ 148 150 struct dmub_srv_region_params { 149 151 uint32_t inst_const_size; 150 152 uint32_t bss_data_size; 151 153 uint32_t vbios_size; 154 + const uint8_t *fw_bss_data; 152 155 }; 153 156 154 157 /** ··· 308 307 volatile const struct dmub_fw_state *fw_state; 309 308 310 309 /* private: internal use only */ 310 + const struct dmub_srv_common_regs *regs; 311 + 311 312 struct dmub_srv_base_funcs funcs; 312 313 struct dmub_srv_hw_funcs hw_funcs; 313 314 struct dmub_rb inbox1_rb;
+67 -19
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
··· 25 25 26 26 #include "../inc/dmub_srv.h" 27 27 #include "dmub_reg.h" 28 + #include "dmub_dcn20.h" 28 29 29 30 #include "dcn/dcn_2_0_0_offset.h" 30 31 #include "dcn/dcn_2_0_0_sh_mask.h" ··· 34 33 35 34 #define BASE_INNER(seg) DCN_BASE__INST0_SEG##seg 36 35 #define CTX dmub 36 + #define REGS dmub->regs 37 + 38 + /* Registers. */ 39 + 40 + const struct dmub_srv_common_regs dmub_srv_dcn20_regs = { 41 + #define DMUB_SR(reg) REG_OFFSET(reg), 42 + { DMUB_COMMON_REGS() }, 43 + #undef DMUB_SR 44 + 45 + #define DMUB_SF(reg, field) FD_MASK(reg, field), 46 + { DMUB_COMMON_FIELDS() }, 47 + #undef DMUB_SF 48 + 49 + #define DMUB_SF(reg, field) FD_SHIFT(reg, field), 50 + { DMUB_COMMON_FIELDS() }, 51 + #undef DMUB_SF 52 + }; 53 + 54 + /* Shared functions. */ 55 + 56 + static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in, 57 + uint64_t fb_base, 58 + uint64_t fb_offset, 59 + union dmub_addr *addr_out) 60 + { 61 + addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; 62 + } 37 63 38 64 void dmub_dcn20_reset(struct dmub_srv *dmub) 39 65 { ··· 75 47 REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 0); 76 48 } 77 49 78 - void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, struct dmub_window *cw0, 79 - struct dmub_window *cw1) 50 + void dmub_dcn20_backdoor_load(struct dmub_srv *dmub, 51 + const struct dmub_window *cw0, 52 + const struct dmub_window *cw1) 80 53 { 81 - REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); 82 - REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x4, 83 - DMCUB_MEM_WRITE_SPACE, 0x4); 54 + union dmub_addr offset; 55 + uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; 84 56 85 - REG_WRITE(DMCUB_REGION3_CW0_OFFSET, cw0->offset.u.low_part); 86 - REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, cw0->offset.u.high_part); 57 + REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); 58 + REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, 59 + DMCUB_MEM_WRITE_SPACE, 0x3); 60 + 61 + dmub_dcn20_translate_addr(&cw0->offset, fb_base, fb_offset, 
&offset); 62 + 63 + REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); 64 + REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); 87 65 REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); 88 66 REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, 89 67 DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, 90 68 DMCUB_REGION3_CW0_ENABLE, 1); 91 69 92 - REG_WRITE(DMCUB_REGION3_CW1_OFFSET, cw1->offset.u.low_part); 93 - REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, cw1->offset.u.high_part); 70 + dmub_dcn20_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); 71 + 72 + REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); 73 + REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); 94 74 REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); 95 75 REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, 96 76 DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, ··· 115 79 const struct dmub_window *cw5, 116 80 const struct dmub_window *cw6) 117 81 { 118 - REG_WRITE(DMCUB_REGION3_CW2_OFFSET, cw2->offset.u.low_part); 119 - REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, cw2->offset.u.high_part); 82 + union dmub_addr offset; 83 + uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; 84 + 85 + dmub_dcn20_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); 86 + 87 + REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); 88 + REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); 120 89 REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); 121 90 REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, 122 91 DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, 123 92 DMCUB_REGION3_CW2_ENABLE, 1); 124 93 125 - REG_WRITE(DMCUB_REGION3_CW3_OFFSET, cw3->offset.u.low_part); 126 - REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, cw3->offset.u.high_part); 94 + dmub_dcn20_translate_addr(&cw3->offset, fb_base, fb_offset, &offset); 95 + 96 + REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); 97 + REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); 127 98 
REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); 128 99 REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, 129 100 DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, 130 101 DMCUB_REGION3_CW3_ENABLE, 1); 131 102 132 103 /* TODO: Move this to CW4. */ 104 + dmub_dcn20_translate_addr(&cw4->offset, fb_base, fb_offset, &offset); 133 105 134 - REG_WRITE(DMCUB_REGION4_OFFSET, cw4->offset.u.low_part); 135 - REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, cw4->offset.u.high_part); 106 + REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part); 107 + REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part); 136 108 REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, 137 109 cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, 138 110 1); 139 111 140 - REG_WRITE(DMCUB_REGION3_CW5_OFFSET, cw5->offset.u.low_part); 141 - REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, cw5->offset.u.high_part); 112 + dmub_dcn20_translate_addr(&cw5->offset, fb_base, fb_offset, &offset); 113 + 114 + REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); 115 + REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); 142 116 REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); 143 117 REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, 144 118 DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, 145 119 DMCUB_REGION3_CW5_ENABLE, 1); 146 120 147 - REG_WRITE(DMCUB_REGION3_CW6_OFFSET, cw6->offset.u.low_part); 148 - REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, cw6->offset.u.high_part); 121 + dmub_dcn20_translate_addr(&cw6->offset, fb_base, fb_offset, &offset); 122 + 123 + REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); 124 + REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); 149 125 REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); 150 126 REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, 151 127 DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
+117
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
··· 30 30 31 31 struct dmub_srv; 32 32 33 + /* DCN20 register definitions. */ 34 + 35 + #define DMUB_COMMON_REGS() \ 36 + DMUB_SR(DMCUB_CNTL) \ 37 + DMUB_SR(DMCUB_MEM_CNTL) \ 38 + DMUB_SR(DMCUB_SEC_CNTL) \ 39 + DMUB_SR(DMCUB_INBOX1_BASE_ADDRESS) \ 40 + DMUB_SR(DMCUB_INBOX1_SIZE) \ 41 + DMUB_SR(DMCUB_INBOX1_RPTR) \ 42 + DMUB_SR(DMCUB_INBOX1_WPTR) \ 43 + DMUB_SR(DMCUB_REGION3_CW0_OFFSET) \ 44 + DMUB_SR(DMCUB_REGION3_CW1_OFFSET) \ 45 + DMUB_SR(DMCUB_REGION3_CW2_OFFSET) \ 46 + DMUB_SR(DMCUB_REGION3_CW3_OFFSET) \ 47 + DMUB_SR(DMCUB_REGION3_CW4_OFFSET) \ 48 + DMUB_SR(DMCUB_REGION3_CW5_OFFSET) \ 49 + DMUB_SR(DMCUB_REGION3_CW6_OFFSET) \ 50 + DMUB_SR(DMCUB_REGION3_CW7_OFFSET) \ 51 + DMUB_SR(DMCUB_REGION3_CW0_OFFSET_HIGH) \ 52 + DMUB_SR(DMCUB_REGION3_CW1_OFFSET_HIGH) \ 53 + DMUB_SR(DMCUB_REGION3_CW2_OFFSET_HIGH) \ 54 + DMUB_SR(DMCUB_REGION3_CW3_OFFSET_HIGH) \ 55 + DMUB_SR(DMCUB_REGION3_CW4_OFFSET_HIGH) \ 56 + DMUB_SR(DMCUB_REGION3_CW5_OFFSET_HIGH) \ 57 + DMUB_SR(DMCUB_REGION3_CW6_OFFSET_HIGH) \ 58 + DMUB_SR(DMCUB_REGION3_CW7_OFFSET_HIGH) \ 59 + DMUB_SR(DMCUB_REGION3_CW0_BASE_ADDRESS) \ 60 + DMUB_SR(DMCUB_REGION3_CW1_BASE_ADDRESS) \ 61 + DMUB_SR(DMCUB_REGION3_CW2_BASE_ADDRESS) \ 62 + DMUB_SR(DMCUB_REGION3_CW3_BASE_ADDRESS) \ 63 + DMUB_SR(DMCUB_REGION3_CW4_BASE_ADDRESS) \ 64 + DMUB_SR(DMCUB_REGION3_CW5_BASE_ADDRESS) \ 65 + DMUB_SR(DMCUB_REGION3_CW6_BASE_ADDRESS) \ 66 + DMUB_SR(DMCUB_REGION3_CW7_BASE_ADDRESS) \ 67 + DMUB_SR(DMCUB_REGION3_CW0_TOP_ADDRESS) \ 68 + DMUB_SR(DMCUB_REGION3_CW1_TOP_ADDRESS) \ 69 + DMUB_SR(DMCUB_REGION3_CW2_TOP_ADDRESS) \ 70 + DMUB_SR(DMCUB_REGION3_CW3_TOP_ADDRESS) \ 71 + DMUB_SR(DMCUB_REGION3_CW4_TOP_ADDRESS) \ 72 + DMUB_SR(DMCUB_REGION3_CW5_TOP_ADDRESS) \ 73 + DMUB_SR(DMCUB_REGION3_CW6_TOP_ADDRESS) \ 74 + DMUB_SR(DMCUB_REGION3_CW7_TOP_ADDRESS) \ 75 + DMUB_SR(DMCUB_REGION4_OFFSET) \ 76 + DMUB_SR(DMCUB_REGION4_OFFSET_HIGH) \ 77 + DMUB_SR(DMCUB_REGION4_TOP_ADDRESS) \ 78 + DMUB_SR(DMCUB_SCRATCH0) \ 79 + DMUB_SR(DMCUB_SCRATCH1) \ 80 + 
DMUB_SR(DMCUB_SCRATCH2) \ 81 + DMUB_SR(DMCUB_SCRATCH3) \ 82 + DMUB_SR(DMCUB_SCRATCH4) \ 83 + DMUB_SR(DMCUB_SCRATCH5) \ 84 + DMUB_SR(DMCUB_SCRATCH6) \ 85 + DMUB_SR(DMCUB_SCRATCH7) \ 86 + DMUB_SR(DMCUB_SCRATCH8) \ 87 + DMUB_SR(DMCUB_SCRATCH9) \ 88 + DMUB_SR(DMCUB_SCRATCH10) \ 89 + DMUB_SR(DMCUB_SCRATCH11) \ 90 + DMUB_SR(DMCUB_SCRATCH12) \ 91 + DMUB_SR(DMCUB_SCRATCH13) \ 92 + DMUB_SR(DMCUB_SCRATCH14) \ 93 + DMUB_SR(DMCUB_SCRATCH15) \ 94 + DMUB_SR(CC_DC_PIPE_DIS) 95 + 96 + #define DMUB_COMMON_FIELDS() \ 97 + DMUB_SF(DMCUB_CNTL, DMCUB_ENABLE) \ 98 + DMUB_SF(DMCUB_CNTL, DMCUB_SOFT_RESET) \ 99 + DMUB_SF(DMCUB_CNTL, DMCUB_TRACEPORT_EN) \ 100 + DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE) \ 101 + DMUB_SF(DMCUB_MEM_CNTL, DMCUB_MEM_WRITE_SPACE) \ 102 + DMUB_SF(DMCUB_SEC_CNTL, DMCUB_SEC_RESET) \ 103 + DMUB_SF(DMCUB_SEC_CNTL, DMCUB_MEM_UNIT_ID) \ 104 + DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_TOP_ADDRESS) \ 105 + DMUB_SF(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE) \ 106 + DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_TOP_ADDRESS) \ 107 + DMUB_SF(DMCUB_REGION3_CW1_TOP_ADDRESS, DMCUB_REGION3_CW1_ENABLE) \ 108 + DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_TOP_ADDRESS) \ 109 + DMUB_SF(DMCUB_REGION3_CW2_TOP_ADDRESS, DMCUB_REGION3_CW2_ENABLE) \ 110 + DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_TOP_ADDRESS) \ 111 + DMUB_SF(DMCUB_REGION3_CW3_TOP_ADDRESS, DMCUB_REGION3_CW3_ENABLE) \ 112 + DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_TOP_ADDRESS) \ 113 + DMUB_SF(DMCUB_REGION3_CW4_TOP_ADDRESS, DMCUB_REGION3_CW4_ENABLE) \ 114 + DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_TOP_ADDRESS) \ 115 + DMUB_SF(DMCUB_REGION3_CW5_TOP_ADDRESS, DMCUB_REGION3_CW5_ENABLE) \ 116 + DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_TOP_ADDRESS) \ 117 + DMUB_SF(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE) \ 118 + DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_TOP_ADDRESS) \ 119 + 
DMUB_SF(DMCUB_REGION3_CW7_TOP_ADDRESS, DMCUB_REGION3_CW7_ENABLE) \ 120 + DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_TOP_ADDRESS) \ 121 + DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ 122 + DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) 123 + 124 + struct dmub_srv_common_reg_offset { 125 + #define DMUB_SR(reg) uint32_t reg; 126 + DMUB_COMMON_REGS() 127 + #undef DMUB_SR 128 + }; 129 + 130 + struct dmub_srv_common_reg_shift { 131 + #define DMUB_SF(reg, field) uint8_t reg##__##field; 132 + DMUB_COMMON_FIELDS() 133 + #undef DMUB_SF 134 + }; 135 + 136 + struct dmub_srv_common_reg_mask { 137 + #define DMUB_SF(reg, field) uint32_t reg##__##field; 138 + DMUB_COMMON_FIELDS() 139 + #undef DMUB_SF 140 + }; 141 + 142 + struct dmub_srv_common_regs { 143 + const struct dmub_srv_common_reg_offset offset; 144 + const struct dmub_srv_common_reg_mask mask; 145 + const struct dmub_srv_common_reg_shift shift; 146 + }; 147 + 148 + extern const struct dmub_srv_common_regs dmub_srv_dcn20_regs; 149 + 33 150 /* Hardware functions. */ 34 151 35 152 void dmub_dcn20_init(struct dmub_srv *dmub);
+15 -92
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.c
··· 25 25 26 26 #include "../inc/dmub_srv.h" 27 27 #include "dmub_reg.h" 28 + #include "dmub_dcn21.h" 28 29 29 30 #include "dcn/dcn_2_1_0_offset.h" 30 31 #include "dcn/dcn_2_1_0_sh_mask.h" ··· 33 32 34 33 #define BASE_INNER(seg) DMU_BASE__INST0_SEG##seg 35 34 #define CTX dmub 35 + #define REGS dmub->regs 36 36 37 - static inline void dmub_dcn21_translate_addr(const union dmub_addr *addr_in, 38 - uint64_t fb_base, 39 - uint64_t fb_offset, 40 - union dmub_addr *addr_out) 41 - { 42 - addr_out->quad_part = addr_in->quad_part - fb_base + fb_offset; 43 - } 37 + /* Registers. */ 44 38 45 - void dmub_dcn21_backdoor_load(struct dmub_srv *dmub, 46 - const struct dmub_window *cw0, 47 - const struct dmub_window *cw1) 48 - { 49 - union dmub_addr offset; 50 - uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; 39 + const struct dmub_srv_common_regs dmub_srv_dcn21_regs = { 40 + #define DMUB_SR(reg) REG_OFFSET(reg), 41 + { DMUB_COMMON_REGS() }, 42 + #undef DMUB_SR 51 43 52 - REG_UPDATE(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 1); 53 - REG_UPDATE_2(DMCUB_MEM_CNTL, DMCUB_MEM_READ_SPACE, 0x3, 54 - DMCUB_MEM_WRITE_SPACE, 0x3); 44 + #define DMUB_SF(reg, field) FD_MASK(reg, field), 45 + { DMUB_COMMON_FIELDS() }, 46 + #undef DMUB_SF 55 47 56 - dmub_dcn21_translate_addr(&cw0->offset, fb_base, fb_offset, &offset); 48 + #define DMUB_SF(reg, field) FD_SHIFT(reg, field), 49 + { DMUB_COMMON_FIELDS() }, 50 + #undef DMUB_SF 51 + }; 57 52 58 - REG_WRITE(DMCUB_REGION3_CW0_OFFSET, offset.u.low_part); 59 - REG_WRITE(DMCUB_REGION3_CW0_OFFSET_HIGH, offset.u.high_part); 60 - REG_WRITE(DMCUB_REGION3_CW0_BASE_ADDRESS, cw0->region.base); 61 - REG_SET_2(DMCUB_REGION3_CW0_TOP_ADDRESS, 0, 62 - DMCUB_REGION3_CW0_TOP_ADDRESS, cw0->region.top, 63 - DMCUB_REGION3_CW0_ENABLE, 1); 64 - 65 - dmub_dcn21_translate_addr(&cw1->offset, fb_base, fb_offset, &offset); 66 - 67 - REG_WRITE(DMCUB_REGION3_CW1_OFFSET, offset.u.low_part); 68 - REG_WRITE(DMCUB_REGION3_CW1_OFFSET_HIGH, offset.u.high_part); 69 - 
REG_WRITE(DMCUB_REGION3_CW1_BASE_ADDRESS, cw1->region.base); 70 - REG_SET_2(DMCUB_REGION3_CW1_TOP_ADDRESS, 0, 71 - DMCUB_REGION3_CW1_TOP_ADDRESS, cw1->region.top, 72 - DMCUB_REGION3_CW1_ENABLE, 1); 73 - 74 - REG_UPDATE_2(DMCUB_SEC_CNTL, DMCUB_SEC_RESET, 0, DMCUB_MEM_UNIT_ID, 75 - 0x20); 76 - } 77 - 78 - void dmub_dcn21_setup_windows(struct dmub_srv *dmub, 79 - const struct dmub_window *cw2, 80 - const struct dmub_window *cw3, 81 - const struct dmub_window *cw4, 82 - const struct dmub_window *cw5, 83 - const struct dmub_window *cw6) 84 - { 85 - union dmub_addr offset; 86 - uint64_t fb_base = dmub->fb_base, fb_offset = dmub->fb_offset; 87 - 88 - dmub_dcn21_translate_addr(&cw2->offset, fb_base, fb_offset, &offset); 89 - 90 - REG_WRITE(DMCUB_REGION3_CW2_OFFSET, offset.u.low_part); 91 - REG_WRITE(DMCUB_REGION3_CW2_OFFSET_HIGH, offset.u.high_part); 92 - REG_WRITE(DMCUB_REGION3_CW2_BASE_ADDRESS, cw2->region.base); 93 - REG_SET_2(DMCUB_REGION3_CW2_TOP_ADDRESS, 0, 94 - DMCUB_REGION3_CW2_TOP_ADDRESS, cw2->region.top, 95 - DMCUB_REGION3_CW2_ENABLE, 1); 96 - 97 - dmub_dcn21_translate_addr(&cw3->offset, fb_base, fb_offset, &offset); 98 - 99 - REG_WRITE(DMCUB_REGION3_CW3_OFFSET, offset.u.low_part); 100 - REG_WRITE(DMCUB_REGION3_CW3_OFFSET_HIGH, offset.u.high_part); 101 - REG_WRITE(DMCUB_REGION3_CW3_BASE_ADDRESS, cw3->region.base); 102 - REG_SET_2(DMCUB_REGION3_CW3_TOP_ADDRESS, 0, 103 - DMCUB_REGION3_CW3_TOP_ADDRESS, cw3->region.top, 104 - DMCUB_REGION3_CW3_ENABLE, 1); 105 - 106 - /* TODO: Move this to CW4. 
*/ 107 - dmub_dcn21_translate_addr(&cw4->offset, fb_base, fb_offset, &offset); 108 - 109 - REG_WRITE(DMCUB_REGION4_OFFSET, offset.u.low_part); 110 - REG_WRITE(DMCUB_REGION4_OFFSET_HIGH, offset.u.high_part); 111 - REG_SET_2(DMCUB_REGION4_TOP_ADDRESS, 0, DMCUB_REGION4_TOP_ADDRESS, 112 - cw4->region.top - cw4->region.base - 1, DMCUB_REGION4_ENABLE, 113 - 1); 114 - 115 - dmub_dcn21_translate_addr(&cw5->offset, fb_base, fb_offset, &offset); 116 - 117 - REG_WRITE(DMCUB_REGION3_CW5_OFFSET, offset.u.low_part); 118 - REG_WRITE(DMCUB_REGION3_CW5_OFFSET_HIGH, offset.u.high_part); 119 - REG_WRITE(DMCUB_REGION3_CW5_BASE_ADDRESS, cw5->region.base); 120 - REG_SET_2(DMCUB_REGION3_CW5_TOP_ADDRESS, 0, 121 - DMCUB_REGION3_CW5_TOP_ADDRESS, cw5->region.top, 122 - DMCUB_REGION3_CW5_ENABLE, 1); 123 - 124 - dmub_dcn21_translate_addr(&cw6->offset, fb_base, fb_offset, &offset); 125 - 126 - REG_WRITE(DMCUB_REGION3_CW6_OFFSET, offset.u.low_part); 127 - REG_WRITE(DMCUB_REGION3_CW6_OFFSET_HIGH, offset.u.high_part); 128 - REG_WRITE(DMCUB_REGION3_CW6_BASE_ADDRESS, cw6->region.base); 129 - REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0, 130 - DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top, 131 - DMCUB_REGION3_CW6_ENABLE, 1); 132 - } 53 + /* Shared functions. */ 133 54 134 55 bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub) 135 56 {
+4 -11
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn21.h
··· 28 28 29 29 #include "dmub_dcn20.h" 30 30 31 + /* Registers. */ 32 + 33 + extern const struct dmub_srv_common_regs dmub_srv_dcn21_regs; 34 + 31 35 /* Hardware functions. */ 32 - 33 - void dmub_dcn21_backdoor_load(struct dmub_srv *dmub, 34 - const struct dmub_window *cw0, 35 - const struct dmub_window *cw1); 36 - 37 - void dmub_dcn21_setup_windows(struct dmub_srv *dmub, 38 - const struct dmub_window *cw2, 39 - const struct dmub_window *cw3, 40 - const struct dmub_window *cw4, 41 - const struct dmub_window *cw5, 42 - const struct dmub_window *cw6); 43 36 44 37 bool dmub_dcn21_is_auto_load_done(struct dmub_srv *dmub); 45 38
+7 -3
drivers/gpu/drm/amd/display/dmub/src/dmub_reg.h
··· 34 34 35 35 #define BASE(seg) BASE_INNER(seg) 36 36 37 - #define REG_OFFSET(base_index, addr) (BASE(base_index) + addr) 37 + #define REG_OFFSET(reg_name) (BASE(mm##reg_name##_BASE_IDX) + mm##reg_name) 38 38 39 - #define REG(reg_name) REG_OFFSET(mm ## reg_name ## _BASE_IDX, mm ## reg_name) 39 + #define FD_SHIFT(reg_name, field) reg_name##__##field##__SHIFT 40 40 41 - #define FD(reg_field) reg_field ## __SHIFT, reg_field ## _MASK 41 + #define FD_MASK(reg_name, field) reg_name##__##field##_MASK 42 + 43 + #define REG(reg) (REGS)->offset.reg 44 + 45 + #define FD(reg_field) (REGS)->shift.reg_field, (REGS)->mask.reg_field 42 46 43 47 #define FN(reg_name, field) FD(reg_name##__##field) 44 48
+49 -8
drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
··· 26 26 #include "../inc/dmub_srv.h" 27 27 #include "dmub_dcn20.h" 28 28 #include "dmub_dcn21.h" 29 - #include "dmub_trace_buffer.h" 29 + #include "dmub_fw_meta.h" 30 30 #include "os_types.h" 31 31 /* 32 32 * Note: the DMUB service is standalone. No additional headers should be ··· 46 46 /* Mailbox size */ 47 47 #define DMUB_MAILBOX_SIZE (DMUB_RB_SIZE) 48 48 49 + /* Default state size if meta is absent. */ 50 + #define DMUB_FW_STATE_SIZE (1024) 51 + 52 + /* Default tracebuffer size if meta is absent. */ 53 + #define DMUB_TRACE_BUFFER_SIZE (1024) 49 54 50 55 /* Number of windows in use. */ 51 56 #define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1) ··· 67 62 return (val + factor - 1) / factor * factor; 68 63 } 69 64 65 + static const struct dmub_fw_meta_info * 66 + dmub_get_fw_meta_info(const uint8_t *fw_bss_data, uint32_t fw_bss_data_size) 67 + { 68 + const union dmub_fw_meta *meta; 69 + 70 + if (fw_bss_data == NULL) 71 + return NULL; 72 + 73 + if (fw_bss_data_size < sizeof(union dmub_fw_meta) + DMUB_FW_META_OFFSET) 74 + return NULL; 75 + 76 + meta = (const union dmub_fw_meta *)(fw_bss_data + fw_bss_data_size - 77 + DMUB_FW_META_OFFSET - 78 + sizeof(union dmub_fw_meta)); 79 + 80 + if (meta->info.magic_value != DMUB_FW_META_MAGIC) 81 + return NULL; 82 + 83 + return &meta->info; 84 + } 85 + 70 86 static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic) 71 87 { 72 88 struct dmub_srv_hw_funcs *funcs = &dmub->hw_funcs; ··· 95 69 switch (asic) { 96 70 case DMUB_ASIC_DCN20: 97 71 case DMUB_ASIC_DCN21: 72 + dmub->regs = &dmub_srv_dcn20_regs; 73 + 98 74 funcs->reset = dmub_dcn20_reset; 99 75 funcs->reset_release = dmub_dcn20_reset_release; 100 76 funcs->backdoor_load = dmub_dcn20_backdoor_load; ··· 108 80 funcs->is_hw_init = dmub_dcn20_is_hw_init; 109 81 110 82 if (asic == DMUB_ASIC_DCN21) { 111 - funcs->backdoor_load = dmub_dcn21_backdoor_load; 112 - funcs->setup_windows = dmub_dcn21_setup_windows; 83 + dmub->regs = &dmub_srv_dcn21_regs; 84 + 113 85 
funcs->is_auto_load_done = dmub_dcn21_is_auto_load_done; 114 86 funcs->is_phy_init = dmub_dcn21_is_phy_init; 115 87 } ··· 188 160 struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX]; 189 161 struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF]; 190 162 struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE]; 163 + const struct dmub_fw_meta_info *fw_info; 164 + uint32_t fw_state_size = DMUB_FW_STATE_SIZE; 165 + uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE; 191 166 192 167 if (!dmub->sw_init) 193 168 return DMUB_STATUS_INVALID; ··· 205 174 data->base = dmub_align(inst->top, 256); 206 175 data->top = data->base + params->bss_data_size; 207 176 177 + /* 178 + * All cache windows below should be aligned to the size 179 + * of the DMCUB cache line, 64 bytes. 180 + */ 181 + 208 182 stack->base = dmub_align(data->top, 256); 209 183 stack->top = stack->base + DMUB_STACK_SIZE + DMUB_CONTEXT_SIZE; 210 184 ··· 219 183 mail->base = dmub_align(bios->top, 256); 220 184 mail->top = mail->base + DMUB_MAILBOX_SIZE; 221 185 186 + fw_info = dmub_get_fw_meta_info(params->fw_bss_data, 187 + params->bss_data_size); 188 + 189 + if (fw_info) { 190 + fw_state_size = fw_info->fw_region_size; 191 + trace_buffer_size = fw_info->trace_buffer_size; 192 + } 193 + 222 194 trace_buff->base = dmub_align(mail->top, 256); 223 - trace_buff->top = trace_buff->base + TRACE_BUF_SIZE; 195 + trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64); 224 196 225 197 fw_state->base = dmub_align(trace_buff->top, 256); 226 - 227 - /* Align firmware state to size of cache line. */ 228 - fw_state->top = 229 - fw_state->base + dmub_align(sizeof(struct dmub_fw_state), 64); 198 + fw_state->top = fw_state->base + dmub_align(fw_state_size, 64); 230 199 231 200 out->fb_size = dmub_align(fw_state->top, 4096); 232 201
+5 -1
drivers/gpu/drm/amd/display/include/dal_asic_id.h
··· 138 138 #define RAVEN2_15D8_REV_E4 0xE4 139 139 #define RAVEN1_F0 0xF0 140 140 #define RAVEN_UNKNOWN 0xFF 141 - 141 + #ifndef ASICREV_IS_RAVEN 142 142 #define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN) 143 + #endif 144 + 143 145 #define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0)) 146 + #ifndef ASICREV_IS_RAVEN2 144 147 #define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0)) 148 + #endif 145 149 #define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN)) 146 150 #define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \ 147 151 || (eChipRev == RAVEN2_15D8_REV_E4))
+4 -2
drivers/gpu/drm/amd/display/modules/color/color_gamma.c
··· 364 364 scratch_2 = dc_fixpt_mul(gamma_of_2, 365 365 pow_buffer[pow_buffer_ptr%16]); 366 366 367 - pow_buffer[pow_buffer_ptr%16] = scratch_2; 368 - pow_buffer_ptr++; 367 + if (pow_buffer_ptr != -1) { 368 + pow_buffer[pow_buffer_ptr%16] = scratch_2; 369 + pow_buffer_ptr++; 370 + } 369 371 370 372 scratch_1 = dc_fixpt_mul(scratch_1, scratch_2); 371 373 scratch_1 = dc_fixpt_sub(scratch_1, args->a2);
+19 -13
drivers/gpu/drm/amd/display/modules/freesync/freesync.c
··· 37 37 #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) 38 38 /* Number of elements in the render times cache array */ 39 39 #define RENDER_TIMES_MAX_COUNT 10 40 - /* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ 41 - #define BTR_EXIT_MARGIN 2000 40 + /* Threshold to exit/exit BTR (to avoid frequent enter-exits at the lower limit) */ 41 + #define BTR_MAX_MARGIN 2500 42 42 /* Threshold to change BTR multiplier (to avoid frequent changes) */ 43 43 #define BTR_DRIFT_MARGIN 2000 44 44 /*Threshold to exit fixed refresh rate*/ ··· 254 254 unsigned int delta_from_mid_point_in_us_1 = 0xFFFFFFFF; 255 255 unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF; 256 256 unsigned int frames_to_insert = 0; 257 - unsigned int min_frame_duration_in_ns = 0; 258 - unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; 259 257 unsigned int delta_from_mid_point_delta_in_us; 260 - 261 - min_frame_duration_in_ns = ((unsigned int) (div64_u64( 262 - (1000000000ULL * 1000000), 263 - in_out_vrr->max_refresh_in_uhz))); 258 + unsigned int max_render_time_in_us = 259 + in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us; 264 260 265 261 /* Program BTR */ 266 - if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { 262 + if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) { 267 263 /* Exit Below the Range */ 268 264 if (in_out_vrr->btr.btr_active) { 269 265 in_out_vrr->btr.frame_counter = 0; 270 266 in_out_vrr->btr.btr_active = false; 271 267 } 272 - } else if (last_render_time_in_us > max_render_time_in_us) { 268 + } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) { 273 269 /* Enter Below the Range */ 274 - in_out_vrr->btr.btr_active = true; 270 + if (!in_out_vrr->btr.btr_active) { 271 + in_out_vrr->btr.btr_active = true; 272 + } 275 273 } 276 274 277 275 /* BTR set to "not active" so disengage */ ··· 325 327 /* 
Choose number of frames to insert based on how close it 326 328 * can get to the mid point of the variable range. 327 329 */ 328 - if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { 330 + if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us && 331 + (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 || 332 + mid_point_frames_floor < 2)) { 329 333 frames_to_insert = mid_point_frames_ceil; 330 334 delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - 331 335 delta_from_mid_point_in_us_1; ··· 343 343 if (in_out_vrr->btr.frames_to_insert != 0 && 344 344 delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) { 345 345 if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) < 346 - in_out_vrr->max_duration_in_us) && 346 + max_render_time_in_us) && 347 347 ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) > 348 348 in_out_vrr->min_duration_in_us)) 349 349 frames_to_insert = in_out_vrr->btr.frames_to_insert; ··· 796 796 refresh_range = in_out_vrr->max_refresh_in_uhz - 797 797 in_out_vrr->min_refresh_in_uhz; 798 798 799 + in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - 800 + 2 * in_out_vrr->min_duration_in_us; 801 + if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) 802 + in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; 803 + 799 804 in_out_vrr->supported = true; 800 805 } 801 806 ··· 816 811 in_out_vrr->btr.inserted_duration_in_us = 0; 817 812 in_out_vrr->btr.frames_to_insert = 0; 818 813 in_out_vrr->btr.frame_counter = 0; 814 + 819 815 in_out_vrr->btr.mid_point_in_us = 820 816 (in_out_vrr->min_duration_in_us + 821 817 in_out_vrr->max_duration_in_us) / 2;
+16 -4
drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
··· 67 67 break; 68 68 case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: 69 69 if (input->bcaps_read != PASS || 70 - input->r0p_read != PASS || 71 - input->rx_validation != PASS || 72 - (!conn->is_repeater && input->encryption != PASS)) { 70 + input->r0p_read != PASS) { 71 + fail_and_restart_in_ms(0, &status, output); 72 + break; 73 + } else if (input->rx_validation != PASS) { 73 74 /* 1A-06: consider invalid r0' a failure */ 74 75 /* 1A-08: consider bksv listed in SRM a failure */ 76 + /* 77 + * some slow RX will fail rx validation when it is 78 + * not ready. give it more time to react before retry. 79 + */ 80 + fail_and_restart_in_ms(1000, &status, output); 81 + break; 82 + } else if (!conn->is_repeater && input->encryption != PASS) { 75 83 fail_and_restart_in_ms(0, &status, output); 76 84 break; 77 85 } ··· 220 212 * after 3 attempts. 221 213 * 1A-08: consider bksv listed in SRM a failure 222 214 */ 223 - fail_and_restart_in_ms(0, &status, output); 215 + /* 216 + * some slow RX will fail rx validation when it is 217 + * not ready. give it more time to react before retry. 218 + */ 219 + fail_and_restart_in_ms(1000, &status, output); 224 220 } 225 221 break; 226 222 } else if ((!conn->is_repeater && input->encryption != PASS) ||
+11 -6
drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
··· 114 114 if (event_ctx->event == 115 115 MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { 116 116 /* 1A-11-3: consider h' timeout a failure */ 117 - fail_and_restart_in_ms(0, &status, output); 117 + fail_and_restart_in_ms(1000, &status, output); 118 118 } else { 119 119 /* continue h' polling */ 120 120 callback_in_ms(100, output); ··· 166 166 if (event_ctx->event == 167 167 MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { 168 168 /* 1A-11-2: consider h' timeout a failure */ 169 - fail_and_restart_in_ms(0, &status, output); 169 + fail_and_restart_in_ms(1000, &status, output); 170 170 } else { 171 171 /* continue h' polling */ 172 172 callback_in_ms(20, output); ··· 439 439 if (event_ctx->event == 440 440 MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) 441 441 /* 1A-10-3: consider h' timeout a failure */ 442 - fail_and_restart_in_ms(0, &status, output); 442 + fail_and_restart_in_ms(1000, &status, output); 443 443 else 444 444 increment_stay_counter(hdcp); 445 445 break; ··· 484 484 if (event_ctx->event == 485 485 MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) 486 486 /* 1A-10-2: consider h' timeout a failure */ 487 - fail_and_restart_in_ms(0, &status, output); 487 + fail_and_restart_in_ms(1000, &status, output); 488 488 else 489 489 increment_stay_counter(hdcp); 490 490 break; ··· 630 630 break; 631 631 } else if (input->prepare_stream_manage != PASS || 632 632 input->stream_manage_write != PASS) { 633 - fail_and_restart_in_ms(0, &status, output); 633 + if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) 634 + fail_and_restart_in_ms(0, &status, output); 635 + else 636 + increment_stay_counter(hdcp); 634 637 break; 635 638 } 636 639 callback_in_ms(100, output); ··· 658 655 */ 659 656 if (hdcp->auth.count.stream_management_retry_count > 10) { 660 657 fail_and_restart_in_ms(0, &status, output); 661 - } else { 658 + } else if (event_ctx->event == MOD_HDCP_EVENT_CALLBACK) { 662 659 hdcp->auth.count.stream_management_retry_count++; 663 660 callback_in_ms(0, output); 664 661 set_state_id(hdcp, output, 
D2_A9_SEND_STREAM_MANAGEMENT); 662 + } else { 663 + increment_stay_counter(hdcp); 665 664 } 666 665 break; 667 666 }
+4 -3
drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
··· 145 145 146 146 psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); 147 147 148 + hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; 149 + 148 150 if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) 149 151 return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE; 150 152 151 - hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; 152 153 hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary; 153 154 memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary, 154 155 sizeof(hdcp->auth.msg.hdcp1.aksv)); ··· 511 510 psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); 512 511 513 512 if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) 514 - return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE; 513 + return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; 515 514 516 515 if (msg_out->process.msg1_status != TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) 517 516 return MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE; ··· 795 794 hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2; 796 795 psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); 797 796 798 - return (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) && 797 + return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) && 799 798 (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__SUCCESS) 800 799 ? MOD_HDCP_STATUS_SUCCESS 801 800 : MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE;
+1
drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h
··· 92 92 uint32_t inserted_duration_in_us; 93 93 uint32_t frames_to_insert; 94 94 uint32_t frame_counter; 95 + uint32_t margin_in_us; 95 96 }; 96 97 97 98 struct mod_vrr_params_fixed_refresh {
drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_offset.h drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_offset.h
drivers/gpu/drm/amd/include/asic_reg/dcn/dpcs_2_1_0_sh_mask.h drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_1_0_sh_mask.h
+16
drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
··· 38 38 #define smnPerfMonCtlHi2 0x01d464UL 39 39 #define smnPerfMonCtlLo3 0x01d470UL 40 40 #define smnPerfMonCtlHi3 0x01d474UL 41 + #define smnPerfMonCtlLo4 0x01d880UL 42 + #define smnPerfMonCtlHi4 0x01d884UL 43 + #define smnPerfMonCtlLo5 0x01d888UL 44 + #define smnPerfMonCtlHi5 0x01d88cUL 45 + #define smnPerfMonCtlLo6 0x01d890UL 46 + #define smnPerfMonCtlHi6 0x01d894UL 47 + #define smnPerfMonCtlLo7 0x01d898UL 48 + #define smnPerfMonCtlHi7 0x01d89cUL 41 49 42 50 #define smnPerfMonCtrLo0 0x01d448UL 43 51 #define smnPerfMonCtrHi0 0x01d44cUL ··· 55 47 #define smnPerfMonCtrHi2 0x01d46cUL 56 48 #define smnPerfMonCtrLo3 0x01d478UL 57 49 #define smnPerfMonCtrHi3 0x01d47cUL 50 + #define smnPerfMonCtrLo4 0x01d790UL 51 + #define smnPerfMonCtrHi4 0x01d794UL 52 + #define smnPerfMonCtrLo5 0x01d798UL 53 + #define smnPerfMonCtrHi5 0x01d79cUL 54 + #define smnPerfMonCtrLo6 0x01d7a0UL 55 + #define smnPerfMonCtrHi6 0x01d7a4UL 56 + #define smnPerfMonCtrLo7 0x01d7a8UL 57 + #define smnPerfMonCtrHi7 0x01d7acUL 58 58 59 59 #define smnDF_PIE_AON_FabricIndirectConfigAccessAddress3 0x1d05cUL 60 60 #define smnDF_PIE_AON_FabricIndirectConfigAccessDataLo3 0x1d098UL
+647
drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_offset.h
··· 1 + /* 2 + * Copyright (C) 2019 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included 12 + * in all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 18 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
20 + */ 21 + #ifndef _dpcs_2_0_0_OFFSET_HEADER 22 + #define _dpcs_2_0_0_OFFSET_HEADER 23 + 24 + 25 + 26 + // addressBlock: dpcssys_dpcs0_dpcstx0_dispdec 27 + // base address: 0x0 28 + #define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x2928 29 + #define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 30 + #define mmDPCSTX0_DPCSTX_TX_CNTL 0x2929 31 + #define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX 2 32 + #define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x292a 33 + #define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX 2 34 + #define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL 0x292b 35 + #define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 36 + #define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x292c 37 + #define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 38 + #define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d 39 + #define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 40 + #define mmDPCSTX0_DPCSTX_DEBUG_CONFIG 0x292e 41 + #define mmDPCSTX0_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 42 + 43 + 44 + // addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec 45 + // base address: 0x0 46 + #define mmRDPCSTX0_RDPCSTX_CNTL 0x2930 47 + #define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX 2 48 + #define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL 0x2931 49 + #define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 50 + #define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 0x2932 51 + #define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 52 + #define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 0x2933 53 + #define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 54 + #define mmRDPCSTX0_RDPCS_TX_CR_ADDR 0x2934 55 + #define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX 2 56 + #define mmRDPCSTX0_RDPCS_TX_CR_DATA 0x2935 57 + #define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX 2 58 + #define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL 0x2936 59 + #define mmRDPCSTX0_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 60 + #define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL 0x2937 61 + #define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 62 + #define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2 0x2938 63 + #define mmRDPCSTX0_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 64 + #define mmRDPCSTX0_RDPCSTX_SCRATCH 
0x2939 65 + #define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX 2 66 + #define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x293c 67 + #define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 68 + #define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG 0x293d 69 + #define mmRDPCSTX0_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 70 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940 71 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2 72 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941 73 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX 2 74 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL2 0x2942 75 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX 2 76 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL3 0x2943 77 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX 2 78 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL4 0x2944 79 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX 2 80 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL5 0x2945 81 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX 2 82 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL6 0x2946 83 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX 2 84 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL7 0x2947 85 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX 2 86 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL8 0x2948 87 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX 2 88 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL9 0x2949 89 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX 2 90 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL10 0x294a 91 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX 2 92 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL11 0x294b 93 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX 2 94 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL12 0x294c 95 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX 2 96 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL13 0x294d 97 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX 2 98 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL14 0x294e 99 + #define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX 2 100 + #define mmRDPCSTX0_RDPCSTX_PHY_FUSE0 0x294f 101 + #define mmRDPCSTX0_RDPCSTX_PHY_FUSE0_BASE_IDX 2 102 + #define mmRDPCSTX0_RDPCSTX_PHY_FUSE1 0x2950 103 + 
#define mmRDPCSTX0_RDPCSTX_PHY_FUSE1_BASE_IDX 2 104 + #define mmRDPCSTX0_RDPCSTX_PHY_FUSE2 0x2951 105 + #define mmRDPCSTX0_RDPCSTX_PHY_FUSE2_BASE_IDX 2 106 + #define mmRDPCSTX0_RDPCSTX_PHY_FUSE3 0x2952 107 + #define mmRDPCSTX0_RDPCSTX_PHY_FUSE3_BASE_IDX 2 108 + #define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL 0x2953 109 + #define mmRDPCSTX0_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 110 + #define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2954 111 + #define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 112 + #define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2955 113 + #define mmRDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 114 + #define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG 0x2956 115 + #define mmRDPCSTX0_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 116 + 117 + 118 + // addressBlock: dpcssys_dpcssys_cr0_dispdec 119 + // base address: 0x0 120 + #define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR 0x2934 121 + #define mmDPCSSYS_CR0_DPCSSYS_CR_ADDR_BASE_IDX 2 122 + #define mmDPCSSYS_CR0_DPCSSYS_CR_DATA 0x2935 123 + #define mmDPCSSYS_CR0_DPCSSYS_CR_DATA_BASE_IDX 2 124 + 125 + 126 + // addressBlock: dpcssys_dpcs0_dpcstx1_dispdec 127 + // base address: 0x360 128 + #define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x2a00 129 + #define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 130 + #define mmDPCSTX1_DPCSTX_TX_CNTL 0x2a01 131 + #define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX 2 132 + #define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x2a02 133 + #define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX 2 134 + #define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL 0x2a03 135 + #define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 136 + #define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x2a04 137 + #define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 138 + #define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05 139 + #define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 140 + #define mmDPCSTX1_DPCSTX_DEBUG_CONFIG 0x2a06 141 + #define mmDPCSTX1_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 142 + 143 + 144 + // addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec 145 + // base address: 0x360 146 + #define 
mmRDPCSTX1_RDPCSTX_CNTL 0x2a08 147 + #define mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX 2 148 + #define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL 0x2a09 149 + #define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 150 + #define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 0x2a0a 151 + #define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 152 + #define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 0x2a0b 153 + #define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 154 + #define mmRDPCSTX1_RDPCS_TX_CR_ADDR 0x2a0c 155 + #define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX 2 156 + #define mmRDPCSTX1_RDPCS_TX_CR_DATA 0x2a0d 157 + #define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX 2 158 + #define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL 0x2a0e 159 + #define mmRDPCSTX1_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 160 + #define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL 0x2a0f 161 + #define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 162 + #define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2 0x2a10 163 + #define mmRDPCSTX1_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 164 + #define mmRDPCSTX1_RDPCSTX_SCRATCH 0x2a11 165 + #define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX 2 166 + #define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2a14 167 + #define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 168 + #define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG 0x2a15 169 + #define mmRDPCSTX1_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 170 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18 171 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2 172 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19 173 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX 2 174 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL2 0x2a1a 175 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX 2 176 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL3 0x2a1b 177 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX 2 178 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL4 0x2a1c 179 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX 2 180 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL5 0x2a1d 181 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX 2 182 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL6 0x2a1e 183 + #define 
mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX 2 184 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL7 0x2a1f 185 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX 2 186 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL8 0x2a20 187 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX 2 188 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL9 0x2a21 189 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX 2 190 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL10 0x2a22 191 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX 2 192 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL11 0x2a23 193 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX 2 194 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL12 0x2a24 195 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX 2 196 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL13 0x2a25 197 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX 2 198 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL14 0x2a26 199 + #define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX 2 200 + #define mmRDPCSTX1_RDPCSTX_PHY_FUSE0 0x2a27 201 + #define mmRDPCSTX1_RDPCSTX_PHY_FUSE0_BASE_IDX 2 202 + #define mmRDPCSTX1_RDPCSTX_PHY_FUSE1 0x2a28 203 + #define mmRDPCSTX1_RDPCSTX_PHY_FUSE1_BASE_IDX 2 204 + #define mmRDPCSTX1_RDPCSTX_PHY_FUSE2 0x2a29 205 + #define mmRDPCSTX1_RDPCSTX_PHY_FUSE2_BASE_IDX 2 206 + #define mmRDPCSTX1_RDPCSTX_PHY_FUSE3 0x2a2a 207 + #define mmRDPCSTX1_RDPCSTX_PHY_FUSE3_BASE_IDX 2 208 + #define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL 0x2a2b 209 + #define mmRDPCSTX1_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 210 + #define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2a2c 211 + #define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 212 + #define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2a2d 213 + #define mmRDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 214 + #define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG 0x2a2e 215 + #define mmRDPCSTX1_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 216 + 217 + 218 + // addressBlock: dpcssys_dpcssys_cr1_dispdec 219 + // base address: 0x360 220 + #define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR 0x2a0c 221 + #define mmDPCSSYS_CR1_DPCSSYS_CR_ADDR_BASE_IDX 2 222 + #define 
mmDPCSSYS_CR1_DPCSSYS_CR_DATA 0x2a0d 223 + #define mmDPCSSYS_CR1_DPCSSYS_CR_DATA_BASE_IDX 2 224 + 225 + 226 + // addressBlock: dpcssys_dpcs0_dpcstx2_dispdec 227 + // base address: 0x6c0 228 + #define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL 0x2ad8 229 + #define mmDPCSTX2_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 230 + #define mmDPCSTX2_DPCSTX_TX_CNTL 0x2ad9 231 + #define mmDPCSTX2_DPCSTX_TX_CNTL_BASE_IDX 2 232 + #define mmDPCSTX2_DPCSTX_CBUS_CNTL 0x2ada 233 + #define mmDPCSTX2_DPCSTX_CBUS_CNTL_BASE_IDX 2 234 + #define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL 0x2adb 235 + #define mmDPCSTX2_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 236 + #define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR 0x2adc 237 + #define mmDPCSTX2_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 238 + #define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA 0x2add 239 + #define mmDPCSTX2_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 240 + #define mmDPCSTX2_DPCSTX_DEBUG_CONFIG 0x2ade 241 + #define mmDPCSTX2_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 242 + 243 + 244 + // addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec 245 + // base address: 0x6c0 246 + #define mmRDPCSTX2_RDPCSTX_CNTL 0x2ae0 247 + #define mmRDPCSTX2_RDPCSTX_CNTL_BASE_IDX 2 248 + #define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL 0x2ae1 249 + #define mmRDPCSTX2_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 250 + #define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL 0x2ae2 251 + #define mmRDPCSTX2_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 252 + #define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA 0x2ae3 253 + #define mmRDPCSTX2_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 254 + #define mmRDPCSTX2_RDPCS_TX_CR_ADDR 0x2ae4 255 + #define mmRDPCSTX2_RDPCS_TX_CR_ADDR_BASE_IDX 2 256 + #define mmRDPCSTX2_RDPCS_TX_CR_DATA 0x2ae5 257 + #define mmRDPCSTX2_RDPCS_TX_CR_DATA_BASE_IDX 2 258 + #define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL 0x2ae6 259 + #define mmRDPCSTX2_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 260 + #define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL 0x2ae7 261 + #define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 262 + #define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2 0x2ae8 263 + #define mmRDPCSTX2_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 
2 264 + #define mmRDPCSTX2_RDPCSTX_SCRATCH 0x2ae9 265 + #define mmRDPCSTX2_RDPCSTX_SCRATCH_BASE_IDX 2 266 + #define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2aec 267 + #define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 268 + #define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG 0x2aed 269 + #define mmRDPCSTX2_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 270 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL0 0x2af0 271 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL0_BASE_IDX 2 272 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL1 0x2af1 273 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL1_BASE_IDX 2 274 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL2 0x2af2 275 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL2_BASE_IDX 2 276 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL3 0x2af3 277 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL3_BASE_IDX 2 278 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL4 0x2af4 279 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL4_BASE_IDX 2 280 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL5 0x2af5 281 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL5_BASE_IDX 2 282 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL6 0x2af6 283 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL6_BASE_IDX 2 284 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL7 0x2af7 285 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL7_BASE_IDX 2 286 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL8 0x2af8 287 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL8_BASE_IDX 2 288 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL9 0x2af9 289 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL9_BASE_IDX 2 290 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL10 0x2afa 291 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL10_BASE_IDX 2 292 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL11 0x2afb 293 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL11_BASE_IDX 2 294 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL12 0x2afc 295 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL12_BASE_IDX 2 296 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL13 0x2afd 297 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL13_BASE_IDX 2 298 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL14 0x2afe 299 + #define mmRDPCSTX2_RDPCSTX_PHY_CNTL14_BASE_IDX 2 300 + #define mmRDPCSTX2_RDPCSTX_PHY_FUSE0 0x2aff 301 + #define 
mmRDPCSTX2_RDPCSTX_PHY_FUSE0_BASE_IDX 2 302 + #define mmRDPCSTX2_RDPCSTX_PHY_FUSE1 0x2b00 303 + #define mmRDPCSTX2_RDPCSTX_PHY_FUSE1_BASE_IDX 2 304 + #define mmRDPCSTX2_RDPCSTX_PHY_FUSE2 0x2b01 305 + #define mmRDPCSTX2_RDPCSTX_PHY_FUSE2_BASE_IDX 2 306 + #define mmRDPCSTX2_RDPCSTX_PHY_FUSE3 0x2b02 307 + #define mmRDPCSTX2_RDPCSTX_PHY_FUSE3_BASE_IDX 2 308 + #define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL 0x2b03 309 + #define mmRDPCSTX2_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 310 + #define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2b04 311 + #define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 312 + #define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2b05 313 + #define mmRDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 314 + #define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG 0x2b06 315 + #define mmRDPCSTX2_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 316 + 317 + 318 + // addressBlock: dpcssys_dpcssys_cr2_dispdec 319 + // base address: 0x6c0 320 + #define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR 0x2ae4 321 + #define mmDPCSSYS_CR2_DPCSSYS_CR_ADDR_BASE_IDX 2 322 + #define mmDPCSSYS_CR2_DPCSSYS_CR_DATA 0x2ae5 323 + #define mmDPCSSYS_CR2_DPCSSYS_CR_DATA_BASE_IDX 2 324 + 325 + 326 + // addressBlock: dpcssys_dpcs0_dpcstx3_dispdec 327 + // base address: 0xa20 328 + #define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL 0x2bb0 329 + #define mmDPCSTX3_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 330 + #define mmDPCSTX3_DPCSTX_TX_CNTL 0x2bb1 331 + #define mmDPCSTX3_DPCSTX_TX_CNTL_BASE_IDX 2 332 + #define mmDPCSTX3_DPCSTX_CBUS_CNTL 0x2bb2 333 + #define mmDPCSTX3_DPCSTX_CBUS_CNTL_BASE_IDX 2 334 + #define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL 0x2bb3 335 + #define mmDPCSTX3_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 336 + #define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR 0x2bb4 337 + #define mmDPCSTX3_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 338 + #define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA 0x2bb5 339 + #define mmDPCSTX3_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 340 + #define mmDPCSTX3_DPCSTX_DEBUG_CONFIG 0x2bb6 341 + #define mmDPCSTX3_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 342 + 343 + 344 + // 
addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec 345 + // base address: 0xa20 346 + #define mmRDPCSTX3_RDPCSTX_CNTL 0x2bb8 347 + #define mmRDPCSTX3_RDPCSTX_CNTL_BASE_IDX 2 348 + #define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL 0x2bb9 349 + #define mmRDPCSTX3_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 350 + #define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL 0x2bba 351 + #define mmRDPCSTX3_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 352 + #define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA 0x2bbb 353 + #define mmRDPCSTX3_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 354 + #define mmRDPCSTX3_RDPCS_TX_CR_ADDR 0x2bbc 355 + #define mmRDPCSTX3_RDPCS_TX_CR_ADDR_BASE_IDX 2 356 + #define mmRDPCSTX3_RDPCS_TX_CR_DATA 0x2bbd 357 + #define mmRDPCSTX3_RDPCS_TX_CR_DATA_BASE_IDX 2 358 + #define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL 0x2bbe 359 + #define mmRDPCSTX3_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 360 + #define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL 0x2bbf 361 + #define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 362 + #define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2 0x2bc0 363 + #define mmRDPCSTX3_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 364 + #define mmRDPCSTX3_RDPCSTX_SCRATCH 0x2bc1 365 + #define mmRDPCSTX3_RDPCSTX_SCRATCH_BASE_IDX 2 366 + #define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2bc4 367 + #define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 368 + #define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG 0x2bc5 369 + #define mmRDPCSTX3_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 370 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL0 0x2bc8 371 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL0_BASE_IDX 2 372 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL1 0x2bc9 373 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL1_BASE_IDX 2 374 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL2 0x2bca 375 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL2_BASE_IDX 2 376 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL3 0x2bcb 377 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL3_BASE_IDX 2 378 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL4 0x2bcc 379 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL4_BASE_IDX 2 380 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL5 0x2bcd 381 + #define 
mmRDPCSTX3_RDPCSTX_PHY_CNTL5_BASE_IDX 2 382 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL6 0x2bce 383 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL6_BASE_IDX 2 384 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL7 0x2bcf 385 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL7_BASE_IDX 2 386 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL8 0x2bd0 387 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL8_BASE_IDX 2 388 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL9 0x2bd1 389 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL9_BASE_IDX 2 390 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL10 0x2bd2 391 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL10_BASE_IDX 2 392 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL11 0x2bd3 393 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL11_BASE_IDX 2 394 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL12 0x2bd4 395 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL12_BASE_IDX 2 396 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL13 0x2bd5 397 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL13_BASE_IDX 2 398 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL14 0x2bd6 399 + #define mmRDPCSTX3_RDPCSTX_PHY_CNTL14_BASE_IDX 2 400 + #define mmRDPCSTX3_RDPCSTX_PHY_FUSE0 0x2bd7 401 + #define mmRDPCSTX3_RDPCSTX_PHY_FUSE0_BASE_IDX 2 402 + #define mmRDPCSTX3_RDPCSTX_PHY_FUSE1 0x2bd8 403 + #define mmRDPCSTX3_RDPCSTX_PHY_FUSE1_BASE_IDX 2 404 + #define mmRDPCSTX3_RDPCSTX_PHY_FUSE2 0x2bd9 405 + #define mmRDPCSTX3_RDPCSTX_PHY_FUSE2_BASE_IDX 2 406 + #define mmRDPCSTX3_RDPCSTX_PHY_FUSE3 0x2bda 407 + #define mmRDPCSTX3_RDPCSTX_PHY_FUSE3_BASE_IDX 2 408 + #define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL 0x2bdb 409 + #define mmRDPCSTX3_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 410 + #define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2bdc 411 + #define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 412 + #define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2bdd 413 + #define mmRDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 414 + #define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG 0x2bde 415 + #define mmRDPCSTX3_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 416 + 417 + 418 + // addressBlock: dpcssys_dpcssys_cr3_dispdec 419 + // base address: 0xa20 420 + #define 
mmDPCSSYS_CR3_DPCSSYS_CR_ADDR 0x2bbc 421 + #define mmDPCSSYS_CR3_DPCSSYS_CR_ADDR_BASE_IDX 2 422 + #define mmDPCSSYS_CR3_DPCSSYS_CR_DATA 0x2bbd 423 + #define mmDPCSSYS_CR3_DPCSSYS_CR_DATA_BASE_IDX 2 424 + 425 + 426 + // addressBlock: dpcssys_dpcs0_dpcsrx_dispdec 427 + // base address: 0x0 428 + #define mmDPCSRX_PHY_CNTL 0x2c76 429 + #define mmDPCSRX_PHY_CNTL_BASE_IDX 2 430 + #define mmDPCSRX_RX_CLOCK_CNTL 0x2c78 431 + #define mmDPCSRX_RX_CLOCK_CNTL_BASE_IDX 2 432 + #define mmDPCSRX_RX_CNTL 0x2c7a 433 + #define mmDPCSRX_RX_CNTL_BASE_IDX 2 434 + #define mmDPCSRX_CBUS_CNTL 0x2c7b 435 + #define mmDPCSRX_CBUS_CNTL_BASE_IDX 2 436 + #define mmDPCSRX_REG_ERROR_STATUS 0x2c7c 437 + #define mmDPCSRX_REG_ERROR_STATUS_BASE_IDX 2 438 + #define mmDPCSRX_RX_ERROR_STATUS 0x2c7d 439 + #define mmDPCSRX_RX_ERROR_STATUS_BASE_IDX 2 440 + #define mmDPCSRX_INDEX_MODE_ADDR 0x2c80 441 + #define mmDPCSRX_INDEX_MODE_ADDR_BASE_IDX 2 442 + #define mmDPCSRX_INDEX_MODE_DATA 0x2c81 443 + #define mmDPCSRX_INDEX_MODE_DATA_BASE_IDX 2 444 + #define mmDPCSRX_DEBUG_CONFIG 0x2c82 445 + #define mmDPCSRX_DEBUG_CONFIG_BASE_IDX 2 446 + 447 + 448 + // addressBlock: dpcssys_dpcs0_dpcstx4_dispdec 449 + // base address: 0xd80 450 + #define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL 0x2c88 451 + #define mmDPCSTX4_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 452 + #define mmDPCSTX4_DPCSTX_TX_CNTL 0x2c89 453 + #define mmDPCSTX4_DPCSTX_TX_CNTL_BASE_IDX 2 454 + #define mmDPCSTX4_DPCSTX_CBUS_CNTL 0x2c8a 455 + #define mmDPCSTX4_DPCSTX_CBUS_CNTL_BASE_IDX 2 456 + #define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL 0x2c8b 457 + #define mmDPCSTX4_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 458 + #define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR 0x2c8c 459 + #define mmDPCSTX4_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 460 + #define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA 0x2c8d 461 + #define mmDPCSTX4_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 462 + #define mmDPCSTX4_DPCSTX_DEBUG_CONFIG 0x2c8e 463 + #define mmDPCSTX4_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 464 + 465 + 466 + // addressBlock: 
dpcssys_dpcs0_rdpcstx4_dispdec 467 + // base address: 0xd80 468 + #define mmRDPCSTX4_RDPCSTX_CNTL 0x2c90 469 + #define mmRDPCSTX4_RDPCSTX_CNTL_BASE_IDX 2 470 + #define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL 0x2c91 471 + #define mmRDPCSTX4_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 472 + #define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL 0x2c92 473 + #define mmRDPCSTX4_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 474 + #define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA 0x2c93 475 + #define mmRDPCSTX4_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 476 + #define mmRDPCSTX4_RDPCS_TX_CR_ADDR 0x2c94 477 + #define mmRDPCSTX4_RDPCS_TX_CR_ADDR_BASE_IDX 2 478 + #define mmRDPCSTX4_RDPCS_TX_CR_DATA 0x2c95 479 + #define mmRDPCSTX4_RDPCS_TX_CR_DATA_BASE_IDX 2 480 + #define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL 0x2c96 481 + #define mmRDPCSTX4_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 482 + #define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL 0x2c97 483 + #define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 484 + #define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2 0x2c98 485 + #define mmRDPCSTX4_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 486 + #define mmRDPCSTX4_RDPCSTX_SCRATCH 0x2c99 487 + #define mmRDPCSTX4_RDPCSTX_SCRATCH_BASE_IDX 2 488 + #define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2c9c 489 + #define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 490 + #define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG 0x2c9d 491 + #define mmRDPCSTX4_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 492 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL0 0x2ca0 493 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL0_BASE_IDX 2 494 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL1 0x2ca1 495 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL1_BASE_IDX 2 496 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL2 0x2ca2 497 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL2_BASE_IDX 2 498 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL3 0x2ca3 499 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL3_BASE_IDX 2 500 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL4 0x2ca4 501 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL4_BASE_IDX 2 502 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL5 0x2ca5 503 + #define 
mmRDPCSTX4_RDPCSTX_PHY_CNTL5_BASE_IDX 2 504 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL6 0x2ca6 505 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL6_BASE_IDX 2 506 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL7 0x2ca7 507 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL7_BASE_IDX 2 508 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL8 0x2ca8 509 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL8_BASE_IDX 2 510 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL9 0x2ca9 511 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL9_BASE_IDX 2 512 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL10 0x2caa 513 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL10_BASE_IDX 2 514 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL11 0x2cab 515 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL11_BASE_IDX 2 516 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL12 0x2cac 517 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL12_BASE_IDX 2 518 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL13 0x2cad 519 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL13_BASE_IDX 2 520 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL14 0x2cae 521 + #define mmRDPCSTX4_RDPCSTX_PHY_CNTL14_BASE_IDX 2 522 + #define mmRDPCSTX4_RDPCSTX_PHY_FUSE0 0x2caf 523 + #define mmRDPCSTX4_RDPCSTX_PHY_FUSE0_BASE_IDX 2 524 + #define mmRDPCSTX4_RDPCSTX_PHY_FUSE1 0x2cb0 525 + #define mmRDPCSTX4_RDPCSTX_PHY_FUSE1_BASE_IDX 2 526 + #define mmRDPCSTX4_RDPCSTX_PHY_FUSE2 0x2cb1 527 + #define mmRDPCSTX4_RDPCSTX_PHY_FUSE2_BASE_IDX 2 528 + #define mmRDPCSTX4_RDPCSTX_PHY_FUSE3 0x2cb2 529 + #define mmRDPCSTX4_RDPCSTX_PHY_FUSE3_BASE_IDX 2 530 + #define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL 0x2cb3 531 + #define mmRDPCSTX4_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 532 + #define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2cb4 533 + #define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 534 + #define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2cb5 535 + #define mmRDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 536 + #define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG 0x2cb6 537 + #define mmRDPCSTX4_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 538 + 539 + 540 + // addressBlock: dpcssys_dpcssys_cr4_dispdec 541 + // base address: 0xd80 542 + #define 
mmDPCSSYS_CR4_DPCSSYS_CR_ADDR 0x2c94 543 + #define mmDPCSSYS_CR4_DPCSSYS_CR_ADDR_BASE_IDX 2 544 + #define mmDPCSSYS_CR4_DPCSSYS_CR_DATA 0x2c95 545 + #define mmDPCSSYS_CR4_DPCSSYS_CR_DATA_BASE_IDX 2 546 + 547 + 548 + // addressBlock: dpcssys_dpcs0_dpcstx5_dispdec 549 + // base address: 0x10e0 550 + #define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL 0x2d60 551 + #define mmDPCSTX5_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2 552 + #define mmDPCSTX5_DPCSTX_TX_CNTL 0x2d61 553 + #define mmDPCSTX5_DPCSTX_TX_CNTL_BASE_IDX 2 554 + #define mmDPCSTX5_DPCSTX_CBUS_CNTL 0x2d62 555 + #define mmDPCSTX5_DPCSTX_CBUS_CNTL_BASE_IDX 2 556 + #define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL 0x2d63 557 + #define mmDPCSTX5_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2 558 + #define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR 0x2d64 559 + #define mmDPCSTX5_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2 560 + #define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA 0x2d65 561 + #define mmDPCSTX5_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 562 + #define mmDPCSTX5_DPCSTX_DEBUG_CONFIG 0x2d66 563 + #define mmDPCSTX5_DPCSTX_DEBUG_CONFIG_BASE_IDX 2 564 + 565 + 566 + // addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec 567 + // base address: 0x10e0 568 + #define mmRDPCSTX5_RDPCSTX_CNTL 0x2d68 569 + #define mmRDPCSTX5_RDPCSTX_CNTL_BASE_IDX 2 570 + #define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL 0x2d69 571 + #define mmRDPCSTX5_RDPCSTX_CLOCK_CNTL_BASE_IDX 2 572 + #define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL 0x2d6a 573 + #define mmRDPCSTX5_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2 574 + #define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA 0x2d6b 575 + #define mmRDPCSTX5_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2 576 + #define mmRDPCSTX5_RDPCS_TX_CR_ADDR 0x2d6c 577 + #define mmRDPCSTX5_RDPCS_TX_CR_ADDR_BASE_IDX 2 578 + #define mmRDPCSTX5_RDPCS_TX_CR_DATA 0x2d6d 579 + #define mmRDPCSTX5_RDPCS_TX_CR_DATA_BASE_IDX 2 580 + #define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL 0x2d6e 581 + #define mmRDPCSTX5_RDPCS_TX_SRAM_CNTL_BASE_IDX 2 582 + #define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL 0x2d6f 583 + #define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL_BASE_IDX 2 584 + 
#define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2 0x2d70 585 + #define mmRDPCSTX5_RDPCSTX_MEM_POWER_CTRL2_BASE_IDX 2 586 + #define mmRDPCSTX5_RDPCSTX_SCRATCH 0x2d71 587 + #define mmRDPCSTX5_RDPCSTX_SCRATCH_BASE_IDX 2 588 + #define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 0x2d74 589 + #define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG_BASE_IDX 2 590 + #define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG 0x2d75 591 + #define mmRDPCSTX5_RDPCSTX_DEBUG_CONFIG_BASE_IDX 2 592 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL0 0x2d78 593 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL0_BASE_IDX 2 594 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL1 0x2d79 595 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL1_BASE_IDX 2 596 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL2 0x2d7a 597 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL2_BASE_IDX 2 598 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL3 0x2d7b 599 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL3_BASE_IDX 2 600 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL4 0x2d7c 601 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL4_BASE_IDX 2 602 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL5 0x2d7d 603 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL5_BASE_IDX 2 604 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL6 0x2d7e 605 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL6_BASE_IDX 2 606 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL7 0x2d7f 607 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL7_BASE_IDX 2 608 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL8 0x2d80 609 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL8_BASE_IDX 2 610 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL9 0x2d81 611 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL9_BASE_IDX 2 612 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL10 0x2d82 613 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL10_BASE_IDX 2 614 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL11 0x2d83 615 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL11_BASE_IDX 2 616 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL12 0x2d84 617 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL12_BASE_IDX 2 618 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL13 0x2d85 619 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL13_BASE_IDX 2 620 + #define mmRDPCSTX5_RDPCSTX_PHY_CNTL14 0x2d86 621 + #define 
mmRDPCSTX5_RDPCSTX_PHY_CNTL14_BASE_IDX 2 622 + #define mmRDPCSTX5_RDPCSTX_PHY_FUSE0 0x2d87 623 + #define mmRDPCSTX5_RDPCSTX_PHY_FUSE0_BASE_IDX 2 624 + #define mmRDPCSTX5_RDPCSTX_PHY_FUSE1 0x2d88 625 + #define mmRDPCSTX5_RDPCSTX_PHY_FUSE1_BASE_IDX 2 626 + #define mmRDPCSTX5_RDPCSTX_PHY_FUSE2 0x2d89 627 + #define mmRDPCSTX5_RDPCSTX_PHY_FUSE2_BASE_IDX 2 628 + #define mmRDPCSTX5_RDPCSTX_PHY_FUSE3 0x2d8a 629 + #define mmRDPCSTX5_RDPCSTX_PHY_FUSE3_BASE_IDX 2 630 + #define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL 0x2d8b 631 + #define mmRDPCSTX5_RDPCSTX_PHY_RX_LD_VAL_BASE_IDX 2 632 + #define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3 0x2d8c 633 + #define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3_BASE_IDX 2 634 + #define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6 0x2d8d 635 + #define mmRDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6_BASE_IDX 2 636 + #define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG 0x2d8e 637 + #define mmRDPCSTX5_RDPCSTX_DPALT_CONTROL_REG_BASE_IDX 2 638 + 639 + 640 + // addressBlock: dpcssys_dpcssys_cr5_dispdec 641 + // base address: 0x10e0 642 + #define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR 0x2d6c 643 + #define mmDPCSSYS_CR5_DPCSSYS_CR_ADDR_BASE_IDX 2 644 + #define mmDPCSSYS_CR5_DPCSSYS_CR_DATA 0x2d6d 645 + #define mmDPCSSYS_CR5_DPCSSYS_CR_DATA_BASE_IDX 2 646 + 647 + #endif
+3912
drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_0_sh_mask.h
··· 1 + /* 2 + * Copyright (C) 2019 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included 12 + * in all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 18 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
20 + */ 21 + #ifndef _dpcs_2_0_0_SH_MASK_HEADER 22 + #define _dpcs_2_0_0_SH_MASK_HEADER 23 + 24 + 25 + // addressBlock: dpcssys_dpcs0_dpcstx0_dispdec 26 + //DPCSTX0_DPCSTX_TX_CLOCK_CNTL 27 + #define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 28 + #define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 29 + #define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 30 + #define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 31 + #define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L 32 + #define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L 33 + #define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L 34 + #define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L 35 + //DPCSTX0_DPCSTX_TX_CNTL 36 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc 37 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd 38 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe 39 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf 40 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 41 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 42 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 43 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f 44 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L 45 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L 46 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L 47 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L 48 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L 49 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L 50 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 51 + #define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 
0x80000000L 52 + //DPCSTX0_DPCSTX_CBUS_CNTL 53 + #define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 54 + #define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f 55 + #define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL 56 + #define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L 57 + //DPCSTX0_DPCSTX_INTERRUPT_CNTL 58 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 59 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 60 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 61 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 62 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 63 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa 64 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb 65 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc 66 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 67 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 68 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 69 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L 70 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L 71 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L 72 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L 73 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L 74 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L 75 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L 76 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L 77 + #define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L 78 + //DPCSTX0_DPCSTX_PLL_UPDATE_ADDR 
79 + #define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 80 + #define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL 81 + //DPCSTX0_DPCSTX_PLL_UPDATE_DATA 82 + #define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 83 + #define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL 84 + //DPCSTX0_DPCSTX_DEBUG_CONFIG 85 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 86 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 87 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4 88 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8 89 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe 90 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 91 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L 92 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL 93 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L 94 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L 95 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L 96 + #define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L 97 + 98 + 99 + // addressBlock: dpcssys_dpcs0_rdpcstx0_dispdec 100 + //RDPCSTX0_RDPCSTX_CNTL 101 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 102 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 103 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc 104 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd 105 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe 106 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf 107 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 108 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11 109 + #define 
RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 110 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18 111 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19 112 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a 113 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f 114 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L 115 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L 116 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L 117 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L 118 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L 119 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L 120 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L 121 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L 122 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 123 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L 124 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L 125 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L 126 + #define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L 127 + //RDPCSTX0_RDPCSTX_CLOCK_CNTL 128 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 129 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 130 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 131 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 132 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 133 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 134 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 135 + #define 
RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa 136 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc 137 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd 138 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe 139 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 140 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L 141 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L 142 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L 143 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L 144 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L 145 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L 146 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L 147 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L 148 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L 149 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L 150 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L 151 + #define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L 152 + //RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 153 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 154 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1 155 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2 156 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4 157 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5 158 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6 159 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7 
160 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8 161 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9 162 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa 163 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc 164 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10 165 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11 166 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12 167 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14 168 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 169 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L 170 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L 171 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L 172 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L 173 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L 174 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L 175 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L 176 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L 177 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L 178 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L 179 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L 180 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L 181 + #define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L 182 + 
#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L 183 + //RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 184 + #define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0 185 + #define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L 186 + //RDPCSTX0_RDPCS_TX_CR_ADDR 187 + #define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 188 + #define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 189 + //RDPCSTX0_RDPCS_TX_CR_DATA 190 + #define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 191 + #define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 192 + //RDPCSTX0_RDPCS_TX_SRAM_CNTL 193 + #define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14 194 + #define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18 195 + #define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c 196 + #define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L 197 + #define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L 198 + #define RDPCSTX0_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L 199 + //RDPCSTX0_RDPCSTX_MEM_POWER_CTRL 200 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0 201 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc 202 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a 203 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b 204 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c 205 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d 206 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e 207 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL 208 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L 209 + #define 
RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L 210 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L 211 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L 212 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L 213 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L 214 + //RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2 215 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0 216 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2 217 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L 218 + #define RDPCSTX0_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L 219 + //RDPCSTX0_RDPCSTX_SCRATCH 220 + #define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0 221 + #define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL 222 + //RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 223 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0 224 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4 225 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8 226 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L 227 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L 228 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 229 + //RDPCSTX0_RDPCSTX_DEBUG_CONFIG 230 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0 231 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4 232 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7 233 + #define 
RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8 234 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf 235 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10 236 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18 237 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L 238 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L 239 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L 240 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L 241 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L 242 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L 243 + #define RDPCSTX0_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L 244 + //RDPCSTX0_RDPCSTX_PHY_CNTL0 245 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0 246 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1 247 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2 248 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3 249 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4 250 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8 251 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9 252 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe 253 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11 254 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12 255 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14 256 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15 257 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18 258 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19 259 + 
#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c 260 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d 261 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f 262 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L 263 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L 264 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L 265 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L 266 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L 267 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L 268 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L 269 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L 270 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L 271 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L 272 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L 273 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L 274 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L 275 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L 276 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L 277 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L 278 + #define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L 279 + //RDPCSTX0_RDPCSTX_PHY_CNTL1 280 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0 281 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1 282 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2 283 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3 284 + #define 
RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4 285 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5 286 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6 287 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7 288 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L 289 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L 290 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L 291 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L 292 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L 293 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L 294 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L 295 + #define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L 296 + //RDPCSTX0_RDPCSTX_PHY_CNTL2 297 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3 298 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4 299 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5 300 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6 301 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7 302 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8 303 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9 304 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa 305 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb 306 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L 307 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L 308 + #define 
RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L 309 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L 310 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L 311 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L 312 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L 313 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L 314 + #define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L 315 + //RDPCSTX0_RDPCSTX_PHY_CNTL3 316 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0 317 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1 318 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2 319 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3 320 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4 321 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5 322 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8 323 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9 324 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa 325 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb 326 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc 327 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd 328 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10 329 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11 330 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12 331 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13 332 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14 333 + #define 
RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15 334 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18 335 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19 336 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a 337 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b 338 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c 339 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d 340 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L 341 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L 342 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L 343 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L 344 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L 345 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L 346 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L 347 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L 348 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L 349 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L 350 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L 351 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L 352 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L 353 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L 354 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L 355 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L 356 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L 357 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 
0x00200000L 358 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L 359 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L 360 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L 361 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L 362 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L 363 + #define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L 364 + //RDPCSTX0_RDPCSTX_PHY_CNTL4 365 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0 366 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4 367 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6 368 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7 369 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8 370 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc 371 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe 372 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf 373 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10 374 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14 375 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16 376 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17 377 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18 378 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c 379 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e 380 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f 381 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L 382 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 
0x00000010L 383 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L 384 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L 385 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L 386 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L 387 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L 388 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L 389 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L 390 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L 391 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L 392 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L 393 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L 394 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L 395 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L 396 + #define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L 397 + //RDPCSTX0_RDPCSTX_PHY_CNTL5 398 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0 399 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1 400 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4 401 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6 402 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7 403 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8 404 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9 405 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc 406 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe 407 + #define 
RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf 408 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10 409 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11 410 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14 411 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16 412 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17 413 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18 414 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19 415 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c 416 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e 417 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f 418 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L 419 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL 420 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L 421 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L 422 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L 423 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L 424 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L 425 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L 426 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L 427 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L 428 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L 429 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L 430 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L 431 + #define 
RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L 432 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L 433 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L 434 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L 435 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L 436 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L 437 + #define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L 438 + //RDPCSTX0_RDPCSTX_PHY_CNTL6 439 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0 440 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2 441 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4 442 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6 443 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8 444 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa 445 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc 446 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe 447 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 448 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 449 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 450 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13 451 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14 452 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L 453 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L 454 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L 455 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L 456 + #define 
RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L 457 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L 458 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L 459 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L 460 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L 461 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L 462 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L 463 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L 464 + #define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L 465 + //RDPCSTX0_RDPCSTX_PHY_CNTL7 466 + #define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 467 + #define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 468 + #define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL 469 + #define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L 470 + //RDPCSTX0_RDPCSTX_PHY_CNTL8 471 + #define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 472 + #define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL 473 + //RDPCSTX0_RDPCSTX_PHY_CNTL9 474 + #define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 475 + #define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 476 + #define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL 477 + #define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L 478 + //RDPCSTX0_RDPCSTX_PHY_CNTL10 479 + #define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 480 + #define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL 481 + //RDPCSTX0_RDPCSTX_PHY_CNTL11 482 + #define 
RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 483 + #define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 484 + #define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 485 + #define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 486 + #define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L 487 + #define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L 488 + #define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L 489 + #define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L 490 + //RDPCSTX0_RDPCSTX_PHY_CNTL12 491 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 492 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 493 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 494 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 495 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 496 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L 497 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L 498 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L 499 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L 500 + #define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L 501 + //RDPCSTX0_RDPCSTX_PHY_CNTL13 502 + #define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 503 + #define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c 504 + #define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d 505 + #define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e 506 + #define 
RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L 507 + #define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L 508 + #define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L 509 + #define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L 510 + //RDPCSTX0_RDPCSTX_PHY_CNTL14 511 + #define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 512 + #define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 513 + #define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c 514 + #define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L 515 + #define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L 516 + #define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L 517 + //RDPCSTX0_RDPCSTX_PHY_FUSE0 518 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0 519 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6 520 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc 521 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12 522 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14 523 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL 524 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L 525 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L 526 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L 527 + #define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L 528 + //RDPCSTX0_RDPCSTX_PHY_FUSE1 529 + #define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0 530 + #define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6 531 + #define 
RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc 532 + #define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12 533 + #define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19 534 + #define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL 535 + #define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L 536 + #define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L 537 + #define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L 538 + #define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L 539 + //RDPCSTX0_RDPCSTX_PHY_FUSE2 540 + #define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0 541 + #define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6 542 + #define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc 543 + #define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL 544 + #define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L 545 + #define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L 546 + //RDPCSTX0_RDPCSTX_PHY_FUSE3 547 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0 548 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6 549 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc 550 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12 551 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18 552 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL 553 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L 554 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L 555 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L 556 + #define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L 
557 + //RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL 558 + #define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0 559 + #define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8 560 + #define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL 561 + #define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L 562 + //RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3 563 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0 564 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1 565 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2 566 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3 567 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4 568 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5 569 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8 570 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9 571 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa 572 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb 573 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc 574 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd 575 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10 576 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11 577 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12 578 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13 579 + #define 
RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14 580 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15 581 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18 582 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19 583 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a 584 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b 585 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c 586 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d 587 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L 588 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L 589 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L 590 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L 591 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L 592 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L 593 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L 594 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L 595 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L 596 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L 597 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L 598 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L 599 + 
#define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L 600 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L 601 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L 602 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L 603 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L 604 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L 605 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L 606 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L 607 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L 608 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L 609 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L 610 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L 611 + //RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6 612 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0 613 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2 614 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4 615 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6 616 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8 617 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa 618 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc 619 + #define 
RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe 620 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10 621 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11 622 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12 623 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13 624 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14 625 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L 626 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L 627 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L 628 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L 629 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L 630 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L 631 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L 632 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L 633 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L 634 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L 635 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L 636 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L 637 + #define RDPCSTX0_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L 638 + //RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG 639 + #define 
RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0 640 + #define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4 641 + #define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8 642 + #define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L 643 + #define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L 644 + #define RDPCSTX0_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 645 + 646 + 647 + // addressBlock: dpcssys_dpcssys_cr0_dispdec 648 + //DPCSSYS_CR0_DPCSSYS_CR_ADDR 649 + #define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 650 + #define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 651 + //DPCSSYS_CR0_DPCSSYS_CR_DATA 652 + #define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 653 + #define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 654 + 655 + 656 + // addressBlock: dpcssys_dpcs0_dpcstx1_dispdec 657 + //DPCSTX1_DPCSTX_TX_CLOCK_CNTL 658 + #define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 659 + #define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 660 + #define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 661 + #define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 662 + #define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L 663 + #define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L 664 + #define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L 665 + #define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L 666 + //DPCSTX1_DPCSTX_TX_CNTL 667 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc 668 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd 669 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe 670 + #define 
DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf 671 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 672 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 673 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 674 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f 675 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L 676 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L 677 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L 678 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L 679 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L 680 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L 681 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 682 + #define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L 683 + //DPCSTX1_DPCSTX_CBUS_CNTL 684 + #define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 685 + #define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f 686 + #define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL 687 + #define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L 688 + //DPCSTX1_DPCSTX_INTERRUPT_CNTL 689 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 690 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 691 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 692 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 693 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 694 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa 695 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb 696 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc 697 + #define 
DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 698 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 699 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 700 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L 701 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L 702 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L 703 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L 704 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L 705 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L 706 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L 707 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L 708 + #define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L 709 + //DPCSTX1_DPCSTX_PLL_UPDATE_ADDR 710 + #define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 711 + #define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL 712 + //DPCSTX1_DPCSTX_PLL_UPDATE_DATA 713 + #define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 714 + #define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL 715 + //DPCSTX1_DPCSTX_DEBUG_CONFIG 716 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 717 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 718 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4 719 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8 720 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe 721 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 722 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L 723 + #define 
DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL 724 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L 725 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L 726 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L 727 + #define DPCSTX1_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L 728 + 729 + 730 + // addressBlock: dpcssys_dpcs0_rdpcstx1_dispdec 731 + //RDPCSTX1_RDPCSTX_CNTL 732 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 733 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 734 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc 735 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd 736 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe 737 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf 738 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 739 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11 740 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 741 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18 742 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19 743 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a 744 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f 745 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L 746 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L 747 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L 748 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L 749 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L 750 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L 751 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L 752 + #define 
RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L 753 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 754 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L 755 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L 756 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L 757 + #define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L 758 + //RDPCSTX1_RDPCSTX_CLOCK_CNTL 759 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 760 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 761 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 762 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 763 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 764 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 765 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 766 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa 767 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc 768 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd 769 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe 770 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 771 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L 772 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L 773 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L 774 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L 775 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L 776 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L 777 + #define 
RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L 778 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L 779 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L 780 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L 781 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L 782 + #define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L 783 + //RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 784 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 785 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1 786 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2 787 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4 788 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5 789 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6 790 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7 791 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8 792 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9 793 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa 794 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc 795 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10 796 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11 797 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12 798 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14 799 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 800 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L 
801 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L 802 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L 803 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L 804 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L 805 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L 806 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L 807 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L 808 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L 809 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L 810 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L 811 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L 812 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L 813 + #define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L 814 + //RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 815 + #define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0 816 + #define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L 817 + //RDPCSTX1_RDPCS_TX_CR_ADDR 818 + #define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 819 + #define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 820 + //RDPCSTX1_RDPCS_TX_CR_DATA 821 + #define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 822 + #define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 823 + //RDPCSTX1_RDPCS_TX_SRAM_CNTL 824 + #define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14 825 + #define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18 826 + #define 
RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c 827 + #define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L 828 + #define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L 829 + #define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L 830 + //RDPCSTX1_RDPCSTX_MEM_POWER_CTRL 831 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0 832 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc 833 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a 834 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b 835 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c 836 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d 837 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e 838 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL 839 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L 840 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L 841 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L 842 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L 843 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L 844 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L 845 + //RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2 846 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0 847 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2 848 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L 849 + #define RDPCSTX1_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L 850 + //RDPCSTX1_RDPCSTX_SCRATCH 
851 + #define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0 852 + #define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL 853 + //RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 854 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0 855 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4 856 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8 857 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L 858 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L 859 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 860 + //RDPCSTX1_RDPCSTX_DEBUG_CONFIG 861 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0 862 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4 863 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7 864 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8 865 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf 866 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10 867 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18 868 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L 869 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L 870 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L 871 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L 872 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L 873 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L 874 + #define RDPCSTX1_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L 875 + 
//RDPCSTX1_RDPCSTX_PHY_CNTL0 876 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0 877 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1 878 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2 879 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3 880 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4 881 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8 882 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9 883 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe 884 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11 885 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12 886 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14 887 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15 888 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18 889 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19 890 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c 891 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d 892 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f 893 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L 894 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L 895 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L 896 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L 897 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L 898 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L 899 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L 900 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L 901 + #define 
RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L 902 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L 903 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L 904 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L 905 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L 906 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L 907 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L 908 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L 909 + #define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L 910 + //RDPCSTX1_RDPCSTX_PHY_CNTL1 911 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0 912 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1 913 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2 914 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3 915 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4 916 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5 917 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6 918 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7 919 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L 920 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L 921 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L 922 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L 923 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L 924 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L 925 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L 926 + #define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 
0x00000080L 927 + //RDPCSTX1_RDPCSTX_PHY_CNTL2 928 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3 929 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4 930 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5 931 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6 932 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7 933 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8 934 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9 935 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa 936 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb 937 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L 938 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L 939 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L 940 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L 941 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L 942 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L 943 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L 944 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L 945 + #define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L 946 + //RDPCSTX1_RDPCSTX_PHY_CNTL3 947 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0 948 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1 949 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2 950 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3 951 
+ #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4 952 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5 953 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8 954 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9 955 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa 956 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb 957 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc 958 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd 959 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10 960 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11 961 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12 962 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13 963 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14 964 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15 965 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18 966 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19 967 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a 968 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b 969 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c 970 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d 971 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L 972 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L 973 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L 974 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L 975 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L 976 + #define 
RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L 977 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L 978 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L 979 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L 980 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L 981 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L 982 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L 983 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L 984 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L 985 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L 986 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L 987 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L 988 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L 989 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L 990 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L 991 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L 992 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L 993 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L 994 + #define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L 995 + //RDPCSTX1_RDPCSTX_PHY_CNTL4 996 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0 997 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4 998 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6 999 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7 1000 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8 1001 + 
#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc 1002 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe 1003 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf 1004 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10 1005 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14 1006 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16 1007 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17 1008 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18 1009 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c 1010 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e 1011 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f 1012 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L 1013 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L 1014 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L 1015 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L 1016 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L 1017 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L 1018 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L 1019 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L 1020 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L 1021 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L 1022 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L 1023 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L 1024 + #define 
RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L 1025 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L 1026 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L 1027 + #define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L 1028 + //RDPCSTX1_RDPCSTX_PHY_CNTL5 1029 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0 1030 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1 1031 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4 1032 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6 1033 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7 1034 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8 1035 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9 1036 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc 1037 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe 1038 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf 1039 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10 1040 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11 1041 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14 1042 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16 1043 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17 1044 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18 1045 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19 1046 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c 1047 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e 1048 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f 1049 + #define 
RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L 1050 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL 1051 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L 1052 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L 1053 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L 1054 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L 1055 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L 1056 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L 1057 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L 1058 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L 1059 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L 1060 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L 1061 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L 1062 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L 1063 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L 1064 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L 1065 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L 1066 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L 1067 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L 1068 + #define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L 1069 + //RDPCSTX1_RDPCSTX_PHY_CNTL6 1070 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0 1071 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2 1072 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4 1073 + #define 
RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6 1074 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8 1075 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa 1076 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc 1077 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe 1078 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 1079 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 1080 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 1081 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13 1082 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14 1083 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L 1084 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L 1085 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L 1086 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L 1087 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L 1088 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L 1089 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L 1090 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L 1091 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L 1092 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L 1093 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L 1094 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L 1095 + #define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L 1096 + //RDPCSTX1_RDPCSTX_PHY_CNTL7 1097 + #define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 1098 + 
#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 1099 + #define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL 1100 + #define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L 1101 + //RDPCSTX1_RDPCSTX_PHY_CNTL8 1102 + #define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 1103 + #define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL 1104 + //RDPCSTX1_RDPCSTX_PHY_CNTL9 1105 + #define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 1106 + #define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 1107 + #define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL 1108 + #define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L 1109 + //RDPCSTX1_RDPCSTX_PHY_CNTL10 1110 + #define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 1111 + #define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL 1112 + //RDPCSTX1_RDPCSTX_PHY_CNTL11 1113 + #define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 1114 + #define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 1115 + #define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 1116 + #define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 1117 + #define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L 1118 + #define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L 1119 + #define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L 1120 + #define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L 1121 + //RDPCSTX1_RDPCSTX_PHY_CNTL12 1122 + #define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 1123 + #define 
RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 1124 + #define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 1125 + #define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 1126 + #define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 1127 + #define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L 1128 + #define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L 1129 + #define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L 1130 + #define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L 1131 + #define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L 1132 + //RDPCSTX1_RDPCSTX_PHY_CNTL13 1133 + #define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 1134 + #define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c 1135 + #define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d 1136 + #define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e 1137 + #define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L 1138 + #define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L 1139 + #define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L 1140 + #define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L 1141 + //RDPCSTX1_RDPCSTX_PHY_CNTL14 1142 + #define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 1143 + #define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 1144 + #define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c 1145 + #define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L 1146 + #define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L 1147 
+ #define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L 1148 + //RDPCSTX1_RDPCSTX_PHY_FUSE0 1149 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0 1150 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6 1151 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc 1152 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12 1153 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14 1154 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL 1155 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L 1156 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L 1157 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L 1158 + #define RDPCSTX1_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L 1159 + //RDPCSTX1_RDPCSTX_PHY_FUSE1 1160 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0 1161 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6 1162 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc 1163 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12 1164 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19 1165 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL 1166 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L 1167 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L 1168 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L 1169 + #define RDPCSTX1_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L 1170 + //RDPCSTX1_RDPCSTX_PHY_FUSE2 1171 + #define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0 1172 + #define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6 
1173 + #define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc 1174 + #define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL 1175 + #define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L 1176 + #define RDPCSTX1_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L 1177 + //RDPCSTX1_RDPCSTX_PHY_FUSE3 1178 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0 1179 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6 1180 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc 1181 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12 1182 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18 1183 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL 1184 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L 1185 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L 1186 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L 1187 + #define RDPCSTX1_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L 1188 + //RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL 1189 + #define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0 1190 + #define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8 1191 + #define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL 1192 + #define RDPCSTX1_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L 1193 + //RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3 1194 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0 1195 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1 1196 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2 1197 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3 
1198 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4 1199 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5 1200 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8 1201 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9 1202 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa 1203 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb 1204 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc 1205 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd 1206 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10 1207 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11 1208 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12 1209 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13 1210 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14 1211 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15 1212 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18 1213 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19 1214 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a 1215 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b 1216 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c 1217 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d 1218 + #define 
RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L 1219 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L 1220 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L 1221 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L 1222 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L 1223 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L 1224 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L 1225 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L 1226 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L 1227 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L 1228 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L 1229 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L 1230 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L 1231 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L 1232 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L 1233 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L 1234 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L 1235 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L 1236 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L 1237 + #define 
RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L 1238 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L 1239 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L 1240 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L 1241 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L 1242 + //RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6 1243 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0 1244 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2 1245 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4 1246 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6 1247 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8 1248 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa 1249 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc 1250 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe 1251 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10 1252 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11 1253 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12 1254 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13 1255 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14 1256 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L 1257 + #define 
RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L 1258 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L 1259 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L 1260 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L 1261 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L 1262 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L 1263 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L 1264 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L 1265 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L 1266 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L 1267 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L 1268 + #define RDPCSTX1_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L 1269 + //RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG 1270 + #define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0 1271 + #define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4 1272 + #define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8 1273 + #define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L 1274 + #define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L 1275 + #define RDPCSTX1_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 1276 + 1277 + 1278 + // addressBlock: dpcssys_dpcssys_cr1_dispdec 1279 + //DPCSSYS_CR1_DPCSSYS_CR_ADDR 1280 + #define 
DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 1281 + #define DPCSSYS_CR1_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 1282 + //DPCSSYS_CR1_DPCSSYS_CR_DATA 1283 + #define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 1284 + #define DPCSSYS_CR1_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 1285 + 1286 + 1287 + // addressBlock: dpcssys_dpcs0_dpcstx2_dispdec 1288 + //DPCSTX2_DPCSTX_TX_CLOCK_CNTL 1289 + #define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 1290 + #define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 1291 + #define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 1292 + #define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 1293 + #define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L 1294 + #define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L 1295 + #define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L 1296 + #define DPCSTX2_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L 1297 + //DPCSTX2_DPCSTX_TX_CNTL 1298 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc 1299 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd 1300 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe 1301 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf 1302 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 1303 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 1304 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 1305 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f 1306 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L 1307 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L 1308 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L 1309 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L 
1310 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L 1311 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L 1312 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 1313 + #define DPCSTX2_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L 1314 + //DPCSTX2_DPCSTX_CBUS_CNTL 1315 + #define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 1316 + #define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f 1317 + #define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL 1318 + #define DPCSTX2_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L 1319 + //DPCSTX2_DPCSTX_INTERRUPT_CNTL 1320 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 1321 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 1322 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 1323 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 1324 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 1325 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa 1326 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb 1327 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc 1328 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 1329 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 1330 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 1331 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L 1332 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L 1333 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L 1334 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L 1335 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L 1336 + #define 
DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L 1337 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L 1338 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L 1339 + #define DPCSTX2_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L 1340 + //DPCSTX2_DPCSTX_PLL_UPDATE_ADDR 1341 + #define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 1342 + #define DPCSTX2_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL 1343 + //DPCSTX2_DPCSTX_PLL_UPDATE_DATA 1344 + #define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 1345 + #define DPCSTX2_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL 1346 + //DPCSTX2_DPCSTX_DEBUG_CONFIG 1347 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 1348 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 1349 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4 1350 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8 1351 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe 1352 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 1353 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L 1354 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL 1355 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L 1356 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L 1357 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L 1358 + #define DPCSTX2_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L 1359 + 1360 + 1361 + // addressBlock: dpcssys_dpcs0_rdpcstx2_dispdec 1362 + //RDPCSTX2_RDPCSTX_CNTL 1363 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 1364 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 1365 + #define 
RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc 1366 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd 1367 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe 1368 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf 1369 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 1370 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11 1371 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 1372 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18 1373 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19 1374 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a 1375 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f 1376 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L 1377 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L 1378 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L 1379 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L 1380 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L 1381 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L 1382 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L 1383 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L 1384 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 1385 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L 1386 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L 1387 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L 1388 + #define RDPCSTX2_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L 1389 + //RDPCSTX2_RDPCSTX_CLOCK_CNTL 1390 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 1391 + #define 
RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 1392 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 1393 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 1394 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 1395 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 1396 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 1397 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa 1398 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc 1399 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd 1400 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe 1401 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 1402 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L 1403 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L 1404 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L 1405 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L 1406 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L 1407 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L 1408 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L 1409 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L 1410 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L 1411 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L 1412 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L 1413 + #define RDPCSTX2_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L 1414 + //RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL 1415 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 1416 + 
#define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1 1417 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2 1418 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4 1419 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5 1420 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6 1421 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7 1422 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8 1423 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9 1424 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa 1425 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc 1426 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10 1427 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11 1428 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12 1429 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14 1430 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 1431 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L 1432 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L 1433 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L 1434 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L 1435 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L 1436 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L 1437 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L 1438 + #define 
RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L 1439 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L 1440 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L 1441 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L 1442 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L 1443 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L 1444 + #define RDPCSTX2_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L 1445 + //RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA 1446 + #define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0 1447 + #define RDPCSTX2_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L 1448 + //RDPCSTX2_RDPCS_TX_CR_ADDR 1449 + #define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 1450 + #define RDPCSTX2_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 1451 + //RDPCSTX2_RDPCS_TX_CR_DATA 1452 + #define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 1453 + #define RDPCSTX2_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 1454 + //RDPCSTX2_RDPCS_TX_SRAM_CNTL 1455 + #define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14 1456 + #define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18 1457 + #define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c 1458 + #define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L 1459 + #define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L 1460 + #define RDPCSTX2_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L 1461 + //RDPCSTX2_RDPCSTX_MEM_POWER_CTRL 1462 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0 1463 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc 1464 + #define 
RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a 1465 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b 1466 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c 1467 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d 1468 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e 1469 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL 1470 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L 1471 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L 1472 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L 1473 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L 1474 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L 1475 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L 1476 + //RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2 1477 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0 1478 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2 1479 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L 1480 + #define RDPCSTX2_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L 1481 + //RDPCSTX2_RDPCSTX_SCRATCH 1482 + #define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0 1483 + #define RDPCSTX2_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL 1484 + //RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 1485 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0 1486 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4 1487 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8 1488 + 
#define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L 1489 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L 1490 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 1491 + //RDPCSTX2_RDPCSTX_DEBUG_CONFIG 1492 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0 1493 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4 1494 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7 1495 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8 1496 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf 1497 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10 1498 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18 1499 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L 1500 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L 1501 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L 1502 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L 1503 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L 1504 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L 1505 + #define RDPCSTX2_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L 1506 + //RDPCSTX2_RDPCSTX_PHY_CNTL0 1507 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0 1508 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1 1509 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2 1510 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3 1511 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4 1512 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 
0x8 1513 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9 1514 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe 1515 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11 1516 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12 1517 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14 1518 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15 1519 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18 1520 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19 1521 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c 1522 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d 1523 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f 1524 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L 1525 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L 1526 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L 1527 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L 1528 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L 1529 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L 1530 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L 1531 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L 1532 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L 1533 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L 1534 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L 1535 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L 1536 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L 1537 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 
0x02000000L 1538 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L 1539 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L 1540 + #define RDPCSTX2_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L 1541 + //RDPCSTX2_RDPCSTX_PHY_CNTL1 1542 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0 1543 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1 1544 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2 1545 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3 1546 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4 1547 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5 1548 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6 1549 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7 1550 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L 1551 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L 1552 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L 1553 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L 1554 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L 1555 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L 1556 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L 1557 + #define RDPCSTX2_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L 1558 + //RDPCSTX2_RDPCSTX_PHY_CNTL2 1559 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3 1560 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4 1561 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5 1562 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6 1563 + #define 
RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7 1564 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8 1565 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9 1566 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa 1567 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb 1568 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L 1569 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L 1570 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L 1571 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L 1572 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L 1573 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L 1574 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L 1575 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L 1576 + #define RDPCSTX2_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L 1577 + //RDPCSTX2_RDPCSTX_PHY_CNTL3 1578 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0 1579 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1 1580 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2 1581 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3 1582 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4 1583 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5 1584 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8 1585 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9 1586 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 
0xa 1587 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb 1588 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc 1589 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd 1590 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10 1591 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11 1592 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12 1593 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13 1594 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14 1595 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15 1596 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18 1597 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19 1598 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a 1599 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b 1600 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c 1601 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d 1602 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L 1603 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L 1604 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L 1605 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L 1606 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L 1607 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L 1608 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L 1609 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L 1610 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L 1611 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 
0x00000800L 1612 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L 1613 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L 1614 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L 1615 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L 1616 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L 1617 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L 1618 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L 1619 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L 1620 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L 1621 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L 1622 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L 1623 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L 1624 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L 1625 + #define RDPCSTX2_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L 1626 + //RDPCSTX2_RDPCSTX_PHY_CNTL4 1627 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0 1628 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4 1629 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6 1630 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7 1631 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8 1632 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc 1633 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe 1634 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf 1635 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10 1636 + #define 
RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14 1637 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16 1638 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17 1639 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18 1640 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c 1641 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e 1642 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f 1643 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L 1644 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L 1645 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L 1646 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L 1647 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L 1648 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L 1649 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L 1650 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L 1651 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L 1652 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L 1653 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L 1654 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L 1655 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L 1656 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L 1657 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L 1658 + #define RDPCSTX2_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L 1659 + 
//RDPCSTX2_RDPCSTX_PHY_CNTL5 1660 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0 1661 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1 1662 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4 1663 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6 1664 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7 1665 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8 1666 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9 1667 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc 1668 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe 1669 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf 1670 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10 1671 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11 1672 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14 1673 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16 1674 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17 1675 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18 1676 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19 1677 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c 1678 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e 1679 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f 1680 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L 1681 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL 1682 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L 1683 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L 1684 + #define 
RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L 1685 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L 1686 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L 1687 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L 1688 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L 1689 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L 1690 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L 1691 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L 1692 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L 1693 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L 1694 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L 1695 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L 1696 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L 1697 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L 1698 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L 1699 + #define RDPCSTX2_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L 1700 + //RDPCSTX2_RDPCSTX_PHY_CNTL6 1701 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0 1702 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2 1703 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4 1704 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6 1705 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8 1706 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa 1707 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc 1708 + #define 
RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe 1709 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 1710 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 1711 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 1712 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13 1713 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14 1714 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L 1715 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L 1716 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L 1717 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L 1718 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L 1719 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L 1720 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L 1721 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L 1722 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L 1723 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L 1724 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L 1725 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L 1726 + #define RDPCSTX2_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L 1727 + //RDPCSTX2_RDPCSTX_PHY_CNTL7 1728 + #define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 1729 + #define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 1730 + #define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL 1731 + #define RDPCSTX2_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L 1732 + //RDPCSTX2_RDPCSTX_PHY_CNTL8 1733 + #define 
RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 1734 + #define RDPCSTX2_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL 1735 + //RDPCSTX2_RDPCSTX_PHY_CNTL9 1736 + #define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 1737 + #define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 1738 + #define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL 1739 + #define RDPCSTX2_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L 1740 + //RDPCSTX2_RDPCSTX_PHY_CNTL10 1741 + #define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 1742 + #define RDPCSTX2_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL 1743 + //RDPCSTX2_RDPCSTX_PHY_CNTL11 1744 + #define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 1745 + #define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 1746 + #define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 1747 + #define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 1748 + #define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L 1749 + #define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L 1750 + #define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L 1751 + #define RDPCSTX2_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L 1752 + //RDPCSTX2_RDPCSTX_PHY_CNTL12 1753 + #define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 1754 + #define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 1755 + #define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 1756 + #define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 1757 + #define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 1758 + #define 
RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L 1759 + #define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L 1760 + #define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L 1761 + #define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L 1762 + #define RDPCSTX2_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L 1763 + //RDPCSTX2_RDPCSTX_PHY_CNTL13 1764 + #define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 1765 + #define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c 1766 + #define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d 1767 + #define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e 1768 + #define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L 1769 + #define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L 1770 + #define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L 1771 + #define RDPCSTX2_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L 1772 + //RDPCSTX2_RDPCSTX_PHY_CNTL14 1773 + #define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 1774 + #define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 1775 + #define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c 1776 + #define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L 1777 + #define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L 1778 + #define RDPCSTX2_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L 1779 + //RDPCSTX2_RDPCSTX_PHY_FUSE0 1780 + #define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0 1781 + #define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6 1782 + #define 
RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc 1783 + #define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12 1784 + #define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14 1785 + #define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL 1786 + #define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L 1787 + #define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L 1788 + #define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L 1789 + #define RDPCSTX2_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L 1790 + //RDPCSTX2_RDPCSTX_PHY_FUSE1 1791 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0 1792 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6 1793 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc 1794 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12 1795 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19 1796 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL 1797 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L 1798 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L 1799 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L 1800 + #define RDPCSTX2_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L 1801 + //RDPCSTX2_RDPCSTX_PHY_FUSE2 1802 + #define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0 1803 + #define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6 1804 + #define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc 1805 + #define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL 1806 + #define RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L 1807 + #define 
RDPCSTX2_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L 1808 + //RDPCSTX2_RDPCSTX_PHY_FUSE3 1809 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0 1810 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6 1811 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc 1812 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12 1813 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18 1814 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL 1815 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L 1816 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L 1817 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L 1818 + #define RDPCSTX2_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L 1819 + //RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL 1820 + #define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0 1821 + #define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8 1822 + #define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL 1823 + #define RDPCSTX2_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L 1824 + //RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3 1825 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0 1826 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1 1827 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2 1828 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3 1829 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4 1830 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5 1831 + #define 
RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8 1832 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9 1833 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa 1834 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb 1835 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc 1836 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd 1837 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10 1838 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11 1839 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12 1840 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13 1841 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14 1842 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15 1843 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18 1844 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19 1845 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a 1846 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b 1847 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c 1848 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d 1849 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L 1850 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L 1851 + #define 
RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L 1852 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L 1853 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L 1854 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L 1855 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L 1856 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L 1857 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L 1858 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L 1859 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L 1860 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L 1861 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L 1862 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L 1863 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L 1864 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L 1865 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L 1866 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L 1867 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L 1868 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L 1869 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L 1870 + #define 
RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L 1871 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L 1872 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L 1873 + //RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6 1874 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0 1875 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2 1876 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4 1877 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6 1878 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8 1879 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa 1880 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc 1881 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe 1882 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10 1883 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11 1884 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12 1885 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13 1886 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14 1887 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L 1888 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L 1889 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L 1890 + #define 
RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L 1891 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L 1892 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L 1893 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L 1894 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L 1895 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L 1896 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L 1897 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L 1898 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L 1899 + #define RDPCSTX2_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L 1900 + //RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG 1901 + #define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0 1902 + #define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4 1903 + #define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8 1904 + #define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L 1905 + #define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L 1906 + #define RDPCSTX2_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 1907 + 1908 + 1909 + // addressBlock: dpcssys_dpcssys_cr2_dispdec 1910 + //DPCSSYS_CR2_DPCSSYS_CR_ADDR 1911 + #define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 1912 + #define DPCSSYS_CR2_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 1913 + //DPCSSYS_CR2_DPCSSYS_CR_DATA 1914 + #define DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 1915 + #define 
DPCSSYS_CR2_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 1916 + 1917 + 1918 + // addressBlock: dpcssys_dpcs0_dpcstx3_dispdec 1919 + //DPCSTX3_DPCSTX_TX_CLOCK_CNTL 1920 + #define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 1921 + #define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 1922 + #define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 1923 + #define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 1924 + #define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L 1925 + #define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L 1926 + #define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L 1927 + #define DPCSTX3_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L 1928 + //DPCSTX3_DPCSTX_TX_CNTL 1929 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc 1930 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd 1931 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe 1932 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf 1933 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 1934 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 1935 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 1936 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f 1937 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L 1938 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L 1939 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L 1940 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L 1941 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L 1942 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L 1943 + #define DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 1944 + #define 
DPCSTX3_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L 1945 + //DPCSTX3_DPCSTX_CBUS_CNTL 1946 + #define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 1947 + #define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f 1948 + #define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL 1949 + #define DPCSTX3_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L 1950 + //DPCSTX3_DPCSTX_INTERRUPT_CNTL 1951 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 1952 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 1953 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 1954 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 1955 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 1956 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa 1957 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb 1958 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc 1959 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 1960 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 1961 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 1962 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L 1963 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L 1964 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L 1965 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L 1966 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L 1967 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L 1968 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L 1969 + #define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L 1970 + #define 
/*
 * Bit-field SHIFT/MASK definitions for the DPCSTX3 / RDPCSTX3 DP PHY
 * register instances (addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec).
 *
 * For every field FOO of register REG, REG__FOO__SHIFT is the bit
 * position of the field and REG__FOO_MASK is its bit mask
 * (MASK == field_width_bits << SHIFT).  Auto-generated-style header;
 * this span was reconstructed after extraneous diff line-number
 * artifacts were stripped from the macro lines.
 */
#define DPCSTX3_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
//DPCSTX3_DPCSTX_PLL_UPDATE_ADDR
#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
#define DPCSTX3_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
//DPCSTX3_DPCSTX_PLL_UPDATE_DATA
#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
#define DPCSTX3_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
//DPCSTX3_DPCSTX_DEBUG_CONFIG
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
#define DPCSTX3_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L


// addressBlock: dpcssys_dpcs0_rdpcstx3_dispdec
//RDPCSTX3_RDPCSTX_CNTL
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L
#define RDPCSTX3_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
//RDPCSTX3_RDPCSTX_CLOCK_CNTL
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
#define RDPCSTX3_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
//RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
#define RDPCSTX3_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
//RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA
#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
//RDPCSTX3_RDPCS_TX_CR_ADDR
#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
#define RDPCSTX3_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
//RDPCSTX3_RDPCS_TX_CR_DATA
#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
#define RDPCSTX3_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
//RDPCSTX3_RDPCS_TX_SRAM_CNTL
#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
#define RDPCSTX3_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L
//RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L
#define RDPCSTX3_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L
//RDPCSTX3_RDPCSTX_SCRATCH
#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
//RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG
#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L
#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L
#define RDPCSTX3_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L
//RDPCSTX3_RDPCSTX_DEBUG_CONFIG
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L
#define RDPCSTX3_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L
//RDPCSTX3_RDPCSTX_PHY_CNTL0
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
//RDPCSTX3_RDPCSTX_PHY_CNTL1
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
#define RDPCSTX3_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
//RDPCSTX3_RDPCSTX_PHY_CNTL2
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
#define RDPCSTX3_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
//RDPCSTX3_RDPCSTX_PHY_CNTL3
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
//RDPCSTX3_RDPCSTX_PHY_CNTL4
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
//RDPCSTX3_RDPCSTX_PHY_CNTL5
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L
#define RDPCSTX3_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L
//RDPCSTX3_RDPCSTX_PHY_CNTL6
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10
#define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11
RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 2343 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13 2344 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14 2345 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L 2346 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L 2347 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L 2348 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L 2349 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L 2350 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L 2351 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L 2352 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L 2353 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L 2354 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L 2355 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L 2356 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L 2357 + #define RDPCSTX3_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L 2358 + //RDPCSTX3_RDPCSTX_PHY_CNTL7 2359 + #define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 2360 + #define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 2361 + #define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL 2362 + #define RDPCSTX3_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L 2363 + //RDPCSTX3_RDPCSTX_PHY_CNTL8 2364 + #define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 2365 + #define RDPCSTX3_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL 2366 + //RDPCSTX3_RDPCSTX_PHY_CNTL9 2367 + #define 
RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 2368 + #define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 2369 + #define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL 2370 + #define RDPCSTX3_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L 2371 + //RDPCSTX3_RDPCSTX_PHY_CNTL10 2372 + #define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 2373 + #define RDPCSTX3_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL 2374 + //RDPCSTX3_RDPCSTX_PHY_CNTL11 2375 + #define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 2376 + #define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 2377 + #define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 2378 + #define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 2379 + #define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L 2380 + #define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L 2381 + #define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L 2382 + #define RDPCSTX3_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L 2383 + //RDPCSTX3_RDPCSTX_PHY_CNTL12 2384 + #define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 2385 + #define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 2386 + #define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 2387 + #define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 2388 + #define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 2389 + #define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L 2390 + #define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L 2391 + #define 
RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L 2392 + #define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L 2393 + #define RDPCSTX3_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L 2394 + //RDPCSTX3_RDPCSTX_PHY_CNTL13 2395 + #define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 2396 + #define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c 2397 + #define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d 2398 + #define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e 2399 + #define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L 2400 + #define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L 2401 + #define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L 2402 + #define RDPCSTX3_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L 2403 + //RDPCSTX3_RDPCSTX_PHY_CNTL14 2404 + #define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 2405 + #define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 2406 + #define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c 2407 + #define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L 2408 + #define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L 2409 + #define RDPCSTX3_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L 2410 + //RDPCSTX3_RDPCSTX_PHY_FUSE0 2411 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0 2412 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6 2413 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc 2414 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12 2415 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14 
2416 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL 2417 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L 2418 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L 2419 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L 2420 + #define RDPCSTX3_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L 2421 + //RDPCSTX3_RDPCSTX_PHY_FUSE1 2422 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0 2423 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6 2424 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc 2425 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12 2426 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19 2427 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL 2428 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L 2429 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L 2430 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L 2431 + #define RDPCSTX3_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L 2432 + //RDPCSTX3_RDPCSTX_PHY_FUSE2 2433 + #define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0 2434 + #define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6 2435 + #define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc 2436 + #define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL 2437 + #define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L 2438 + #define RDPCSTX3_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L 2439 + //RDPCSTX3_RDPCSTX_PHY_FUSE3 2440 + #define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0 2441 + #define 
RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6 2442 + #define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc 2443 + #define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12 2444 + #define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18 2445 + #define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL 2446 + #define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L 2447 + #define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L 2448 + #define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L 2449 + #define RDPCSTX3_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L 2450 + //RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL 2451 + #define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0 2452 + #define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8 2453 + #define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL 2454 + #define RDPCSTX3_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L 2455 + //RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3 2456 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0 2457 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1 2458 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2 2459 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3 2460 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4 2461 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5 2462 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8 2463 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9 2464 + #define 
RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa 2465 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb 2466 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc 2467 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd 2468 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10 2469 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11 2470 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12 2471 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13 2472 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14 2473 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15 2474 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18 2475 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19 2476 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a 2477 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b 2478 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c 2479 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d 2480 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L 2481 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L 2482 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L 2483 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L 2484 + #define 
RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L 2485 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L 2486 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L 2487 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L 2488 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L 2489 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L 2490 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L 2491 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L 2492 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L 2493 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L 2494 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L 2495 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L 2496 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L 2497 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L 2498 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L 2499 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L 2500 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L 2501 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L 2502 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L 2503 + #define 
RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L 2504 + //RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6 2505 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0 2506 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2 2507 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4 2508 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6 2509 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8 2510 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa 2511 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc 2512 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe 2513 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10 2514 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11 2515 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12 2516 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13 2517 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14 2518 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L 2519 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L 2520 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L 2521 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L 2522 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L 2523 + #define 
RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L 2524 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L 2525 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L 2526 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L 2527 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L 2528 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L 2529 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L 2530 + #define RDPCSTX3_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L 2531 + //RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG 2532 + #define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0 2533 + #define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4 2534 + #define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8 2535 + #define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L 2536 + #define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L 2537 + #define RDPCSTX3_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 2538 + 2539 + 2540 + // addressBlock: dpcssys_dpcssys_cr3_dispdec 2541 + //DPCSSYS_CR3_DPCSSYS_CR_ADDR 2542 + #define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 2543 + #define DPCSSYS_CR3_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 2544 + //DPCSSYS_CR3_DPCSSYS_CR_DATA 2545 + #define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 2546 + #define DPCSSYS_CR3_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 2547 + 2548 + 2549 + // addressBlock: dpcssys_dpcs0_dpcsrx_dispdec 2550 + //DPCSRX_PHY_CNTL 2551 + #define DPCSRX_PHY_CNTL__DPCS_PHY_RESET__SHIFT 
0x0 2552 + #define DPCSRX_PHY_CNTL__DPCS_PHY_RESET_MASK 0x00000001L 2553 + //DPCSRX_RX_CLOCK_CNTL 2554 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS__SHIFT 0x0 2555 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN__SHIFT 0x1 2556 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL__SHIFT 0x2 2557 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON__SHIFT 0x4 2558 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS__SHIFT 0x10 2559 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN__SHIFT 0x11 2560 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON__SHIFT 0x12 2561 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS__SHIFT 0x14 2562 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN__SHIFT 0x15 2563 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON__SHIFT 0x16 2564 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS__SHIFT 0x18 2565 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN__SHIFT 0x19 2566 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON__SHIFT 0x1a 2567 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS__SHIFT 0x1c 2568 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN__SHIFT 0x1d 2569 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON__SHIFT 0x1e 2570 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_GATE_DIS_MASK 0x00000001L 2571 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_EN_MASK 0x00000002L 2572 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_SEL_MASK 0x0000000CL 2573 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX_CLOCK_ON_MASK 0x00000010L 2574 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_GATE_DIS_MASK 0x00010000L 2575 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_EN_MASK 0x00020000L 2576 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX0_CLOCK_ON_MASK 0x00040000L 2577 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_GATE_DIS_MASK 0x00100000L 2578 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_EN_MASK 0x00200000L 2579 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX1_CLOCK_ON_MASK 
0x00400000L 2580 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_GATE_DIS_MASK 0x01000000L 2581 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_EN_MASK 0x02000000L 2582 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX2_CLOCK_ON_MASK 0x04000000L 2583 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_GATE_DIS_MASK 0x10000000L 2584 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_EN_MASK 0x20000000L 2585 + #define DPCSRX_RX_CLOCK_CNTL__DPCS_SYMCLK_RX3_CLOCK_ON_MASK 0x40000000L 2586 + //DPCSRX_RX_CNTL 2587 + #define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN__SHIFT 0x0 2588 + #define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN__SHIFT 0x1 2589 + #define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN__SHIFT 0x2 2590 + #define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN__SHIFT 0x3 2591 + #define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN__SHIFT 0x4 2592 + #define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START__SHIFT 0x5 2593 + #define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY__SHIFT 0x8 2594 + #define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET__SHIFT 0x1f 2595 + #define DPCSRX_RX_CNTL__DPCS_RX_LANE0_EN_MASK 0x00000001L 2596 + #define DPCSRX_RX_CNTL__DPCS_RX_LANE1_EN_MASK 0x00000002L 2597 + #define DPCSRX_RX_CNTL__DPCS_RX_LANE2_EN_MASK 0x00000004L 2598 + #define DPCSRX_RX_CNTL__DPCS_RX_LANE3_EN_MASK 0x00000008L 2599 + #define DPCSRX_RX_CNTL__DPCS_RX_FIFO_EN_MASK 0x00000010L 2600 + #define DPCSRX_RX_CNTL__DPCS_RX_FIFO_START_MASK 0x00000020L 2601 + #define DPCSRX_RX_CNTL__DPCS_RX_FIFO_RD_START_DELAY_MASK 0x00000F00L 2602 + #define DPCSRX_RX_CNTL__DPCS_RX_SOFT_RESET_MASK 0x80000000L 2603 + //DPCSRX_CBUS_CNTL 2604 + #define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 2605 + #define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY__SHIFT 0x8 2606 + #define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f 2607 + #define DPCSRX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x0000000FL 2608 + #define DPCSRX_CBUS_CNTL__DPCS_PHY_MASTER_REQ_DELAY_MASK 0x0000FF00L 2609 + #define DPCSRX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L 2610 + 
//DPCSRX_REG_ERROR_STATUS 2611 + #define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 2612 + #define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR__SHIFT 0x1 2613 + #define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 2614 + #define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 2615 + #define DPCSRX_REG_ERROR_STATUS__DPCS_REG_ERROR_CLR_MASK 0x00000002L 2616 + #define DPCSRX_REG_ERROR_STATUS__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L 2617 + //DPCSRX_RX_ERROR_STATUS 2618 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR__SHIFT 0x0 2619 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR__SHIFT 0x1 2620 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR__SHIFT 0x2 2621 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR__SHIFT 0x3 2622 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR__SHIFT 0x8 2623 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK__SHIFT 0xc 2624 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX0_FIFO_ERROR_MASK 0x00000001L 2625 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX1_FIFO_ERROR_MASK 0x00000002L 2626 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX2_FIFO_ERROR_MASK 0x00000004L 2627 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX3_FIFO_ERROR_MASK 0x00000008L 2628 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX_ERROR_CLR_MASK 0x00000100L 2629 + #define DPCSRX_RX_ERROR_STATUS__DPCS_RX_FIFO_ERROR_MASK_MASK 0x00001000L 2630 + //DPCSRX_INDEX_MODE_ADDR 2631 + #define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR__SHIFT 0x0 2632 + #define DPCSRX_INDEX_MODE_ADDR__DPCS_INDEX_MODE_ADDR_MASK 0x0003FFFFL 2633 + //DPCSRX_INDEX_MODE_DATA 2634 + #define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA__SHIFT 0x0 2635 + #define DPCSRX_INDEX_MODE_DATA__DPCS_INDEX_MODE_DATA_MASK 0xFFFFFFFFL 2636 + //DPCSRX_DEBUG_CONFIG 2637 + #define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 2638 + #define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 2639 + #define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL__SHIFT 0x6 2640 + #define 
DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL__SHIFT 0xb 2641 + #define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe 2642 + #define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 2643 + #define DPCSRX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L 2644 + #define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL 2645 + #define DPCSRX_DEBUG_CONFIG__DPCS_DBG_RX_SYMCLK_SEL_MASK 0x000000C0L 2646 + #define DPCSRX_DEBUG_CONFIG__DPCS_DBG_BLOCK_SEL_MASK 0x00003800L 2647 + #define DPCSRX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L 2648 + #define DPCSRX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L 2649 + 2650 + 2651 + // addressBlock: dpcssys_dpcs0_dpcstx4_dispdec 2652 + //DPCSTX4_DPCSTX_TX_CLOCK_CNTL 2653 + #define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 2654 + #define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 2655 + #define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 2656 + #define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 2657 + #define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L 2658 + #define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L 2659 + #define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L 2660 + #define DPCSTX4_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L 2661 + //DPCSTX4_DPCSTX_TX_CNTL 2662 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc 2663 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd 2664 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe 2665 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf 2666 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 2667 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 2668 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 2669 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f 2670 + #define 
DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L 2671 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L 2672 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L 2673 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L 2674 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L 2675 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L 2676 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 2677 + #define DPCSTX4_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L 2678 + //DPCSTX4_DPCSTX_CBUS_CNTL 2679 + #define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 2680 + #define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f 2681 + #define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL 2682 + #define DPCSTX4_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L 2683 + //DPCSTX4_DPCSTX_INTERRUPT_CNTL 2684 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 2685 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 2686 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 2687 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 2688 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 2689 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa 2690 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb 2691 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc 2692 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 2693 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 2694 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 2695 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L 2696 + #define 
DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L 2697 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L 2698 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L 2699 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L 2700 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L 2701 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L 2702 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L 2703 + #define DPCSTX4_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L 2704 + //DPCSTX4_DPCSTX_PLL_UPDATE_ADDR 2705 + #define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 2706 + #define DPCSTX4_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL 2707 + //DPCSTX4_DPCSTX_PLL_UPDATE_DATA 2708 + #define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 2709 + #define DPCSTX4_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL 2710 + //DPCSTX4_DPCSTX_DEBUG_CONFIG 2711 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 2712 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 2713 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4 2714 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8 2715 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe 2716 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 2717 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L 2718 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL 2719 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L 2720 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L 2721 + #define DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L 2722 + #define 
DPCSTX4_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L 2723 + 2724 + 2725 + // addressBlock: dpcssys_dpcs0_rdpcstx4_dispdec 2726 + //RDPCSTX4_RDPCSTX_CNTL 2727 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 2728 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 2729 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc 2730 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd 2731 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe 2732 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf 2733 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 2734 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11 2735 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 2736 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18 2737 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19 2738 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a 2739 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f 2740 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L 2741 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L 2742 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L 2743 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L 2744 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L 2745 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L 2746 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L 2747 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L 2748 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 2749 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L 2750 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L 2751 + 
#define RDPCSTX4_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L 2752 + #define RDPCSTX4_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L 2753 + //RDPCSTX4_RDPCSTX_CLOCK_CNTL 2754 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 2755 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 2756 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 2757 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 2758 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 2759 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 2760 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 2761 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa 2762 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc 2763 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd 2764 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe 2765 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 2766 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L 2767 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L 2768 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L 2769 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L 2770 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L 2771 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L 2772 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L 2773 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L 2774 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L 2775 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L 2776 + #define 
RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L 2777 + #define RDPCSTX4_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L 2778 + //RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL 2779 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 2780 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1 2781 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2 2782 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4 2783 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5 2784 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6 2785 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7 2786 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8 2787 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9 2788 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa 2789 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc 2790 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10 2791 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11 2792 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12 2793 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14 2794 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 2795 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L 2796 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L 2797 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L 2798 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L 2799 + #define 
RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L 2800 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L 2801 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L 2802 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L 2803 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L 2804 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L 2805 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L 2806 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L 2807 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L 2808 + #define RDPCSTX4_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L 2809 + //RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA 2810 + #define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0 2811 + #define RDPCSTX4_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L 2812 + //RDPCSTX4_RDPCS_TX_CR_ADDR 2813 + #define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 2814 + #define RDPCSTX4_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 2815 + //RDPCSTX4_RDPCS_TX_CR_DATA 2816 + #define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 2817 + #define RDPCSTX4_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 2818 + //RDPCSTX4_RDPCS_TX_SRAM_CNTL 2819 + #define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14 2820 + #define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18 2821 + #define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c 2822 + #define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L 2823 + #define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L 2824 + #define RDPCSTX4_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 
0x30000000L 2825 + //RDPCSTX4_RDPCSTX_MEM_POWER_CTRL 2826 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0 2827 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc 2828 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a 2829 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b 2830 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c 2831 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d 2832 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e 2833 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL 2834 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L 2835 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L 2836 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L 2837 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L 2838 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L 2839 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L 2840 + //RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2 2841 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0 2842 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2 2843 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L 2844 + #define RDPCSTX4_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L 2845 + //RDPCSTX4_RDPCSTX_SCRATCH 2846 + #define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0 2847 + #define RDPCSTX4_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL 2848 + //RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 2849 + #define 
RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0 2850 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4 2851 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8 2852 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L 2853 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L 2854 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 2855 + //RDPCSTX4_RDPCSTX_DEBUG_CONFIG 2856 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0 2857 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4 2858 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7 2859 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8 2860 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf 2861 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10 2862 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18 2863 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L 2864 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L 2865 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L 2866 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L 2867 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L 2868 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L 2869 + #define RDPCSTX4_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L 2870 + //RDPCSTX4_RDPCSTX_PHY_CNTL0 2871 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0 2872 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1 2873 + 
#define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2 2874 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3 2875 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4 2876 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8 2877 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9 2878 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe 2879 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11 2880 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12 2881 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14 2882 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15 2883 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18 2884 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19 2885 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c 2886 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d 2887 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f 2888 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L 2889 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L 2890 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L 2891 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L 2892 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L 2893 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L 2894 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L 2895 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L 2896 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L 2897 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L 2898 + #define 
RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L 2899 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L 2900 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L 2901 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L 2902 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L 2903 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L 2904 + #define RDPCSTX4_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L 2905 + //RDPCSTX4_RDPCSTX_PHY_CNTL1 2906 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0 2907 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1 2908 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2 2909 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3 2910 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4 2911 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5 2912 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6 2913 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7 2914 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L 2915 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L 2916 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L 2917 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L 2918 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L 2919 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L 2920 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L 2921 + #define RDPCSTX4_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L 2922 + //RDPCSTX4_RDPCSTX_PHY_CNTL2 2923 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3 2924 + #define 
RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4 2925 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5 2926 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6 2927 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7 2928 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8 2929 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9 2930 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa 2931 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb 2932 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L 2933 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L 2934 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L 2935 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L 2936 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L 2937 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L 2938 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L 2939 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L 2940 + #define RDPCSTX4_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L 2941 + //RDPCSTX4_RDPCSTX_PHY_CNTL3 2942 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0 2943 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1 2944 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2 2945 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3 2946 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4 2947 + #define 
RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5 2948 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8 2949 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9 2950 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa 2951 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb 2952 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc 2953 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd 2954 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10 2955 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11 2956 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12 2957 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13 2958 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14 2959 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15 2960 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18 2961 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19 2962 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a 2963 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b 2964 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c 2965 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d 2966 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L 2967 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L 2968 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L 2969 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L 2970 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L 2971 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L 2972 + #define 
RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L 2973 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L 2974 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L 2975 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L 2976 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L 2977 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L 2978 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L 2979 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L 2980 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L 2981 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L 2982 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L 2983 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L 2984 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L 2985 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L 2986 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L 2987 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L 2988 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L 2989 + #define RDPCSTX4_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L 2990 + //RDPCSTX4_RDPCSTX_PHY_CNTL4 2991 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0 2992 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4 2993 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6 2994 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7 2995 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8 2996 + #define 
RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc 2997 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe 2998 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf 2999 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10 3000 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14 3001 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16 3002 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17 3003 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18 3004 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c 3005 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e 3006 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f 3007 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L 3008 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L 3009 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L 3010 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L 3011 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L 3012 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L 3013 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L 3014 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L 3015 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L 3016 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L 3017 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L 3018 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L 3019 + #define 
RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L 3020 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L 3021 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L 3022 + #define RDPCSTX4_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L 3023 + //RDPCSTX4_RDPCSTX_PHY_CNTL5 3024 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0 3025 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1 3026 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4 3027 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6 3028 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7 3029 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8 3030 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9 3031 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc 3032 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe 3033 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf 3034 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10 3035 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11 3036 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14 3037 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16 3038 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17 3039 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18 3040 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19 3041 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c 3042 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e 3043 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f 3044 + #define 
RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L 3045 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL 3046 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L 3047 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L 3048 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L 3049 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L 3050 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L 3051 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L 3052 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L 3053 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L 3054 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L 3055 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L 3056 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L 3057 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L 3058 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L 3059 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L 3060 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L 3061 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L 3062 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L 3063 + #define RDPCSTX4_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L 3064 + //RDPCSTX4_RDPCSTX_PHY_CNTL6 3065 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0 3066 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2 3067 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4 3068 + #define 
RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6 3069 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8 3070 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa 3071 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc 3072 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe 3073 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 3074 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 3075 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 3076 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13 3077 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14 3078 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L 3079 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L 3080 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L 3081 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L 3082 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L 3083 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L 3084 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L 3085 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L 3086 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L 3087 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L 3088 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L 3089 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L 3090 + #define RDPCSTX4_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L 3091 + //RDPCSTX4_RDPCSTX_PHY_CNTL7 3092 + #define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 3093 + 
#define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 3094 + #define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL 3095 + #define RDPCSTX4_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L 3096 + //RDPCSTX4_RDPCSTX_PHY_CNTL8 3097 + #define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 3098 + #define RDPCSTX4_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL 3099 + //RDPCSTX4_RDPCSTX_PHY_CNTL9 3100 + #define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 3101 + #define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 3102 + #define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL 3103 + #define RDPCSTX4_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L 3104 + //RDPCSTX4_RDPCSTX_PHY_CNTL10 3105 + #define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 3106 + #define RDPCSTX4_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL 3107 + //RDPCSTX4_RDPCSTX_PHY_CNTL11 3108 + #define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 3109 + #define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 3110 + #define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 3111 + #define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 3112 + #define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L 3113 + #define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L 3114 + #define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L 3115 + #define RDPCSTX4_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L 3116 + //RDPCSTX4_RDPCSTX_PHY_CNTL12 3117 + #define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 3118 + #define 
RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 3119 + #define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 3120 + #define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 3121 + #define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 3122 + #define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L 3123 + #define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L 3124 + #define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L 3125 + #define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L 3126 + #define RDPCSTX4_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L 3127 + //RDPCSTX4_RDPCSTX_PHY_CNTL13 3128 + #define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 3129 + #define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c 3130 + #define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d 3131 + #define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e 3132 + #define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L 3133 + #define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L 3134 + #define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L 3135 + #define RDPCSTX4_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L 3136 + //RDPCSTX4_RDPCSTX_PHY_CNTL14 3137 + #define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 3138 + #define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 3139 + #define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c 3140 + #define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L 3141 + #define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L 3142 
+ #define RDPCSTX4_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L 3143 + //RDPCSTX4_RDPCSTX_PHY_FUSE0 3144 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0 3145 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6 3146 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc 3147 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12 3148 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14 3149 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL 3150 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L 3151 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L 3152 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L 3153 + #define RDPCSTX4_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L 3154 + //RDPCSTX4_RDPCSTX_PHY_FUSE1 3155 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0 3156 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6 3157 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc 3158 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12 3159 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19 3160 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL 3161 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L 3162 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L 3163 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L 3164 + #define RDPCSTX4_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L 3165 + //RDPCSTX4_RDPCSTX_PHY_FUSE2 3166 + #define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0 3167 + #define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6 
3168 + #define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc 3169 + #define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL 3170 + #define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L 3171 + #define RDPCSTX4_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L 3172 + //RDPCSTX4_RDPCSTX_PHY_FUSE3 3173 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0 3174 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6 3175 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc 3176 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12 3177 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18 3178 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL 3179 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L 3180 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L 3181 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L 3182 + #define RDPCSTX4_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L 3183 + //RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL 3184 + #define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0 3185 + #define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8 3186 + #define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL 3187 + #define RDPCSTX4_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L 3188 + //RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3 3189 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0 3190 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1 3191 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2 3192 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3 
3193 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4 3194 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5 3195 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8 3196 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9 3197 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa 3198 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb 3199 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc 3200 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd 3201 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10 3202 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11 3203 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12 3204 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13 3205 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14 3206 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15 3207 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18 3208 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19 3209 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a 3210 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b 3211 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c 3212 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d 3213 + #define 
RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L 3214 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L 3215 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L 3216 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L 3217 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L 3218 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L 3219 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L 3220 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L 3221 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L 3222 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L 3223 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L 3224 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L 3225 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L 3226 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L 3227 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L 3228 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L 3229 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L 3230 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L 3231 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L 3232 + #define 
RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L 3233 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L 3234 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L 3235 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L 3236 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L 3237 + //RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6 3238 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0 3239 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2 3240 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4 3241 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6 3242 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8 3243 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa 3244 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc 3245 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe 3246 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10 3247 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11 3248 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12 3249 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13 3250 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14 3251 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L 3252 + #define 
RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L 3253 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L 3254 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L 3255 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L 3256 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L 3257 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L 3258 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L 3259 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L 3260 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L 3261 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L 3262 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L 3263 + #define RDPCSTX4_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L 3264 + //RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG 3265 + #define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0 3266 + #define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4 3267 + #define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8 3268 + #define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L 3269 + #define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L 3270 + #define RDPCSTX4_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 3271 + 3272 + 3273 + // addressBlock: dpcssys_dpcssys_cr4_dispdec 3274 + //DPCSSYS_CR4_DPCSSYS_CR_ADDR 3275 + #define 
DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 3276 + #define DPCSSYS_CR4_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 3277 + //DPCSSYS_CR4_DPCSSYS_CR_DATA 3278 + #define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 3279 + #define DPCSSYS_CR4_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 3280 + 3281 + 3282 + // addressBlock: dpcssys_dpcs0_dpcstx5_dispdec 3283 + //DPCSTX5_DPCSTX_TX_CLOCK_CNTL 3284 + #define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0 3285 + #define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1 3286 + #define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2 3287 + #define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3 3288 + #define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L 3289 + #define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L 3290 + #define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L 3291 + #define DPCSTX5_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L 3292 + //DPCSTX5_DPCSTX_TX_CNTL 3293 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc 3294 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd 3295 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe 3296 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf 3297 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10 3298 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11 3299 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 3300 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f 3301 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L 3302 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L 3303 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L 3304 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L 
3305 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L 3306 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L 3307 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 3308 + #define DPCSTX5_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L 3309 + //DPCSTX5_DPCSTX_CBUS_CNTL 3310 + #define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0 3311 + #define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f 3312 + #define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL 3313 + #define DPCSTX5_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L 3314 + //DPCSTX5_DPCSTX_INTERRUPT_CNTL 3315 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 3316 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1 3317 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4 3318 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8 3319 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9 3320 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa 3321 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb 3322 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc 3323 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10 3324 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14 3325 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 3326 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L 3327 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L 3328 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L 3329 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L 3330 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L 3331 + #define 
DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L 3332 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L 3333 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L 3334 + #define DPCSTX5_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L 3335 + //DPCSTX5_DPCSTX_PLL_UPDATE_ADDR 3336 + #define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0 3337 + #define DPCSTX5_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL 3338 + //DPCSTX5_DPCSTX_PLL_UPDATE_DATA 3339 + #define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0 3340 + #define DPCSTX5_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL 3341 + //DPCSTX5_DPCSTX_DEBUG_CONFIG 3342 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0 3343 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1 3344 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4 3345 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8 3346 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe 3347 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10 3348 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L 3349 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL 3350 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L 3351 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L 3352 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L 3353 + #define DPCSTX5_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L 3354 + 3355 + 3356 + // addressBlock: dpcssys_dpcs0_rdpcstx5_dispdec 3357 + //RDPCSTX5_RDPCSTX_CNTL 3358 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0 3359 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4 3360 + #define 
RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc 3361 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd 3362 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe 3363 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf 3364 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10 3365 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11 3366 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14 3367 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN__SHIFT 0x18 3368 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN__SHIFT 0x19 3369 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS__SHIFT 0x1a 3370 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f 3371 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L 3372 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L 3373 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L 3374 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L 3375 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L 3376 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L 3377 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L 3378 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L 3379 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L 3380 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_CR_REGISTER_BLOCK_EN_MASK 0x01000000L 3381 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_NON_DPALT_REGISTER_BLOCK_EN_MASK 0x02000000L 3382 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_DPALT_BLOCK_STATUS_MASK 0x04000000L 3383 + #define RDPCSTX5_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L 3384 + //RDPCSTX5_RDPCSTX_CLOCK_CNTL 3385 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0 3386 + #define 
RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4 3387 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5 3388 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6 3389 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7 3390 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8 3391 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9 3392 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa 3393 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc 3394 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd 3395 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe 3396 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10 3397 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L 3398 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L 3399 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L 3400 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L 3401 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L 3402 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L 3403 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L 3404 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L 3405 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L 3406 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L 3407 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L 3408 + #define RDPCSTX5_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L 3409 + //RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL 3410 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0 3411 + 
#define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1 3412 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2 3413 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4 3414 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5 3415 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6 3416 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7 3417 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8 3418 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9 3419 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa 3420 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc 3421 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10 3422 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11 3423 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12 3424 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14 3425 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L 3426 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L 3427 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L 3428 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L 3429 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L 3430 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L 3431 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L 3432 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L 3433 + #define 
RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L 3434 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L 3435 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L 3436 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L 3437 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L 3438 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L 3439 + #define RDPCSTX5_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L 3440 + //RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA 3441 + #define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0 3442 + #define RDPCSTX5_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L 3443 + //RDPCSTX5_RDPCS_TX_CR_ADDR 3444 + #define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 3445 + #define RDPCSTX5_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 3446 + //RDPCSTX5_RDPCS_TX_CR_DATA 3447 + #define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 3448 + #define RDPCSTX5_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 3449 + //RDPCSTX5_RDPCS_TX_SRAM_CNTL 3450 + #define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14 3451 + #define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18 3452 + #define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c 3453 + #define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L 3454 + #define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L 3455 + #define RDPCSTX5_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L 3456 + //RDPCSTX5_RDPCSTX_MEM_POWER_CTRL 3457 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES__SHIFT 0x0 3458 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES__SHIFT 0xc 3459 + #define 
RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1__SHIFT 0x1a 3460 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2__SHIFT 0x1b 3461 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1__SHIFT 0x1c 3462 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2__SHIFT 0x1d 3463 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM__SHIFT 0x1e 3464 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_RM_FUSES_MASK 0x00000FFFL 3465 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_FUSE_CUSTOM_RM_FUSES_MASK 0x03FFF000L 3466 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC1_MASK 0x04000000L 3467 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_PDP_BC2_MASK 0x08000000L 3468 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC1_MASK 0x10000000L 3469 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_MEM_POWER_CTRL_HD_BC2_MASK 0x20000000L 3470 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL__RDPCS_LIVMIN_DIS_SRAM_MASK 0x40000000L 3471 + //RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2 3472 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF__SHIFT 0x0 3473 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO__SHIFT 0x2 3474 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_POFF_MASK 0x00000003L 3475 + #define RDPCSTX5_RDPCSTX_MEM_POWER_CTRL2__RDPCS_MEM_POWER_CTRL_FISO_MASK 0x00000004L 3476 + //RDPCSTX5_RDPCSTX_SCRATCH 3477 + #define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0 3478 + #define RDPCSTX5_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL 3479 + //RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG 3480 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG__SHIFT 0x0 3481 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS__SHIFT 0x4 3482 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE__SHIFT 0x8 3483 + 
#define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_DIS_BLOCK_REG_MASK 0x00000001L 3484 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_FORCE_SYMCLK_DIV2_DIS_MASK 0x00000010L 3485 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_DIS_BLOCK_REG__RDPCS_DMCU_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 3486 + //RDPCSTX5_RDPCSTX_DEBUG_CONFIG 3487 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN__SHIFT 0x0 3488 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT__SHIFT 0x4 3489 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP__SHIFT 0x7 3490 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK__SHIFT 0x8 3491 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE__SHIFT 0xf 3492 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX__SHIFT 0x10 3493 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT__SHIFT 0x18 3494 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_EN_MASK 0x00000001L 3495 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_8BIT_MASK 0x00000070L 3496 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_ASYNC_SWAP_MASK 0x00000080L 3497 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_SEL_TEST_CLK_MASK 0x00001F00L 3498 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_EXPIRE_MASK 0x00008000L 3499 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MAX_MASK 0x00FF0000L 3500 + #define RDPCSTX5_RDPCSTX_DEBUG_CONFIG__RDPCS_DBG_CR_COUNT_MASK 0xFF000000L 3501 + //RDPCSTX5_RDPCSTX_PHY_CNTL0 3502 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0 3503 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1 3504 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2 3505 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3 3506 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4 3507 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 
0x8 3508 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9 3509 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe 3510 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11 3511 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12 3512 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14 3513 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15 3514 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18 3515 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19 3516 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c 3517 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d 3518 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f 3519 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L 3520 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L 3521 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L 3522 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L 3523 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L 3524 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L 3525 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L 3526 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L 3527 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L 3528 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L 3529 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L 3530 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L 3531 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L 3532 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 
0x02000000L 3533 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L 3534 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L 3535 + #define RDPCSTX5_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L 3536 + //RDPCSTX5_RDPCSTX_PHY_CNTL1 3537 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0 3538 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1 3539 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2 3540 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3 3541 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4 3542 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5 3543 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6 3544 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7 3545 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L 3546 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L 3547 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L 3548 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L 3549 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L 3550 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L 3551 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L 3552 + #define RDPCSTX5_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L 3553 + //RDPCSTX5_RDPCSTX_PHY_CNTL2 3554 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3 3555 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4 3556 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5 3557 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6 3558 + #define 
RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7 3559 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8 3560 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9 3561 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa 3562 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb 3563 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L 3564 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L 3565 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L 3566 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L 3567 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L 3568 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L 3569 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L 3570 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L 3571 + #define RDPCSTX5_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L 3572 + //RDPCSTX5_RDPCSTX_PHY_CNTL3 3573 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0 3574 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1 3575 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2 3576 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3 3577 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4 3578 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5 3579 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8 3580 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9 3581 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 
0xa 3582 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb 3583 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc 3584 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd 3585 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10 3586 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11 3587 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12 3588 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13 3589 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14 3590 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15 3591 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18 3592 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19 3593 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a 3594 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b 3595 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c 3596 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d 3597 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L 3598 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L 3599 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L 3600 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L 3601 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L 3602 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L 3603 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L 3604 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L 3605 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L 3606 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 
0x00000800L 3607 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L 3608 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L 3609 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L 3610 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L 3611 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L 3612 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L 3613 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L 3614 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L 3615 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L 3616 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L 3617 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L 3618 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L 3619 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L 3620 + #define RDPCSTX5_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L 3621 + //RDPCSTX5_RDPCSTX_PHY_CNTL4 3622 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0 3623 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4 3624 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6 3625 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7 3626 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8 3627 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc 3628 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe 3629 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf 3630 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10 3631 + #define 
RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14 3632 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16 3633 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17 3634 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18 3635 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c 3636 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e 3637 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f 3638 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L 3639 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L 3640 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L 3641 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L 3642 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L 3643 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L 3644 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L 3645 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L 3646 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L 3647 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L 3648 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L 3649 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L 3650 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L 3651 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L 3652 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L 3653 + #define RDPCSTX5_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L 3654 + 
//RDPCSTX5_RDPCSTX_PHY_CNTL5 3655 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x0 3656 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x1 3657 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x4 3658 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0x6 3659 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0x7 3660 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x8 3661 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x9 3662 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0xc 3663 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0xe 3664 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0xf 3665 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x10 3666 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x11 3667 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x14 3668 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0x16 3669 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0x17 3670 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x18 3671 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x19 3672 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x1c 3673 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1e 3674 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1f 3675 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000001L 3676 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x0000000EL 3677 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000030L 3678 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000040L 3679 + #define 
RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00000080L 3680 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00000100L 3681 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00000E00L 3682 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x00003000L 3683 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x00004000L 3684 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x00008000L 3685 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_LPD_MASK 0x00010000L 3686 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_RATE_MASK 0x000E0000L 3687 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00300000L 3688 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00400000L 3689 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00800000L 3690 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_LPD_MASK 0x01000000L 3691 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_RATE_MASK 0x0E000000L 3692 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x30000000L 3693 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x40000000L 3694 + #define RDPCSTX5_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x80000000L 3695 + //RDPCSTX5_RDPCSTX_PHY_CNTL6 3696 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x0 3697 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0x2 3698 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x4 3699 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x6 3700 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x8 3701 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa 3702 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0xc 3703 + #define 
RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0xe 3704 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4__SHIFT 0x10 3705 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x11 3706 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x12 3707 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x13 3708 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x14 3709 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x00000003L 3710 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000004L 3711 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x00000030L 3712 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x00000040L 3713 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x00000300L 3714 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L 3715 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x00003000L 3716 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x00004000L 3717 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_MASK 0x00010000L 3718 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_MASK 0x00020000L 3719 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00040000L 3720 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00080000L 3721 + #define RDPCSTX5_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00100000L 3722 + //RDPCSTX5_RDPCSTX_PHY_CNTL7 3723 + #define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0 3724 + #define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10 3725 + #define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL 3726 + #define RDPCSTX5_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L 3727 + //RDPCSTX5_RDPCSTX_PHY_CNTL8 3728 + #define 
RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0 3729 + #define RDPCSTX5_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL 3730 + //RDPCSTX5_RDPCSTX_PHY_CNTL9 3731 + #define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0 3732 + #define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18 3733 + #define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL 3734 + #define RDPCSTX5_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L 3735 + //RDPCSTX5_RDPCSTX_PHY_CNTL10 3736 + #define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0 3737 + #define RDPCSTX5_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL 3738 + //RDPCSTX5_RDPCSTX_PHY_CNTL11 3739 + #define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4 3740 + #define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10 3741 + #define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14 3742 + #define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18 3743 + #define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L 3744 + #define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L 3745 + #define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L 3746 + #define RDPCSTX5_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L 3747 + //RDPCSTX5_RDPCSTX_PHY_CNTL12 3748 + #define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0 3749 + #define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2 3750 + #define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4 3751 + #define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7 3752 + #define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8 3753 + #define 
RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L 3754 + #define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L 3755 + #define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L 3756 + #define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L 3757 + #define RDPCSTX5_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L 3758 + //RDPCSTX5_RDPCSTX_PHY_CNTL13 3759 + #define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14 3760 + #define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c 3761 + #define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d 3762 + #define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e 3763 + #define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L 3764 + #define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L 3765 + #define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L 3766 + #define RDPCSTX5_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L 3767 + //RDPCSTX5_RDPCSTX_PHY_CNTL14 3768 + #define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0 3769 + #define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18 3770 + #define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c 3771 + #define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L 3772 + #define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L 3773 + #define RDPCSTX5_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L 3774 + //RDPCSTX5_RDPCSTX_PHY_FUSE0 3775 + #define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0 3776 + #define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6 3777 + #define 
RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc 3778 + #define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12 3779 + #define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14 3780 + #define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL 3781 + #define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L 3782 + #define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L 3783 + #define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L 3784 + #define RDPCSTX5_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L 3785 + //RDPCSTX5_RDPCSTX_PHY_FUSE1 3786 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0 3787 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6 3788 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc 3789 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12 3790 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19 3791 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL 3792 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L 3793 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L 3794 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L 3795 + #define RDPCSTX5_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L 3796 + //RDPCSTX5_RDPCSTX_PHY_FUSE2 3797 + #define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0 3798 + #define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6 3799 + #define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc 3800 + #define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL 3801 + #define RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L 3802 + #define 
RDPCSTX5_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L 3803 + //RDPCSTX5_RDPCSTX_PHY_FUSE3 3804 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0 3805 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6 3806 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc 3807 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12 3808 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18 3809 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL 3810 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L 3811 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L 3812 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L 3813 + #define RDPCSTX5_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L 3814 + //RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL 3815 + #define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0 3816 + #define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8 3817 + #define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL 3818 + #define RDPCSTX5_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L 3819 + //RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3 3820 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED__SHIFT 0x0 3821 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED__SHIFT 0x1 3822 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED__SHIFT 0x2 3823 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED__SHIFT 0x3 3824 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED__SHIFT 0x4 3825 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED__SHIFT 0x5 3826 + #define 
RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED__SHIFT 0x8 3827 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED__SHIFT 0x9 3828 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED__SHIFT 0xa 3829 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED__SHIFT 0xb 3830 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED__SHIFT 0xc 3831 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED__SHIFT 0xd 3832 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED__SHIFT 0x10 3833 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED__SHIFT 0x11 3834 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED__SHIFT 0x12 3835 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED__SHIFT 0x13 3836 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED__SHIFT 0x14 3837 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED__SHIFT 0x15 3838 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED__SHIFT 0x18 3839 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED__SHIFT 0x19 3840 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED__SHIFT 0x1a 3841 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED__SHIFT 0x1b 3842 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED__SHIFT 0x1c 3843 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED__SHIFT 0x1d 3844 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_RESERVED_MASK 0x00000001L 3845 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_RESERVED_MASK 0x00000002L 3846 + #define 
RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_RESERVED_MASK 0x00000004L 3847 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_RESERVED_MASK 0x00000008L 3848 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_RESERVED_MASK 0x00000010L 3849 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_RESERVED_MASK 0x00000020L 3850 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_RESERVED_MASK 0x00000100L 3851 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_RESERVED_MASK 0x00000200L 3852 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_RESERVED_MASK 0x00000400L 3853 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_RESERVED_MASK 0x00000800L 3854 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_RESERVED_MASK 0x00001000L 3855 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_RESERVED_MASK 0x00002000L 3856 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_RESERVED_MASK 0x00010000L 3857 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_RESERVED_MASK 0x00020000L 3858 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_RESERVED_MASK 0x00040000L 3859 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_RESERVED_MASK 0x00080000L 3860 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_RESERVED_MASK 0x00100000L 3861 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_RESERVED_MASK 0x00200000L 3862 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_RESERVED_MASK 0x01000000L 3863 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_RESERVED_MASK 0x02000000L 3864 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_RESERVED_MASK 0x04000000L 3865 + #define 
RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_RESERVED_MASK 0x08000000L 3866 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_RESERVED_MASK 0x10000000L 3867 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_RESERVED_MASK 0x20000000L 3868 + //RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6 3869 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED__SHIFT 0x0 3870 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED__SHIFT 0x2 3871 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED__SHIFT 0x4 3872 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED__SHIFT 0x6 3873 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED__SHIFT 0x8 3874 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED__SHIFT 0xa 3875 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED__SHIFT 0xc 3876 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED__SHIFT 0xe 3877 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED__SHIFT 0x10 3878 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED__SHIFT 0x11 3879 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED__SHIFT 0x12 3880 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED__SHIFT 0x13 3881 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED__SHIFT 0x14 3882 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_PSTATE_RESERVED_MASK 0x00000003L 3883 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX0_MPLL_EN_RESERVED_MASK 0x00000004L 3884 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_PSTATE_RESERVED_MASK 0x00000030L 3885 + #define 
RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX1_MPLL_EN_RESERVED_MASK 0x00000040L 3886 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_RESERVED_MASK 0x00000300L 3887 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_RESERVED_MASK 0x00000400L 3888 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_RESERVED_MASK 0x00003000L 3889 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_RESERVED_MASK 0x00004000L 3890 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DP4_RESERVED_MASK 0x00010000L 3891 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_RESERVED_MASK 0x00020000L 3892 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DPALT_DISABLE_ACK_RESERVED_MASK 0x00040000L 3893 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_EN_RESERVED_MASK 0x00080000L 3894 + #define RDPCSTX5_RDPCSTX_DMCU_DPALT_PHY_CNTL6__RDPCS_PHY_DP_REF_CLK_REQ_RESERVED_MASK 0x00100000L 3895 + //RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG 3896 + #define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS__SHIFT 0x0 3897 + #define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED__SHIFT 0x4 3898 + #define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE__SHIFT 0x8 3899 + #define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_ALLOW_DRIVER_ACCESS_MASK 0x00000001L 3900 + #define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DRIVER_ACCESS_BLOCKED_MASK 0x00000010L 3901 + #define RDPCSTX5_RDPCSTX_DPALT_CONTROL_REG__RDPCS_DPALT_CONTROL_SPARE_MASK 0x0000FF00L 3902 + 3903 + 3904 + // addressBlock: dpcssys_dpcssys_cr5_dispdec 3905 + //DPCSSYS_CR5_DPCSSYS_CR_ADDR 3906 + #define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0 3907 + #define DPCSSYS_CR5_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL 3908 + //DPCSSYS_CR5_DPCSSYS_CR_DATA 3909 + #define DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0 3910 + #define 
DPCSSYS_CR5_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL 3911 + 3912 + #endif
+8
drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h
··· 2224 2224 #define mmCOMPUTE_STATIC_THREAD_MGMT_SE2_BASE_IDX 0 2225 2225 #define mmCOMPUTE_STATIC_THREAD_MGMT_SE3 0x0e1a 2226 2226 #define mmCOMPUTE_STATIC_THREAD_MGMT_SE3_BASE_IDX 0 2227 + #define mmCOMPUTE_STATIC_THREAD_MGMT_SE4 0x0e25 2228 + #define mmCOMPUTE_STATIC_THREAD_MGMT_SE4_BASE_IDX 0 2229 + #define mmCOMPUTE_STATIC_THREAD_MGMT_SE5 0x0e26 2230 + #define mmCOMPUTE_STATIC_THREAD_MGMT_SE5_BASE_IDX 0 2231 + #define mmCOMPUTE_STATIC_THREAD_MGMT_SE6 0x0e27 2232 + #define mmCOMPUTE_STATIC_THREAD_MGMT_SE6_BASE_IDX 0 2233 + #define mmCOMPUTE_STATIC_THREAD_MGMT_SE7 0x0e28 2234 + #define mmCOMPUTE_STATIC_THREAD_MGMT_SE7_BASE_IDX 0 2227 2235 #define mmCOMPUTE_RESTART_X 0x0e1b 2228 2236 #define mmCOMPUTE_RESTART_X_BASE_IDX 0 2229 2237 #define mmCOMPUTE_RESTART_Y 0x0e1c
+31
drivers/gpu/drm/amd/include/asic_reg/umc/umc_6_1_2_offset.h
··· 1 + /* 2 + * Copyright (C) 2019 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included 12 + * in all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 15 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN 18 + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 19 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 20 + */ 21 + #ifndef _umc_6_1_2_OFFSET_HEADER 22 + #define _umc_6_1_2_OFFSET_HEADER 23 + 24 + #define mmUMCCH0_0_EccErrCntSel_ARCT 0x0360 25 + #define mmUMCCH0_0_EccErrCntSel_ARCT_BASE_IDX 1 26 + #define mmUMCCH0_0_EccErrCnt_ARCT 0x0361 27 + #define mmUMCCH0_0_EccErrCnt_ARCT_BASE_IDX 1 28 + #define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT 0x03c2 29 + #define mmMCA_UMC_UMC0_MCUMC_STATUST0_ARCT_BASE_IDX 1 30 + 31 + #endif
-14
drivers/gpu/drm/amd/include/atomfirmware.h
··· 672 672 uint16_t used_by_driver_in_kb; 673 673 }; 674 674 675 - /* This is part of vram_usagebyfirmware_v2_1 */ 676 - struct vram_reserve_block 677 - { 678 - uint32_t start_address_in_kb; 679 - uint16_t used_by_firmware_in_kb; 680 - uint16_t used_by_driver_in_kb; 681 - }; 682 - 683 - /* Definitions for constance */ 684 - enum atomfirmware_internal_constants 685 - { 686 - ONE_KiB = 0x400, 687 - ONE_MiB = 0x100000, 688 - }; 689 675 690 676 /* 691 677 ***************************************************************************
+4 -4
drivers/gpu/drm/amd/powerplay/amd_powerplay.c
··· 275 275 { 276 276 struct pp_hwmgr *hwmgr = handle; 277 277 278 - if (!hwmgr->not_vf) 279 - return 0; 280 - 281 278 if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu) 282 279 return -EINVAL; 283 280 ··· 927 930 { 928 931 struct pp_hwmgr *hwmgr = handle; 929 932 930 - if (!hwmgr || !hwmgr->pm_en) 933 + if (!hwmgr) 931 934 return -EINVAL; 935 + 936 + if (!hwmgr->pm_en) 937 + return 0; 932 938 933 939 if (hwmgr->hwmgr_func->set_mp1_state) 934 940 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
+185 -154
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
··· 356 356 return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value); 357 357 } 358 358 359 + int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, 360 + uint32_t *min_value, uint32_t *max_value) 361 + { 362 + int ret = 0; 363 + uint32_t level_count = 0; 364 + 365 + if (!min_value && !max_value) 366 + return -EINVAL; 367 + 368 + if (min_value) { 369 + /* by default, level 0 clock value as min value */ 370 + ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value); 371 + if (ret) 372 + return ret; 373 + } 374 + 375 + if (max_value) { 376 + ret = smu_get_dpm_level_count(smu, clk_type, &level_count); 377 + if (ret) 378 + return ret; 379 + 380 + ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value); 381 + if (ret) 382 + return ret; 383 + } 384 + 385 + return ret; 386 + } 387 + 359 388 bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) 360 389 { 361 390 enum smu_feature_mask feature_id = 0; ··· 433 404 434 405 switch (block_type) { 435 406 case AMD_IP_BLOCK_TYPE_UVD: 436 - ret = smu_dpm_set_uvd_enable(smu, gate); 407 + ret = smu_dpm_set_uvd_enable(smu, !gate); 437 408 break; 438 409 case AMD_IP_BLOCK_TYPE_VCE: 439 - ret = smu_dpm_set_vce_enable(smu, gate); 410 + ret = smu_dpm_set_vce_enable(smu, !gate); 440 411 break; 441 412 case AMD_IP_BLOCK_TYPE_GFX: 442 413 ret = smu_gfx_off_control(smu, gate); ··· 445 416 ret = smu_powergate_sdma(smu, gate); 446 417 break; 447 418 case AMD_IP_BLOCK_TYPE_JPEG: 448 - ret = smu_dpm_set_jpeg_enable(smu, gate); 419 + ret = smu_dpm_set_jpeg_enable(smu, !gate); 449 420 break; 450 421 default: 451 422 break; ··· 519 490 { 520 491 struct smu_table_context *smu_table = &smu->smu_table; 521 492 struct amdgpu_device *adev = smu->adev; 522 - struct smu_table *table = NULL; 523 - int ret = 0; 493 + struct smu_table *table = &smu_table->driver_table; 524 494 int table_id = smu_table_get_index(smu, table_index); 495 + uint32_t table_size; 496 + int ret = 0; 525 
497 526 498 if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) 527 499 return -EINVAL; 528 500 529 - table = &smu_table->tables[table_index]; 501 + table_size = smu_table->tables[table_index].size; 530 502 531 - if (drv2smu) 532 - memcpy(table->cpu_addr, table_data, table->size); 503 + if (drv2smu) { 504 + memcpy(table->cpu_addr, table_data, table_size); 505 + /* 506 + * Flush hdp cache: to guard the content seen by 507 + * GPU is consitent with CPU. 508 + */ 509 + amdgpu_asic_flush_hdp(adev, NULL); 510 + } 533 511 534 - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrHigh, 535 - upper_32_bits(table->mc_address)); 536 - if (ret) 537 - return ret; 538 - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetDriverDramAddrLow, 539 - lower_32_bits(table->mc_address)); 540 - if (ret) 541 - return ret; 542 512 ret = smu_send_smc_msg_with_param(smu, drv2smu ? 543 513 SMU_MSG_TransferTableDram2Smu : 544 514 SMU_MSG_TransferTableSmu2Dram, ··· 545 517 if (ret) 546 518 return ret; 547 519 548 - /* flush hdp cache */ 549 - adev->nbio.funcs->hdp_flush(adev, NULL); 550 - 551 - if (!drv2smu) 552 - memcpy(table_data, table->cpu_addr, table->size); 520 + if (!drv2smu) { 521 + amdgpu_asic_flush_hdp(adev, NULL); 522 + memcpy(table_data, table->cpu_addr, table_size); 523 + } 553 524 554 525 return ret; 555 526 } ··· 558 531 if (adev->asic_type == CHIP_VEGA20) 559 532 return (amdgpu_dpm == 2) ? 
true : false; 560 533 else if (adev->asic_type >= CHIP_ARCTURUS) { 561 - if (amdgpu_sriov_vf(adev)) 534 + if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 562 535 return false; 563 536 else 564 537 return true; ··· 670 643 671 644 int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) 672 645 { 673 - struct amdgpu_device *adev = smu->adev; 674 646 struct smu_feature *feature = &smu->smu_feature; 675 647 int feature_id; 676 648 int ret = 0; 677 649 678 - if (adev->flags & AMD_IS_APU) 650 + if (smu->is_apu) 679 651 return 1; 680 652 681 653 feature_id = smu_feature_get_index(smu, mask); ··· 898 872 smu->smu_baco.platform_support = false; 899 873 900 874 mutex_init(&smu->sensor_lock); 875 + mutex_init(&smu->metrics_lock); 901 876 902 877 smu->watermarks_bitmap = 0; 903 878 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; ··· 974 947 struct amdgpu_device *adev = smu->adev; 975 948 struct smu_table_context *smu_table = &smu->smu_table; 976 949 struct smu_table *tables = smu_table->tables; 950 + struct smu_table *driver_table = &(smu_table->driver_table); 951 + uint32_t max_table_size = 0; 977 952 int ret, i; 978 953 954 + /* VRAM allocation for tool table */ 955 + if (tables[SMU_TABLE_PMSTATUSLOG].size) { 956 + ret = amdgpu_bo_create_kernel(adev, 957 + tables[SMU_TABLE_PMSTATUSLOG].size, 958 + tables[SMU_TABLE_PMSTATUSLOG].align, 959 + tables[SMU_TABLE_PMSTATUSLOG].domain, 960 + &tables[SMU_TABLE_PMSTATUSLOG].bo, 961 + &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 962 + &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 963 + if (ret) { 964 + pr_err("VRAM allocation for tool table failed!\n"); 965 + return ret; 966 + } 967 + } 968 + 969 + /* VRAM allocation for driver table */ 979 970 for (i = 0; i < SMU_TABLE_COUNT; i++) { 980 971 if (tables[i].size == 0) 981 972 continue; 982 - ret = amdgpu_bo_create_kernel(adev, 983 - tables[i].size, 984 - tables[i].align, 985 - tables[i].domain, 986 - &tables[i].bo, 987 - 
&tables[i].mc_address, 988 - &tables[i].cpu_addr); 989 - if (ret) 990 - goto failed; 991 - } 992 973 993 - return 0; 994 - failed: 995 - while (--i >= 0) { 996 - if (tables[i].size == 0) 974 + if (i == SMU_TABLE_PMSTATUSLOG) 997 975 continue; 998 - amdgpu_bo_free_kernel(&tables[i].bo, 999 - &tables[i].mc_address, 1000 - &tables[i].cpu_addr); 1001 976 977 + if (max_table_size < tables[i].size) 978 + max_table_size = tables[i].size; 1002 979 } 980 + 981 + driver_table->size = max_table_size; 982 + driver_table->align = PAGE_SIZE; 983 + driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM; 984 + 985 + ret = amdgpu_bo_create_kernel(adev, 986 + driver_table->size, 987 + driver_table->align, 988 + driver_table->domain, 989 + &driver_table->bo, 990 + &driver_table->mc_address, 991 + &driver_table->cpu_addr); 992 + if (ret) { 993 + pr_err("VRAM allocation for driver table failed!\n"); 994 + if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 995 + amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 996 + &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 997 + &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 998 + } 999 + 1003 1000 return ret; 1004 1001 } 1005 1002 ··· 1031 980 { 1032 981 struct smu_table_context *smu_table = &smu->smu_table; 1033 982 struct smu_table *tables = smu_table->tables; 1034 - uint32_t i = 0; 983 + struct smu_table *driver_table = &(smu_table->driver_table); 1035 984 1036 985 if (!tables) 1037 986 return 0; 1038 987 1039 - for (i = 0; i < SMU_TABLE_COUNT; i++) { 1040 - if (tables[i].size == 0) 1041 - continue; 1042 - amdgpu_bo_free_kernel(&tables[i].bo, 1043 - &tables[i].mc_address, 1044 - &tables[i].cpu_addr); 1045 - } 988 + if (tables[SMU_TABLE_PMSTATUSLOG].mc_address) 989 + amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo, 990 + &tables[SMU_TABLE_PMSTATUSLOG].mc_address, 991 + &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr); 992 + 993 + amdgpu_bo_free_kernel(&driver_table->bo, 994 + &driver_table->mc_address, 995 + &driver_table->cpu_addr); 1046 996 1047 997 return 
0; 1048 998 } ··· 1113 1061 } 1114 1062 1115 1063 /* smu_dump_pptable(smu); */ 1064 + if (!amdgpu_sriov_vf(adev)) { 1065 + ret = smu_set_driver_table_location(smu); 1066 + if (ret) 1067 + return ret; 1116 1068 1117 - /* 1118 - * Copy pptable bo in the vram to smc with SMU MSGs such as 1119 - * SetDriverDramAddr and TransferTableDram2Smu. 1120 - */ 1121 - ret = smu_write_pptable(smu); 1122 - if (ret) 1123 - return ret; 1069 + /* 1070 + * Copy pptable bo in the vram to smc with SMU MSGs such as 1071 + * SetDriverDramAddr and TransferTableDram2Smu. 1072 + */ 1073 + ret = smu_write_pptable(smu); 1074 + if (ret) 1075 + return ret; 1124 1076 1125 - /* issue Run*Btc msg */ 1126 - ret = smu_run_btc(smu); 1127 - if (ret) 1128 - return ret; 1077 + /* issue Run*Btc msg */ 1078 + ret = smu_run_btc(smu); 1079 + if (ret) 1080 + return ret; 1081 + ret = smu_feature_set_allowed_mask(smu); 1082 + if (ret) 1083 + return ret; 1129 1084 1130 - ret = smu_feature_set_allowed_mask(smu); 1131 - if (ret) 1132 - return ret; 1133 - 1134 - ret = smu_system_features_control(smu, true); 1135 - if (ret) 1136 - return ret; 1137 - 1085 + ret = smu_system_features_control(smu, true); 1086 + if (ret) 1087 + return ret; 1088 + } 1138 1089 if (adev->asic_type != CHIP_ARCTURUS) { 1139 1090 ret = smu_notify_display_change(smu); 1140 1091 if (ret) ··· 1190 1135 /* 1191 1136 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools. 
1192 1137 */ 1193 - ret = smu_set_tool_table_location(smu); 1194 - 1138 + if (!amdgpu_sriov_vf(adev)) { 1139 + ret = smu_set_tool_table_location(smu); 1140 + } 1195 1141 if (!smu_is_dpm_running(smu)) 1196 1142 pr_info("dpm has been disabled\n"); 1197 1143 ··· 1297 1241 return ret; 1298 1242 } 1299 1243 1300 - if (adev->flags & AMD_IS_APU) { 1244 + if (smu->is_apu) { 1301 1245 smu_powergate_sdma(&adev->smu, false); 1302 1246 smu_powergate_vcn(&adev->smu, false); 1303 1247 smu_powergate_jpeg(&adev->smu, false); 1304 1248 smu_set_gfx_cgpg(&adev->smu, true); 1305 1249 } 1250 + 1251 + if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1252 + return 0; 1306 1253 1307 1254 if (!smu->pm_enabled) 1308 1255 return 0; ··· 1349 1290 1350 1291 static int smu_stop_dpms(struct smu_context *smu) 1351 1292 { 1352 - return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures); 1293 + return smu_system_features_control(smu, false); 1353 1294 } 1354 1295 1355 1296 static int smu_hw_fini(void *handle) ··· 1359 1300 struct smu_table_context *table_context = &smu->smu_table; 1360 1301 int ret = 0; 1361 1302 1362 - if (adev->flags & AMD_IS_APU) { 1303 + if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1304 + return 0; 1305 + 1306 + if (smu->is_apu) { 1363 1307 smu_powergate_sdma(&adev->smu, true); 1364 1308 smu_powergate_vcn(&adev->smu, true); 1365 1309 smu_powergate_jpeg(&adev->smu, true); 1366 1310 } 1367 1311 1368 - ret = smu_stop_thermal_control(smu); 1369 - if (ret) { 1370 - pr_warn("Fail to stop thermal control!\n"); 1371 - return ret; 1372 - } 1312 + if (!smu->pm_enabled) 1313 + return 0; 1373 1314 1374 - /* 1375 - * For custom pptable uploading, skip the DPM features 1376 - * disable process on Navi1x ASICs. 1377 - * - As the gfx related features are under control of 1378 - * RLC on those ASICs. RLC reinitialization will be 1379 - * needed to reenable them. That will cost much more 1380 - * efforts. 
1381 - * 1382 - * - SMU firmware can handle the DPM reenablement 1383 - * properly. 1384 - */ 1385 - if (!smu->uploading_custom_pp_table || 1386 - !((adev->asic_type >= CHIP_NAVI10) && 1387 - (adev->asic_type <= CHIP_NAVI12))) { 1388 - ret = smu_stop_dpms(smu); 1315 + if (!amdgpu_sriov_vf(adev)){ 1316 + ret = smu_stop_thermal_control(smu); 1389 1317 if (ret) { 1390 - pr_warn("Fail to stop Dpms!\n"); 1318 + pr_warn("Fail to stop thermal control!\n"); 1391 1319 return ret; 1320 + } 1321 + 1322 + /* 1323 + * For custom pptable uploading, skip the DPM features 1324 + * disable process on Navi1x ASICs. 1325 + * - As the gfx related features are under control of 1326 + * RLC on those ASICs. RLC reinitialization will be 1327 + * needed to reenable them. That will cost much more 1328 + * efforts. 1329 + * 1330 + * - SMU firmware can handle the DPM reenablement 1331 + * properly. 1332 + */ 1333 + if (!smu->uploading_custom_pp_table || 1334 + !((adev->asic_type >= CHIP_NAVI10) && 1335 + (adev->asic_type <= CHIP_NAVI12))) { 1336 + ret = smu_stop_dpms(smu); 1337 + if (ret) { 1338 + pr_warn("Fail to stop Dpms!\n"); 1339 + return ret; 1340 + } 1392 1341 } 1393 1342 } 1394 1343 ··· 1443 1376 struct smu_context *smu = &adev->smu; 1444 1377 bool baco_feature_is_enabled = false; 1445 1378 1446 - if(!(adev->flags & AMD_IS_APU)) 1379 + if (!smu->pm_enabled) 1380 + return 0; 1381 + 1382 + if(!smu->is_apu) 1447 1383 baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); 1448 1384 1449 1385 ret = smu_system_features_control(smu, false); ··· 1477 1407 int ret; 1478 1408 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 1479 1409 struct smu_context *smu = &adev->smu; 1410 + 1411 + if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) 1412 + return 0; 1413 + 1414 + if (!smu->pm_enabled) 1415 + return 0; 1480 1416 1481 1417 pr_info("SMU is resuming...\n"); 1482 1418 ··· 1682 1606 return 0; 1683 1607 } 1684 1608 1685 - static int 
smu_default_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) 1686 - { 1687 - int ret = 0; 1688 - uint32_t sclk_mask, mclk_mask, soc_mask; 1689 - 1690 - switch (level) { 1691 - case AMD_DPM_FORCED_LEVEL_HIGH: 1692 - ret = smu_force_dpm_limit_value(smu, true); 1693 - break; 1694 - case AMD_DPM_FORCED_LEVEL_LOW: 1695 - ret = smu_force_dpm_limit_value(smu, false); 1696 - break; 1697 - case AMD_DPM_FORCED_LEVEL_AUTO: 1698 - case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1699 - ret = smu_unforce_dpm_levels(smu); 1700 - break; 1701 - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1702 - case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1703 - case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1704 - ret = smu_get_profiling_clk_mask(smu, level, 1705 - &sclk_mask, 1706 - &mclk_mask, 1707 - &soc_mask); 1708 - if (ret) 1709 - return ret; 1710 - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); 1711 - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); 1712 - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); 1713 - break; 1714 - case AMD_DPM_FORCED_LEVEL_MANUAL: 1715 - case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1716 - default: 1717 - break; 1718 - } 1719 - return ret; 1720 - } 1721 - 1722 1609 int smu_adjust_power_state_dynamic(struct smu_context *smu, 1723 1610 enum amd_dpm_forced_level level, 1724 1611 bool skip_display_settings) ··· 1709 1670 } 1710 1671 1711 1672 if (!skip_display_settings) { 1712 - ret = smu_notify_smc_dispaly_config(smu); 1673 + ret = smu_notify_smc_display_config(smu); 1713 1674 if (ret) { 1714 1675 pr_err("Failed to notify smc display config!"); 1715 1676 return ret; ··· 1719 1680 if (smu_dpm_ctx->dpm_level != level) { 1720 1681 ret = smu_asic_set_performance_level(smu, level); 1721 1682 if (ret) { 1722 - ret = smu_default_set_performance_level(smu, level); 1723 - if (ret) { 1724 - pr_err("Failed to set performance level!"); 1725 - return ret; 1726 - } 1683 + pr_err("Failed to set performance level!"); 1684 + return 
ret; 1727 1685 } 1728 1686 1729 1687 /* update the saved copy */ ··· 1962 1926 1963 1927 int smu_write_watermarks_table(struct smu_context *smu) 1964 1928 { 1965 - int ret = 0; 1966 - struct smu_table_context *smu_table = &smu->smu_table; 1967 - struct smu_table *table = NULL; 1929 + void *watermarks_table = smu->smu_table.watermarks_table; 1968 1930 1969 - table = &smu_table->tables[SMU_TABLE_WATERMARKS]; 1970 - 1971 - if (!table->cpu_addr) 1931 + if (!watermarks_table) 1972 1932 return -EINVAL; 1973 1933 1974 - ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, 1934 + return smu_update_table(smu, 1935 + SMU_TABLE_WATERMARKS, 1936 + 0, 1937 + watermarks_table, 1975 1938 true); 1976 - 1977 - return ret; 1978 1939 } 1979 1940 1980 1941 int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, 1981 1942 struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) 1982 1943 { 1983 - struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; 1984 - void *table = watermarks->cpu_addr; 1944 + void *table = smu->smu_table.watermarks_table; 1945 + 1946 + if (!table) 1947 + return -EINVAL; 1985 1948 1986 1949 mutex_lock(&smu->mutex); 1987 1950 ··· 2319 2284 { 2320 2285 int ret = 0; 2321 2286 2322 - mutex_lock(&smu->mutex); 2323 - 2324 2287 if (smu->ppt_funcs->set_active_display_count) 2325 2288 ret = smu->ppt_funcs->set_active_display_count(smu, count); 2326 - 2327 - mutex_unlock(&smu->mutex); 2328 2289 2329 2290 return ret; 2330 2291 } ··· 2468 2437 2469 2438 mutex_lock(&smu->mutex); 2470 2439 2471 - if (smu->ppt_funcs->baco_is_support) 2440 + if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) 2472 2441 ret = smu->ppt_funcs->baco_is_support(smu); 2473 2442 2474 2443 mutex_unlock(&smu->mutex);
+142 -7
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
··· 179 179 TAB_MAP(DRIVER_SMU_CONFIG), 180 180 TAB_MAP(OVERDRIVE), 181 181 TAB_MAP(I2C_COMMANDS), 182 + TAB_MAP(ACTIVITY_MONITOR_COEFF), 182 183 }; 183 184 184 185 static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { ··· 302 301 303 302 SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), 304 303 PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); 304 + 305 + SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, 306 + sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE, 307 + AMDGPU_GEM_DOMAIN_VRAM); 305 308 306 309 smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); 307 310 if (!smu_table->metrics_table) ··· 872 867 struct smu_table_context *smu_table= &smu->smu_table; 873 868 int ret = 0; 874 869 870 + mutex_lock(&smu->metrics_lock); 875 871 if (!smu_table->metrics_time || 876 872 time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { 877 873 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 878 874 (void *)smu_table->metrics_table, false); 879 875 if (ret) { 880 876 pr_info("Failed to export SMU metrics table!\n"); 877 + mutex_unlock(&smu->metrics_lock); 881 878 return ret; 882 879 } 883 880 smu_table->metrics_time = jiffies; 884 881 } 885 882 886 883 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); 884 + mutex_unlock(&smu->metrics_lock); 887 885 888 886 return ret; 889 887 } ··· 1318 1310 static int arcturus_get_power_profile_mode(struct smu_context *smu, 1319 1311 char *buf) 1320 1312 { 1313 + DpmActivityMonitorCoeffInt_t activity_monitor; 1321 1314 static const char *profile_name[] = { 1322 1315 "BOOTUP_DEFAULT", 1323 1316 "3D_FULL_SCREEN", ··· 1328 1319 "COMPUTE", 1329 1320 "CUSTOM"}; 1330 1321 static const char *title[] = { 1331 - "PROFILE_INDEX(NAME)"}; 1322 + "PROFILE_INDEX(NAME)", 1323 + "CLOCK_TYPE(NAME)", 1324 + "FPS", 1325 + "UseRlcBusy", 1326 + "MinActiveFreqType", 1327 + "MinActiveFreq", 1328 + "BoosterFreqType", 1329 + "BoosterFreq", 1330 + "PD_Data_limit_c", 
1331 + "PD_Data_error_coeff", 1332 + "PD_Data_error_rate_coeff"}; 1332 1333 uint32_t i, size = 0; 1333 1334 int16_t workload_type = 0; 1335 + int result = 0; 1336 + uint32_t smu_version; 1334 1337 1335 - if (!smu->pm_enabled || !buf) 1338 + if (!buf) 1336 1339 return -EINVAL; 1337 1340 1338 - size += sprintf(buf + size, "%16s\n", 1341 + result = smu_get_smc_version(smu, NULL, &smu_version); 1342 + if (result) 1343 + return result; 1344 + 1345 + if (smu_version >= 0x360d00) 1346 + size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n", 1347 + title[0], title[1], title[2], title[3], title[4], title[5], 1348 + title[6], title[7], title[8], title[9], title[10]); 1349 + else 1350 + size += sprintf(buf + size, "%16s\n", 1339 1351 title[0]); 1340 1352 1341 1353 for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { ··· 1368 1338 if (workload_type < 0) 1369 1339 continue; 1370 1340 1341 + if (smu_version >= 0x360d00) { 1342 + result = smu_update_table(smu, 1343 + SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1344 + workload_type, 1345 + (void *)(&activity_monitor), 1346 + false); 1347 + if (result) { 1348 + pr_err("[%s] Failed to get activity monitor!", __func__); 1349 + return result; 1350 + } 1351 + } 1352 + 1371 1353 size += sprintf(buf + size, "%2d %14s%s\n", 1372 1354 i, profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); 1355 + 1356 + if (smu_version >= 0x360d00) { 1357 + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 1358 + " ", 1359 + 0, 1360 + "GFXCLK", 1361 + activity_monitor.Gfx_FPS, 1362 + activity_monitor.Gfx_UseRlcBusy, 1363 + activity_monitor.Gfx_MinActiveFreqType, 1364 + activity_monitor.Gfx_MinActiveFreq, 1365 + activity_monitor.Gfx_BoosterFreqType, 1366 + activity_monitor.Gfx_BoosterFreq, 1367 + activity_monitor.Gfx_PD_Data_limit_c, 1368 + activity_monitor.Gfx_PD_Data_error_coeff, 1369 + activity_monitor.Gfx_PD_Data_error_rate_coeff); 1370 + 1371 + size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n", 1372 + " ", 1373 + 1, 1374 + "UCLK", 1375 + activity_monitor.Mem_FPS, 1376 + activity_monitor.Mem_UseRlcBusy, 1377 + activity_monitor.Mem_MinActiveFreqType, 1378 + activity_monitor.Mem_MinActiveFreq, 1379 + activity_monitor.Mem_BoosterFreqType, 1380 + activity_monitor.Mem_BoosterFreq, 1381 + activity_monitor.Mem_PD_Data_limit_c, 1382 + activity_monitor.Mem_PD_Data_error_coeff, 1383 + activity_monitor.Mem_PD_Data_error_rate_coeff); 1384 + } 1373 1385 } 1374 1386 1375 1387 return size; ··· 1421 1349 long *input, 1422 1350 uint32_t size) 1423 1351 { 1352 + DpmActivityMonitorCoeffInt_t activity_monitor; 1424 1353 int workload_type = 0; 1425 1354 uint32_t profile_mode = input[size]; 1426 1355 int ret = 0; 1427 - 1428 - if (!smu->pm_enabled) 1429 - return -EINVAL; 1356 + uint32_t smu_version; 1430 1357 1431 1358 if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { 1432 1359 pr_err("Invalid power profile mode %d\n", profile_mode); 1433 1360 return -EINVAL; 1361 + } 1362 + 1363 + ret = smu_get_smc_version(smu, NULL, &smu_version); 1364 + if (ret) 1365 + return ret; 1366 + 1367 + if ((profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) && 1368 + (smu_version >=0x360d00)) { 1369 + ret = smu_update_table(smu, 1370 + SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1371 + WORKLOAD_PPLIB_CUSTOM_BIT, 1372 + (void *)(&activity_monitor), 
1373 + false); 1374 + if (ret) { 1375 + pr_err("[%s] Failed to get activity monitor!", __func__); 1376 + return ret; 1377 + } 1378 + 1379 + switch (input[0]) { 1380 + case 0: /* Gfxclk */ 1381 + activity_monitor.Gfx_FPS = input[1]; 1382 + activity_monitor.Gfx_UseRlcBusy = input[2]; 1383 + activity_monitor.Gfx_MinActiveFreqType = input[3]; 1384 + activity_monitor.Gfx_MinActiveFreq = input[4]; 1385 + activity_monitor.Gfx_BoosterFreqType = input[5]; 1386 + activity_monitor.Gfx_BoosterFreq = input[6]; 1387 + activity_monitor.Gfx_PD_Data_limit_c = input[7]; 1388 + activity_monitor.Gfx_PD_Data_error_coeff = input[8]; 1389 + activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9]; 1390 + break; 1391 + case 1: /* Uclk */ 1392 + activity_monitor.Mem_FPS = input[1]; 1393 + activity_monitor.Mem_UseRlcBusy = input[2]; 1394 + activity_monitor.Mem_MinActiveFreqType = input[3]; 1395 + activity_monitor.Mem_MinActiveFreq = input[4]; 1396 + activity_monitor.Mem_BoosterFreqType = input[5]; 1397 + activity_monitor.Mem_BoosterFreq = input[6]; 1398 + activity_monitor.Mem_PD_Data_limit_c = input[7]; 1399 + activity_monitor.Mem_PD_Data_error_coeff = input[8]; 1400 + activity_monitor.Mem_PD_Data_error_rate_coeff = input[9]; 1401 + break; 1402 + } 1403 + 1404 + ret = smu_update_table(smu, 1405 + SMU_TABLE_ACTIVITY_MONITOR_COEFF, 1406 + WORKLOAD_PPLIB_CUSTOM_BIT, 1407 + (void *)(&activity_monitor), 1408 + true); 1409 + if (ret) { 1410 + pr_err("[%s] Failed to set activity monitor!", __func__); 1411 + return ret; 1412 + } 1434 1413 } 1435 1414 1436 1415 /* ··· 2022 1899 SwI2cRequest_t req; 2023 1900 struct amdgpu_device *adev = to_amdgpu_device(control); 2024 1901 struct smu_table_context *smu_table = &adev->smu.smu_table; 2025 - struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS]; 1902 + struct smu_table *table = &smu_table->driver_table; 2026 1903 2027 1904 memset(&req, 0, sizeof(req)); 2028 1905 arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data); ··· 2176 
2053 static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) 2177 2054 { 2178 2055 struct amdgpu_device *adev = to_amdgpu_device(control); 2056 + struct smu_context *smu = &adev->smu; 2179 2057 int res; 2058 + 2059 + if (!smu->pm_enabled) 2060 + return -EOPNOTSUPP; 2180 2061 2181 2062 control->owner = THIS_MODULE; 2182 2063 control->class = I2C_CLASS_SPD; ··· 2197 2070 2198 2071 static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control) 2199 2072 { 2073 + struct amdgpu_device *adev = to_amdgpu_device(control); 2074 + struct smu_context *smu = &adev->smu; 2075 + 2076 + if (!smu->pm_enabled) 2077 + return; 2078 + 2200 2079 i2c_del_adapter(control); 2201 2080 } 2202 2081 ··· 2247 2114 .get_profiling_clk_mask = arcturus_get_profiling_clk_mask, 2248 2115 .get_power_profile_mode = arcturus_get_power_profile_mode, 2249 2116 .set_power_profile_mode = arcturus_set_power_profile_mode, 2117 + .set_performance_level = smu_v11_0_set_performance_level, 2250 2118 /* debug (internal used) */ 2251 2119 .dump_pptable = arcturus_dump_pptable, 2252 2120 .get_power_limit = arcturus_get_power_limit, ··· 2271 2137 .check_fw_version = smu_v11_0_check_fw_version, 2272 2138 .write_pptable = smu_v11_0_write_pptable, 2273 2139 .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, 2140 + .set_driver_table_location = smu_v11_0_set_driver_table_location, 2274 2141 .set_tool_table_location = smu_v11_0_set_tool_table_location, 2275 2142 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 2276 2143 .system_features_control = smu_v11_0_system_features_control,
+3
drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
··· 99 99 100 100 PHM_FUNC_CHECK(hwmgr); 101 101 102 + if (!hwmgr->not_vf) 103 + return 0; 104 + 102 105 if (!smum_is_dpm_running(hwmgr)) { 103 106 pr_info("dpm has been disabled\n"); 104 107 return 0;
+3 -12
drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
··· 282 282 283 283 int hwmgr_hw_fini(struct pp_hwmgr *hwmgr) 284 284 { 285 - if (!hwmgr->not_vf) 286 - return 0; 287 - 288 - if (!hwmgr || !hwmgr->pm_en) 285 + if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf) 289 286 return 0; 290 287 291 288 phm_stop_thermal_controller(hwmgr); ··· 302 305 { 303 306 int ret = 0; 304 307 305 - if (!hwmgr->not_vf) 306 - return 0; 307 - 308 - if (!hwmgr || !hwmgr->pm_en) 308 + if (!hwmgr || !hwmgr->pm_en || !hwmgr->not_vf) 309 309 return 0; 310 310 311 311 phm_disable_smc_firmware_ctf(hwmgr); ··· 321 327 { 322 328 int ret = 0; 323 329 324 - if (!hwmgr->not_vf) 325 - return 0; 326 - 327 330 if (!hwmgr) 328 331 return -EINVAL; 329 332 330 - if (!hwmgr->pm_en) 333 + if (!hwmgr->not_vf || !hwmgr->pm_en) 331 334 return 0; 332 335 333 336 ret = phm_setup_asic(hwmgr);
+2 -1
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
··· 3538 3538 if (!data->registry_data.mclk_dpm_key_disabled) { 3539 3539 if (data->smc_state_table.mem_boot_level != 3540 3540 data->dpm_table.mem_table.dpm_state.soft_min_level) { 3541 - if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) { 3541 + if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) 3542 + && hwmgr->not_vf) { 3542 3543 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr); 3543 3544 smum_send_msg_to_smc_with_parameter(hwmgr, 3544 3545 PPSMC_MSG_SetSoftMinSocclkByIndex,
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
··· 872 872 "[OverridePcieParameters] Attempt to override pcie params failed!", 873 873 return ret); 874 874 875 - data->pcie_parameters_override = 1; 875 + data->pcie_parameters_override = true; 876 876 data->pcie_gen_level1 = pcie_gen; 877 877 data->pcie_width_level1 = pcie_width; 878 878
+15 -1
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
··· 254 254 unsigned long metrics_time; 255 255 void *metrics_table; 256 256 void *clocks_table; 257 + void *watermarks_table; 257 258 258 259 void *max_sustainable_clocks; 259 260 struct smu_bios_boot_up_values boot_values; 260 261 void *driver_pptable; 261 262 struct smu_table *tables; 263 + /* 264 + * The driver table is just a staging buffer for 265 + * uploading/downloading content from the SMU. 266 + * 267 + * And the table_id for SMU_MSG_TransferTableSmu2Dram/ 268 + * SMU_MSG_TransferTableDram2Smu instructs SMU 269 + * which content driver is interested. 270 + */ 271 + struct smu_table driver_table; 262 272 struct smu_table memory_pool; 263 273 uint8_t thermal_controller_type; 264 274 ··· 360 350 const struct pptable_funcs *ppt_funcs; 361 351 struct mutex mutex; 362 352 struct mutex sensor_lock; 353 + struct mutex metrics_lock; 363 354 uint64_t pool_size; 364 355 365 356 struct smu_table_context smu_table; ··· 454 443 int (*pre_display_config_changed)(struct smu_context *smu); 455 444 int (*display_config_changed)(struct smu_context *smu); 456 445 int (*apply_clocks_adjust_rules)(struct smu_context *smu); 457 - int (*notify_smc_dispaly_config)(struct smu_context *smu); 446 + int (*notify_smc_display_config)(struct smu_context *smu); 458 447 int (*force_dpm_limit_value)(struct smu_context *smu, bool highest); 459 448 int (*unforce_dpm_levels)(struct smu_context *smu); 460 449 int (*get_profiling_clk_mask)(struct smu_context *smu, ··· 507 496 int (*set_gfx_cgpg)(struct smu_context *smu, bool enable); 508 497 int (*write_pptable)(struct smu_context *smu); 509 498 int (*set_min_dcef_deep_sleep)(struct smu_context *smu); 499 + int (*set_driver_table_location)(struct smu_context *smu); 510 500 int (*set_tool_table_location)(struct smu_context *smu); 511 501 int (*notify_memory_pool_location)(struct smu_context *smu); 512 502 int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu); ··· 708 696 uint32_t min, uint32_t max); 709 697 int 
smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, 710 698 uint32_t min, uint32_t max); 699 + int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type, 700 + uint32_t *min_value, uint32_t *max_value); 711 701 enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); 712 702 int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); 713 703 int smu_set_display_count(struct smu_context *smu, uint32_t count);
+9 -5
drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h
··· 622 622 uint16_t PccThresholdHigh; 623 623 uint32_t PaddingAPCC[6]; //FIXME pending SPEC 624 624 625 + // OOB Settings 626 + uint16_t BasePerformanceCardPower; 627 + uint16_t MaxPerformanceCardPower; 628 + uint16_t BasePerformanceFrequencyCap; //In Mhz 629 + uint16_t MaxPerformanceFrequencyCap; //In Mhz 630 + 625 631 // SECTION: Reserved 626 - uint32_t Reserved[11]; 632 + uint32_t Reserved[9]; 627 633 628 634 // SECTION: BOARD PARAMETERS 629 635 ··· 829 823 uint32_t MmHubPadding[8]; // SMU internal use 830 824 } AvfsFuseOverride_t; 831 825 832 - /* NOT CURRENTLY USED 833 826 typedef struct { 834 827 uint8_t Gfx_ActiveHystLimit; 835 828 uint8_t Gfx_IdleHystLimit; ··· 871 866 872 867 uint32_t MmHubPadding[8]; // SMU internal use 873 868 } DpmActivityMonitorCoeffInt_t; 874 - */ 875 869 876 870 // These defines are used with the following messages: 877 871 // SMC_MSG_TransferTableDram2Smu ··· 882 878 #define TABLE_PMSTATUSLOG 4 883 879 #define TABLE_SMU_METRICS 5 884 880 #define TABLE_DRIVER_SMU_CONFIG 6 885 - //#define TABLE_ACTIVITY_MONITOR_COEFF 7 886 881 #define TABLE_OVERDRIVE 7 887 882 #define TABLE_WAFL_XGMI_TOPOLOGY 8 888 883 #define TABLE_I2C_COMMANDS 9 889 - #define TABLE_COUNT 10 884 + #define TABLE_ACTIVITY_MONITOR_COEFF 10 885 + #define TABLE_COUNT 11 890 886 891 887 // These defines are used with the SMC_MSG_SetUclkFastSwitch message. 892 888 typedef enum {
+6 -1
drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
··· 27 27 28 28 #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF 29 29 #define SMU11_DRIVER_IF_VERSION_VG20 0x13 30 - #define SMU11_DRIVER_IF_VERSION_ARCT 0x10 30 + #define SMU11_DRIVER_IF_VERSION_ARCT 0x12 31 31 #define SMU11_DRIVER_IF_VERSION_NV10 0x33 32 32 #define SMU11_DRIVER_IF_VERSION_NV14 0x34 33 33 ··· 170 170 171 171 int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu); 172 172 173 + int smu_v11_0_set_driver_table_location(struct smu_context *smu); 174 + 173 175 int smu_v11_0_set_tool_table_location(struct smu_context *smu); 174 176 175 177 int smu_v11_0_notify_memory_pool_location(struct smu_context *smu); ··· 263 261 int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size); 264 262 265 263 uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu); 264 + 265 + int smu_v11_0_set_performance_level(struct smu_context *smu, 266 + enum amd_dpm_forced_level level); 266 267 267 268 #endif
+2
drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
··· 90 90 int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, 91 91 uint32_t min, uint32_t max); 92 92 93 + int smu_v12_0_set_driver_table_location(struct smu_context *smu); 94 + 93 95 #endif
+82 -14
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
··· 555 555 return -ENOMEM; 556 556 smu_table->metrics_time = 0; 557 557 558 + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 559 + if (!smu_table->watermarks_table) 560 + return -ENOMEM; 561 + 558 562 return 0; 559 563 } 560 564 ··· 568 564 struct smu_table_context *smu_table= &smu->smu_table; 569 565 int ret = 0; 570 566 567 + mutex_lock(&smu->metrics_lock); 571 568 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { 572 569 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 573 570 (void *)smu_table->metrics_table, false); 574 571 if (ret) { 575 572 pr_info("Failed to export SMU metrics table!\n"); 573 + mutex_unlock(&smu->metrics_lock); 576 574 return ret; 577 575 } 578 576 smu_table->metrics_time = jiffies; 579 577 } 580 578 581 579 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); 580 + mutex_unlock(&smu->metrics_lock); 582 581 583 582 return ret; 584 583 } ··· 1381 1374 return ret; 1382 1375 } 1383 1376 1384 - static int navi10_notify_smc_dispaly_config(struct smu_context *smu) 1377 + static int navi10_notify_smc_display_config(struct smu_context *smu) 1385 1378 { 1386 1379 struct smu_clocks min_clocks = {0}; 1387 1380 struct pp_display_clock_request clock_req; ··· 1586 1579 return 0; 1587 1580 } 1588 1581 1589 - static int navi10_set_peak_clock_by_device(struct smu_context *smu) 1582 + static int navi10_set_performance_level(struct smu_context *smu, 1583 + enum amd_dpm_forced_level level); 1584 + 1585 + static int navi10_set_standard_performance_level(struct smu_context *smu) 1590 1586 { 1591 1587 struct amdgpu_device *adev = smu->adev; 1592 1588 int ret = 0; 1593 1589 uint32_t sclk_freq = 0, uclk_freq = 0; 1594 - uint32_t uclk_level = 0; 1590 + 1591 + switch (adev->asic_type) { 1592 + case CHIP_NAVI10: 1593 + sclk_freq = NAVI10_UMD_PSTATE_PROFILING_GFXCLK; 1594 + uclk_freq = NAVI10_UMD_PSTATE_PROFILING_MEMCLK; 1595 + break; 1596 + case CHIP_NAVI14: 1597 + 
sclk_freq = NAVI14_UMD_PSTATE_PROFILING_GFXCLK; 1598 + uclk_freq = NAVI14_UMD_PSTATE_PROFILING_MEMCLK; 1599 + break; 1600 + default: 1601 + /* by default, this is same as auto performance level */ 1602 + return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO); 1603 + } 1604 + 1605 + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); 1606 + if (ret) 1607 + return ret; 1608 + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); 1609 + if (ret) 1610 + return ret; 1611 + 1612 + return ret; 1613 + } 1614 + 1615 + static int navi10_set_peak_performance_level(struct smu_context *smu) 1616 + { 1617 + struct amdgpu_device *adev = smu->adev; 1618 + int ret = 0; 1619 + uint32_t sclk_freq = 0, uclk_freq = 0; 1595 1620 1596 1621 switch (adev->asic_type) { 1597 1622 case CHIP_NAVI10: ··· 1664 1625 break; 1665 1626 } 1666 1627 break; 1628 + case CHIP_NAVI12: 1629 + sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK; 1630 + break; 1667 1631 default: 1668 - return -EINVAL; 1632 + ret = smu_get_dpm_level_range(smu, SMU_SCLK, NULL, &sclk_freq); 1633 + if (ret) 1634 + return ret; 1669 1635 } 1670 1636 1671 - ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level); 1672 - if (ret) 1673 - return ret; 1674 - ret = smu_get_dpm_freq_by_index(smu, SMU_UCLK, uclk_level - 1, &uclk_freq); 1637 + ret = smu_get_dpm_level_range(smu, SMU_UCLK, NULL, &uclk_freq); 1675 1638 if (ret) 1676 1639 return ret; 1677 1640 ··· 1687 1646 return ret; 1688 1647 } 1689 1648 1690 - static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) 1649 + static int navi10_set_performance_level(struct smu_context *smu, 1650 + enum amd_dpm_forced_level level) 1691 1651 { 1692 1652 int ret = 0; 1653 + uint32_t sclk_mask, mclk_mask, soc_mask; 1693 1654 1694 1655 switch (level) { 1695 - case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1696 - ret = navi10_set_peak_clock_by_device(smu); 1656 + case AMD_DPM_FORCED_LEVEL_HIGH: 1657 + ret = 
smu_force_dpm_limit_value(smu, true); 1697 1658 break; 1659 + case AMD_DPM_FORCED_LEVEL_LOW: 1660 + ret = smu_force_dpm_limit_value(smu, false); 1661 + break; 1662 + case AMD_DPM_FORCED_LEVEL_AUTO: 1663 + ret = smu_unforce_dpm_levels(smu); 1664 + break; 1665 + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1666 + ret = navi10_set_standard_performance_level(smu); 1667 + break; 1668 + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1669 + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1670 + ret = smu_get_profiling_clk_mask(smu, level, 1671 + &sclk_mask, 1672 + &mclk_mask, 1673 + &soc_mask); 1674 + if (ret) 1675 + return ret; 1676 + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); 1677 + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); 1678 + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); 1679 + break; 1680 + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1681 + ret = navi10_set_peak_performance_level(smu); 1682 + break; 1683 + case AMD_DPM_FORCED_LEVEL_MANUAL: 1684 + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1698 1685 default: 1699 - ret = -EINVAL; 1700 1686 break; 1701 1687 } 1702 - 1703 1688 return ret; 1704 1689 } 1705 1690 ··· 2114 2047 .get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency, 2115 2048 .pre_display_config_changed = navi10_pre_display_config_changed, 2116 2049 .display_config_changed = navi10_display_config_changed, 2117 - .notify_smc_dispaly_config = navi10_notify_smc_dispaly_config, 2050 + .notify_smc_display_config = navi10_notify_smc_display_config, 2118 2051 .force_dpm_limit_value = navi10_force_dpm_limit_value, 2119 2052 .unforce_dpm_levels = navi10_unforce_dpm_levels, 2120 2053 .is_dpm_running = navi10_is_dpm_running, ··· 2147 2080 .check_fw_version = smu_v11_0_check_fw_version, 2148 2081 .write_pptable = smu_v11_0_write_pptable, 2149 2082 .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, 2083 + .set_driver_table_location = smu_v11_0_set_driver_table_location, 2150 2084 .set_tool_table_location 
= smu_v11_0_set_tool_table_location, 2151 2085 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 2152 2086 .system_features_control = smu_v11_0_system_features_control,
+14
drivers/gpu/drm/amd/powerplay/navi10_ppt.h
··· 27 27 #define NAVI10_PEAK_SCLK_XT (1755) 28 28 #define NAVI10_PEAK_SCLK_XL (1625) 29 29 30 + #define NAVI10_UMD_PSTATE_PROFILING_GFXCLK (1300) 31 + #define NAVI10_UMD_PSTATE_PROFILING_SOCCLK (980) 32 + #define NAVI10_UMD_PSTATE_PROFILING_MEMCLK (625) 33 + #define NAVI10_UMD_PSTATE_PROFILING_VCLK (980) 34 + #define NAVI10_UMD_PSTATE_PROFILING_DCLK (850) 35 + 30 36 #define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670) 31 37 #define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448) 32 38 #define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181) 33 39 #define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717) 34 40 #define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448) 41 + 42 + #define NAVI14_UMD_PSTATE_PROFILING_GFXCLK (1200) 43 + #define NAVI14_UMD_PSTATE_PROFILING_SOCCLK (900) 44 + #define NAVI14_UMD_PSTATE_PROFILING_MEMCLK (600) 45 + #define NAVI14_UMD_PSTATE_PROFILING_VCLK (900) 46 + #define NAVI14_UMD_PSTATE_PROFILING_DCLK (800) 47 + 48 + #define NAVI12_UMD_PSTATE_PEAK_GFXCLK (1100) 35 49 36 50 #define NAVI10_VOLTAGE_SCALE (4) 37 51
+46 -7
drivers/gpu/drm/amd/powerplay/renoir_ppt.c
··· 171 171 struct smu_table_context *smu_table= &smu->smu_table; 172 172 int ret = 0; 173 173 174 + mutex_lock(&smu->metrics_lock); 174 175 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { 175 176 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 176 177 (void *)smu_table->metrics_table, false); 177 178 if (ret) { 178 179 pr_info("Failed to export SMU metrics table!\n"); 180 + mutex_unlock(&smu->metrics_lock); 179 181 return ret; 180 182 } 181 183 smu_table->metrics_time = jiffies; 182 184 } 183 185 184 186 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); 187 + mutex_unlock(&smu->metrics_lock); 185 188 186 189 return ret; 187 190 } ··· 208 205 if (!smu_table->metrics_table) 209 206 return -ENOMEM; 210 207 smu_table->metrics_time = 0; 208 + 209 + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 210 + if (!smu_table->watermarks_table) 211 + return -ENOMEM; 211 212 212 213 return 0; 213 214 } ··· 246 239 247 240 memset(&metrics, 0, sizeof(metrics)); 248 241 249 - ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 250 - (void *)&metrics, false); 242 + ret = renoir_get_metrics_table(smu, &metrics); 251 243 if (ret) 252 244 return ret; 253 245 ··· 712 706 return ret; 713 707 } 714 708 715 - static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) 709 + static int renoir_set_performance_level(struct smu_context *smu, 710 + enum amd_dpm_forced_level level) 716 711 { 717 712 int ret = 0; 713 + uint32_t sclk_mask, mclk_mask, soc_mask; 718 714 719 715 switch (level) { 716 + case AMD_DPM_FORCED_LEVEL_HIGH: 717 + ret = smu_force_dpm_limit_value(smu, true); 718 + break; 719 + case AMD_DPM_FORCED_LEVEL_LOW: 720 + ret = smu_force_dpm_limit_value(smu, false); 721 + break; 722 + case AMD_DPM_FORCED_LEVEL_AUTO: 723 + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 724 + ret = smu_unforce_dpm_levels(smu); 725 + break; 726 + case 
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 727 + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 728 + ret = smu_get_profiling_clk_mask(smu, level, 729 + &sclk_mask, 730 + &mclk_mask, 731 + &soc_mask); 732 + if (ret) 733 + return ret; 734 + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); 735 + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); 736 + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); 737 + break; 720 738 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 721 739 ret = renoir_set_peak_clock_by_device(smu); 722 740 break; 741 + case AMD_DPM_FORCED_LEVEL_MANUAL: 742 + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 723 743 default: 724 - ret = -EINVAL; 725 744 break; 726 745 } 727 - 728 746 return ret; 729 747 } 730 748 ··· 807 777 } 808 778 809 779 /* pass data to smu controller */ 810 - ret = smu_write_watermarks_table(smu); 780 + if ((smu->watermarks_bitmap & WATERMARKS_EXIST) && 781 + !(smu->watermarks_bitmap & WATERMARKS_LOADED)) { 782 + ret = smu_write_watermarks_table(smu); 783 + if (ret) { 784 + pr_err("Failed to update WMTABLE!"); 785 + return ret; 786 + } 787 + smu->watermarks_bitmap |= WATERMARKS_LOADED; 788 + } 811 789 812 - return ret; 790 + return 0; 813 791 } 814 792 815 793 static int renoir_get_power_profile_mode(struct smu_context *smu, ··· 920 882 .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, 921 883 .mode2_reset = smu_v12_0_mode2_reset, 922 884 .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range, 885 + .set_driver_table_location = smu_v12_0_set_driver_table_location, 923 886 }; 924 887 925 888 void renoir_set_ppt_funcs(struct smu_context *smu)
+4 -2
drivers/gpu/drm/amd/powerplay/smu_internal.h
··· 61 61 ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0) 62 62 #define smu_set_min_dcef_deep_sleep(smu) \ 63 63 ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0) 64 + #define smu_set_driver_table_location(smu) \ 65 + ((smu)->ppt_funcs->set_driver_table_location ? (smu)->ppt_funcs->set_driver_table_location((smu)) : 0) 64 66 #define smu_set_tool_table_location(smu) \ 65 67 ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0) 66 68 #define smu_notify_memory_pool_location(smu) \ ··· 131 129 ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0) 132 130 #define smu_apply_clocks_adjust_rules(smu) \ 133 131 ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0) 134 - #define smu_notify_smc_dispaly_config(smu) \ 135 - ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0) 132 + #define smu_notify_smc_display_config(smu) \ 133 + ((smu)->ppt_funcs->notify_smc_display_config ? (smu)->ppt_funcs->notify_smc_display_config((smu)) : 0) 136 134 #define smu_force_dpm_limit_value(smu, highest) \ 137 135 ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0) 138 136 #define smu_unforce_dpm_levels(smu) \
+94 -26
drivers/gpu/drm/amd/powerplay/smu_v11_0.c
··· 450 450 451 451 kfree(smu_table->tables); 452 452 kfree(smu_table->metrics_table); 453 + kfree(smu_table->watermarks_table); 453 454 smu_table->tables = NULL; 454 455 smu_table->metrics_table = NULL; 456 + smu_table->watermarks_table = NULL; 455 457 smu_table->metrics_time = 0; 456 458 457 459 ret = smu_v11_0_fini_dpm_context(smu); ··· 776 774 return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100); 777 775 } 778 776 777 + int smu_v11_0_set_driver_table_location(struct smu_context *smu) 778 + { 779 + struct smu_table *driver_table = &smu->smu_table.driver_table; 780 + int ret = 0; 781 + 782 + if (driver_table->mc_address) { 783 + ret = smu_send_smc_msg_with_param(smu, 784 + SMU_MSG_SetDriverDramAddrHigh, 785 + upper_32_bits(driver_table->mc_address)); 786 + if (!ret) 787 + ret = smu_send_smc_msg_with_param(smu, 788 + SMU_MSG_SetDriverDramAddrLow, 789 + lower_32_bits(driver_table->mc_address)); 790 + } 791 + 792 + return ret; 793 + } 794 + 779 795 int smu_v11_0_set_tool_table_location(struct smu_context *smu) 780 796 { 781 797 int ret = 0; ··· 855 835 uint32_t *feature_mask, uint32_t num) 856 836 { 857 837 uint32_t feature_mask_high = 0, feature_mask_low = 0; 838 + struct smu_feature *feature = &smu->smu_feature; 858 839 int ret = 0; 859 840 860 841 if (!feature_mask || num < 2) 861 842 return -EINVAL; 862 843 863 - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); 864 - if (ret) 865 - return ret; 866 - ret = smu_read_smc_arg(smu, &feature_mask_high); 867 - if (ret) 868 - return ret; 844 + if (bitmap_empty(feature->enabled, feature->feature_num)) { 845 + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh); 846 + if (ret) 847 + return ret; 848 + ret = smu_read_smc_arg(smu, &feature_mask_high); 849 + if (ret) 850 + return ret; 869 851 870 - ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); 871 - if (ret) 872 - return ret; 873 - ret = smu_read_smc_arg(smu, &feature_mask_low); 874 - if (ret) 875 - 
return ret; 852 + ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow); 853 + if (ret) 854 + return ret; 855 + ret = smu_read_smc_arg(smu, &feature_mask_low); 856 + if (ret) 857 + return ret; 876 858 877 - feature_mask[0] = feature_mask_low; 878 - feature_mask[1] = feature_mask_high; 859 + feature_mask[0] = feature_mask_low; 860 + feature_mask[1] = feature_mask_high; 861 + } else { 862 + bitmap_copy((unsigned long *)feature_mask, feature->enabled, 863 + feature->feature_num); 864 + } 879 865 880 866 return ret; 881 867 } ··· 893 867 uint32_t feature_mask[2]; 894 868 int ret = 0; 895 869 896 - if (smu->pm_enabled) { 897 - ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures : 898 - SMU_MSG_DisableAllSmuFeatures)); 899 - if (ret) 900 - return ret; 901 - } 902 - 903 - ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); 870 + ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures : 871 + SMU_MSG_DisableAllSmuFeatures)); 904 872 if (ret) 905 873 return ret; 906 874 907 - bitmap_copy(feature->enabled, (unsigned long *)&feature_mask, 908 - feature->feature_num); 909 - bitmap_copy(feature->supported, (unsigned long *)&feature_mask, 910 - feature->feature_num); 875 + if (en) { 876 + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); 877 + if (ret) 878 + return ret; 879 + 880 + bitmap_copy(feature->enabled, (unsigned long *)&feature_mask, 881 + feature->feature_num); 882 + bitmap_copy(feature->supported, (unsigned long *)&feature_mask, 883 + feature->feature_num); 884 + } else { 885 + bitmap_zero(feature->enabled, feature->feature_num); 886 + bitmap_zero(feature->supported, feature->feature_num); 887 + } 911 888 912 889 return ret; 913 890 } ··· 1889 1860 } 1890 1861 return ret; 1891 1862 } 1863 + 1864 + int smu_v11_0_set_performance_level(struct smu_context *smu, 1865 + enum amd_dpm_forced_level level) 1866 + { 1867 + int ret = 0; 1868 + uint32_t sclk_mask, mclk_mask, soc_mask; 1869 + 1870 + switch (level) { 1871 + case 
AMD_DPM_FORCED_LEVEL_HIGH: 1872 + ret = smu_force_dpm_limit_value(smu, true); 1873 + break; 1874 + case AMD_DPM_FORCED_LEVEL_LOW: 1875 + ret = smu_force_dpm_limit_value(smu, false); 1876 + break; 1877 + case AMD_DPM_FORCED_LEVEL_AUTO: 1878 + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: 1879 + ret = smu_unforce_dpm_levels(smu); 1880 + break; 1881 + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: 1882 + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: 1883 + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: 1884 + ret = smu_get_profiling_clk_mask(smu, level, 1885 + &sclk_mask, 1886 + &mclk_mask, 1887 + &soc_mask); 1888 + if (ret) 1889 + return ret; 1890 + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); 1891 + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); 1892 + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); 1893 + break; 1894 + case AMD_DPM_FORCED_LEVEL_MANUAL: 1895 + case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: 1896 + default: 1897 + break; 1898 + } 1899 + return ret; 1900 + } 1901 +
+21 -11
drivers/gpu/drm/amd/powerplay/smu_v12_0.c
··· 159 159 160 160 int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) 161 161 { 162 - if (!(smu->adev->flags & AMD_IS_APU)) 162 + if (!smu->is_apu) 163 163 return 0; 164 164 165 165 if (gate) ··· 170 170 171 171 int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) 172 172 { 173 - if (!(smu->adev->flags & AMD_IS_APU)) 173 + if (!smu->is_apu) 174 174 return 0; 175 175 176 176 if (gate) ··· 181 181 182 182 int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate) 183 183 { 184 - if (!(smu->adev->flags & AMD_IS_APU)) 184 + if (!smu->is_apu) 185 185 return 0; 186 186 187 187 if (gate) ··· 318 318 int smu_v12_0_populate_smc_tables(struct smu_context *smu) 319 319 { 320 320 struct smu_table_context *smu_table = &smu->smu_table; 321 - struct smu_table *table = NULL; 322 - 323 - table = &smu_table->tables[SMU_TABLE_DPMCLOCKS]; 324 - if (!table) 325 - return -EINVAL; 326 - 327 - if (!table->cpu_addr) 328 - return -EINVAL; 329 321 330 322 return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); 331 323 } ··· 502 510 break; 503 511 default: 504 512 return -EINVAL; 513 + } 514 + 515 + return ret; 516 + } 517 + 518 + int smu_v12_0_set_driver_table_location(struct smu_context *smu) 519 + { 520 + struct smu_table *driver_table = &smu->smu_table.driver_table; 521 + int ret = 0; 522 + 523 + if (driver_table->mc_address) { 524 + ret = smu_send_smc_msg_with_param(smu, 525 + SMU_MSG_SetDriverDramAddrHigh, 526 + upper_32_bits(driver_table->mc_address)); 527 + if (!ret) 528 + ret = smu_send_smc_msg_with_param(smu, 529 + SMU_MSG_SetDriverDramAddrLow, 530 + lower_32_bits(driver_table->mc_address)); 505 531 } 506 532 507 533 return ret;
+4 -1
drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c
··· 137 137 priv->smu_tables.entry[table_id].table_id); 138 138 139 139 /* flush hdp cache */ 140 - adev->nbio.funcs->hdp_flush(adev, NULL); 140 + amdgpu_asic_flush_hdp(adev, NULL); 141 141 142 142 memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, 143 143 priv->smu_tables.entry[table_id].size); ··· 150 150 { 151 151 struct smu10_smumgr *priv = 152 152 (struct smu10_smumgr *)(hwmgr->smu_backend); 153 + struct amdgpu_device *adev = hwmgr->adev; 153 154 154 155 PP_ASSERT_WITH_CODE(table_id < MAX_SMU_TABLE, 155 156 "Invalid SMU Table ID!", return -EINVAL;); ··· 161 160 162 161 memcpy(priv->smu_tables.entry[table_id].table, table, 163 162 priv->smu_tables.entry[table_id].size); 163 + 164 + amdgpu_asic_flush_hdp(adev, NULL); 164 165 165 166 smu10_send_msg_to_smc_with_parameter(hwmgr, 166 167 PPSMC_MSG_SetDriverDramAddrHigh,
+4 -1
drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c
··· 58 58 priv->smu_tables.entry[table_id].table_id); 59 59 60 60 /* flush hdp cache */ 61 - adev->nbio.funcs->hdp_flush(adev, NULL); 61 + amdgpu_asic_flush_hdp(adev, NULL); 62 62 63 63 memcpy(table, priv->smu_tables.entry[table_id].table, 64 64 priv->smu_tables.entry[table_id].size); ··· 70 70 uint8_t *table, int16_t table_id) 71 71 { 72 72 struct vega10_smumgr *priv = hwmgr->smu_backend; 73 + struct amdgpu_device *adev = hwmgr->adev; 73 74 74 75 /* under sriov, vbios or hypervisor driver 75 76 * has already copy table to smc so here only skip it ··· 87 86 88 87 memcpy(priv->smu_tables.entry[table_id].table, table, 89 88 priv->smu_tables.entry[table_id].size); 89 + 90 + amdgpu_asic_flush_hdp(adev, NULL); 90 91 91 92 smu9_send_msg_to_smc_with_parameter(hwmgr, 92 93 PPSMC_MSG_SetDriverDramAddrHigh,
+4 -1
drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c
··· 66 66 return -EINVAL); 67 67 68 68 /* flush hdp cache */ 69 - adev->nbio.funcs->hdp_flush(adev, NULL); 69 + amdgpu_asic_flush_hdp(adev, NULL); 70 70 71 71 memcpy(table, priv->smu_tables.entry[table_id].table, 72 72 priv->smu_tables.entry[table_id].size); ··· 84 84 { 85 85 struct vega12_smumgr *priv = 86 86 (struct vega12_smumgr *)(hwmgr->smu_backend); 87 + struct amdgpu_device *adev = hwmgr->adev; 87 88 88 89 PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT, 89 90 "Invalid SMU Table ID!", return -EINVAL); ··· 95 94 96 95 memcpy(priv->smu_tables.entry[table_id].table, table, 97 96 priv->smu_tables.entry[table_id].size); 97 + 98 + amdgpu_asic_flush_hdp(adev, NULL); 98 99 99 100 PP_ASSERT_WITH_CODE(smu9_send_msg_to_smc_with_parameter(hwmgr, 100 101 PPSMC_MSG_SetDriverDramAddrHigh,
+8 -2
drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
··· 189 189 return ret); 190 190 191 191 /* flush hdp cache */ 192 - adev->nbio.funcs->hdp_flush(adev, NULL); 192 + amdgpu_asic_flush_hdp(adev, NULL); 193 193 194 194 memcpy(table, priv->smu_tables.entry[table_id].table, 195 195 priv->smu_tables.entry[table_id].size); ··· 207 207 { 208 208 struct vega20_smumgr *priv = 209 209 (struct vega20_smumgr *)(hwmgr->smu_backend); 210 + struct amdgpu_device *adev = hwmgr->adev; 210 211 int ret = 0; 211 212 212 213 PP_ASSERT_WITH_CODE(table_id < TABLE_COUNT, ··· 219 218 220 219 memcpy(priv->smu_tables.entry[table_id].table, table, 221 220 priv->smu_tables.entry[table_id].size); 221 + 222 + amdgpu_asic_flush_hdp(adev, NULL); 222 223 223 224 PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, 224 225 PPSMC_MSG_SetDriverDramAddrHigh, ··· 245 242 { 246 243 struct vega20_smumgr *priv = 247 244 (struct vega20_smumgr *)(hwmgr->smu_backend); 245 + struct amdgpu_device *adev = hwmgr->adev; 248 246 int ret = 0; 249 247 250 248 memcpy(priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, table, 251 249 priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); 250 + 251 + amdgpu_asic_flush_hdp(adev, NULL); 252 252 253 253 PP_ASSERT_WITH_CODE((ret = vega20_send_msg_to_smc_with_parameter(hwmgr, 254 254 PPSMC_MSG_SetDriverDramAddrHigh, ··· 296 290 return ret); 297 291 298 292 /* flush hdp cache */ 299 - adev->nbio.funcs->hdp_flush(adev, NULL); 293 + amdgpu_asic_flush_hdp(adev, NULL); 300 294 301 295 memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, 302 296 priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size);
+11 -2
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
··· 338 338 return -ENOMEM; 339 339 smu_table->metrics_time = 0; 340 340 341 + smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL); 342 + if (!smu_table->watermarks_table) 343 + return -ENOMEM; 344 + 341 345 return 0; 342 346 } 343 347 ··· 1682 1678 struct smu_table_context *smu_table= &smu->smu_table; 1683 1679 int ret = 0; 1684 1680 1681 + mutex_lock(&smu->metrics_lock); 1685 1682 if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { 1686 1683 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 1687 1684 (void *)smu_table->metrics_table, false); 1688 1685 if (ret) { 1689 1686 pr_info("Failed to export SMU metrics table!\n"); 1687 + mutex_unlock(&smu->metrics_lock); 1690 1688 return ret; 1691 1689 } 1692 1690 smu_table->metrics_time = jiffies; 1693 1691 } 1694 1692 1695 1693 memcpy(metrics_table, smu_table->metrics_table, sizeof(SmuMetrics_t)); 1694 + mutex_unlock(&smu->metrics_lock); 1696 1695 1697 1696 return ret; 1698 1697 } ··· 2239 2232 } 2240 2233 2241 2234 static int 2242 - vega20_notify_smc_dispaly_config(struct smu_context *smu) 2235 + vega20_notify_smc_display_config(struct smu_context *smu) 2243 2236 { 2244 2237 struct vega20_dpm_table *dpm_table = smu->smu_dpm.dpm_context; 2245 2238 struct vega20_single_dpm_table *memtable = &dpm_table->mem_table; ··· 3198 3191 .get_od_percentage = vega20_get_od_percentage, 3199 3192 .get_power_profile_mode = vega20_get_power_profile_mode, 3200 3193 .set_power_profile_mode = vega20_set_power_profile_mode, 3194 + .set_performance_level = smu_v11_0_set_performance_level, 3201 3195 .set_od_percentage = vega20_set_od_percentage, 3202 3196 .set_default_od_settings = vega20_set_default_od_settings, 3203 3197 .od_edit_dpm_table = vega20_odn_edit_dpm_table, ··· 3208 3200 .pre_display_config_changed = vega20_pre_display_config_changed, 3209 3201 .display_config_changed = vega20_display_config_changed, 3210 3202 .apply_clocks_adjust_rules = 
vega20_apply_clocks_adjust_rules, 3211 - .notify_smc_dispaly_config = vega20_notify_smc_dispaly_config, 3203 + .notify_smc_display_config = vega20_notify_smc_display_config, 3212 3204 .force_dpm_limit_value = vega20_force_dpm_limit_value, 3213 3205 .unforce_dpm_levels = vega20_unforce_dpm_levels, 3214 3206 .get_profiling_clk_mask = vega20_get_profiling_clk_mask, ··· 3236 3228 .check_fw_version = smu_v11_0_check_fw_version, 3237 3229 .write_pptable = smu_v11_0_write_pptable, 3238 3230 .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, 3231 + .set_driver_table_location = smu_v11_0_set_driver_table_location, 3239 3232 .set_tool_table_location = smu_v11_0_set_tool_table_location, 3240 3233 .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, 3241 3234 .system_features_control = smu_v11_0_system_features_control,
+4 -3
drivers/gpu/drm/etnaviv/etnaviv_drv.c
··· 65 65 66 66 for (i = 0; i < ETNA_MAX_PIPES; i++) { 67 67 struct etnaviv_gpu *gpu = priv->gpu[i]; 68 - struct drm_sched_rq *rq; 68 + struct drm_gpu_scheduler *sched; 69 69 70 70 if (gpu) { 71 - rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 71 + sched = &gpu->sched; 72 72 drm_sched_entity_init(&ctx->sched_entity[i], 73 - &rq, 1, NULL); 73 + DRM_SCHED_PRIORITY_NORMAL, &sched, 74 + 1, NULL); 74 75 } 75 76 } 76 77
+3 -2
drivers/gpu/drm/lima/lima_sched.c
··· 159 159 struct lima_sched_context *context, 160 160 atomic_t *guilty) 161 161 { 162 - struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL; 162 + struct drm_gpu_scheduler *sched = &pipe->base; 163 163 164 - return drm_sched_entity_init(&context->base, &rq, 1, guilty); 164 + return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL, 165 + &sched, 1, guilty); 165 166 } 166 167 167 168 void lima_sched_context_fini(struct lima_sched_pipe *pipe,
+5 -3
drivers/gpu/drm/panfrost/panfrost_job.c
··· 542 542 { 543 543 struct panfrost_device *pfdev = panfrost_priv->pfdev; 544 544 struct panfrost_job_slot *js = pfdev->js; 545 - struct drm_sched_rq *rq; 545 + struct drm_gpu_scheduler *sched; 546 546 int ret, i; 547 547 548 548 for (i = 0; i < NUM_JOB_SLOTS; i++) { 549 - rq = &js->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 550 - ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], &rq, 1, NULL); 549 + sched = &js->queue[i].sched; 550 + ret = drm_sched_entity_init(&panfrost_priv->sched_entity[i], 551 + DRM_SCHED_PRIORITY_NORMAL, &sched, 552 + 1, NULL); 551 553 if (WARN_ON(ret)) 552 554 return ret; 553 555 }
+1 -2
drivers/gpu/drm/radeon/atombios_crtc.c
··· 244 244 245 245 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 246 246 247 - if (ASIC_IS_DCE8(rdev)) { 247 + if (ASIC_IS_DCE8(rdev)) 248 248 WREG32(vga_control_regs[radeon_crtc->crtc_id], vga_control); 249 - } 250 249 } 251 250 252 251 static void atombios_powergate_crtc(struct drm_crtc *crtc, int state)
+1 -2
drivers/gpu/drm/radeon/atombios_dp.c
··· 813 813 dp_info.use_dpencoder = true; 814 814 index = GetIndexIntoMasterTable(COMMAND, DPEncoderService); 815 815 if (atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) { 816 - if (crev > 1) { 816 + if (crev > 1) 817 817 dp_info.use_dpencoder = false; 818 - } 819 818 } 820 819 821 820 dp_info.enc_id = 0;
+4 -5
drivers/gpu/drm/radeon/atombios_encoders.c
··· 1885 1885 if (ASIC_IS_AVIVO(rdev)) 1886 1886 args.v1.ucCRTC = radeon_crtc->crtc_id; 1887 1887 else { 1888 - if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) { 1888 + if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DAC1) 1889 1889 args.v1.ucCRTC = radeon_crtc->crtc_id; 1890 - } else { 1890 + else 1891 1891 args.v1.ucCRTC = radeon_crtc->crtc_id << 2; 1892 - } 1893 1892 } 1894 1893 switch (radeon_encoder->encoder_id) { 1895 1894 case ENCODER_OBJECT_ID_INTERNAL_TMDS1: ··· 2233 2234 DRM_ERROR("Got encoder index incorrect - returning 0\n"); 2234 2235 return 0; 2235 2236 } 2236 - if (rdev->mode_info.active_encoders & (1 << enc_idx)) { 2237 + if (rdev->mode_info.active_encoders & (1 << enc_idx)) 2237 2238 DRM_ERROR("chosen encoder in use %d\n", enc_idx); 2238 - } 2239 + 2239 2240 rdev->mode_info.active_encoders |= (1 << enc_idx); 2240 2241 return enc_idx; 2241 2242 }
-5
drivers/gpu/drm/radeon/atombios_i2c.c
··· 68 68 memcpy(&out, &buf[1], num); 69 69 args.lpI2CDataOut = cpu_to_le16(out); 70 70 } else { 71 - if (num > ATOM_MAX_HW_I2C_READ) { 72 - DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); 73 - r = -EINVAL; 74 - goto done; 75 - } 76 71 args.ucRegIndex = 0; 77 72 args.lpI2CDataOut = 0; 78 73 }
+2 -2
drivers/gpu/drm/radeon/cik.c
··· 8137 8137 * there. So it is pointless to try to go through that code 8138 8138 * hence why we disable uvd here. 8139 8139 */ 8140 - rdev->has_uvd = 0; 8140 + rdev->has_uvd = false; 8141 8141 return; 8142 8142 } 8143 8143 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; ··· 8209 8209 * there. So it is pointless to try to go through that code 8210 8210 * hence why we disable vce here. 8211 8211 */ 8212 - rdev->has_vce = 0; 8212 + rdev->has_vce = false; 8213 8213 return; 8214 8214 } 8215 8215 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+1 -1
drivers/gpu/drm/radeon/cik_sdma.c
··· 333 333 u32 me_cntl, reg_offset; 334 334 int i; 335 335 336 - if (enable == false) { 336 + if (!enable) { 337 337 cik_sdma_gfx_stop(rdev); 338 338 cik_sdma_rlc_stop(rdev); 339 339 }
+1 -1
drivers/gpu/drm/radeon/evergreen.c
··· 4945 4945 * there. So it is pointless to try to go through that code 4946 4946 * hence why we disable uvd here. 4947 4947 */ 4948 - rdev->has_uvd = 0; 4948 + rdev->has_uvd = false; 4949 4949 return; 4950 4950 } 4951 4951 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+2 -2
drivers/gpu/drm/radeon/ni.c
··· 2017 2017 * there. So it is pointless to try to go through that code 2018 2018 * hence why we disable uvd here. 2019 2019 */ 2020 - rdev->has_uvd = 0; 2020 + rdev->has_uvd = false; 2021 2021 return; 2022 2022 } 2023 2023 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; ··· 2085 2085 * there. So it is pointless to try to go through that code 2086 2086 * hence why we disable vce here. 2087 2087 */ 2088 - rdev->has_vce = 0; 2088 + rdev->has_vce = false; 2089 2089 return; 2090 2090 } 2091 2091 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+5 -5
drivers/gpu/drm/radeon/r100.c
··· 1823 1823 case RADEON_PP_TXFORMAT_2: 1824 1824 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1825 1825 if (idx_value & RADEON_TXFORMAT_NON_POWER2) { 1826 - track->textures[i].use_pitch = 1; 1826 + track->textures[i].use_pitch = true; 1827 1827 } else { 1828 - track->textures[i].use_pitch = 0; 1828 + track->textures[i].use_pitch = false; 1829 1829 track->textures[i].width = 1 << ((idx_value & RADEON_TXFORMAT_WIDTH_MASK) >> RADEON_TXFORMAT_WIDTH_SHIFT); 1830 1830 track->textures[i].height = 1 << ((idx_value & RADEON_TXFORMAT_HEIGHT_MASK) >> RADEON_TXFORMAT_HEIGHT_SHIFT); 1831 1831 } ··· 2387 2387 else 2388 2388 track->num_texture = 6; 2389 2389 track->maxy = 2048; 2390 - track->separate_cube = 1; 2390 + track->separate_cube = true; 2391 2391 } else { 2392 2392 track->num_cb = 4; 2393 2393 track->num_texture = 16; 2394 2394 track->maxy = 4096; 2395 - track->separate_cube = 0; 2395 + track->separate_cube = false; 2396 2396 track->aaresolve = false; 2397 2397 track->aa.robj = NULL; 2398 2398 } ··· 2815 2815 uint32_t temp; 2816 2816 2817 2817 temp = RREG32(RADEON_CONFIG_CNTL); 2818 - if (state == false) { 2818 + if (!state) { 2819 2819 temp &= ~RADEON_CFG_VGA_RAM_EN; 2820 2820 temp |= RADEON_CFG_VGA_IO_DIS; 2821 2821 } else {
+2 -2
drivers/gpu/drm/radeon/r600.c
··· 3053 3053 * there. So it is pointless to try to go through that code 3054 3054 * hence why we disable uvd here. 3055 3055 */ 3056 - rdev->has_uvd = 0; 3056 + rdev->has_uvd = false; 3057 3057 return; 3058 3058 } 3059 3059 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; ··· 3191 3191 uint32_t temp; 3192 3192 3193 3193 temp = RREG32(CONFIG_CNTL); 3194 - if (state == false) { 3194 + if (!state) { 3195 3195 temp &= ~(1<<0); 3196 3196 temp |= (1<<1); 3197 3197 } else {
+2 -13
drivers/gpu/drm/radeon/radeon_atombios.c
··· 570 570 path_size += le16_to_cpu(path->usSize); 571 571 572 572 if (device_support & le16_to_cpu(path->usDeviceTag)) { 573 - uint8_t con_obj_id, con_obj_num, con_obj_type; 573 + uint8_t con_obj_id, con_obj_num; 574 574 575 575 con_obj_id = 576 576 (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK) ··· 578 578 con_obj_num = 579 579 (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK) 580 580 >> ENUM_ID_SHIFT; 581 - con_obj_type = 582 - (le16_to_cpu(path->usConnObjectId) & 583 - OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; 584 581 585 582 /* TODO CV support */ 586 583 if (le16_to_cpu(path->usDeviceTag) == ··· 645 648 router.ddc_valid = false; 646 649 router.cd_valid = false; 647 650 for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) { 648 - uint8_t grph_obj_id, grph_obj_num, grph_obj_type; 649 - 650 - grph_obj_id = 651 - (le16_to_cpu(path->usGraphicObjIds[j]) & 652 - OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 653 - grph_obj_num = 654 - (le16_to_cpu(path->usGraphicObjIds[j]) & 655 - ENUM_ID_MASK) >> ENUM_ID_SHIFT; 656 - grph_obj_type = 651 + uint8_t grph_obj_type = 657 652 (le16_to_cpu(path->usGraphicObjIds[j]) & 658 653 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; 659 654
+6 -6
drivers/gpu/drm/radeon/radeon_bios.c
··· 664 664 uint16_t tmp; 665 665 666 666 r = radeon_atrm_get_bios(rdev); 667 - if (r == false) 667 + if (!r) 668 668 r = radeon_acpi_vfct_bios(rdev); 669 - if (r == false) 669 + if (!r) 670 670 r = igp_read_bios_from_vram(rdev); 671 - if (r == false) 671 + if (!r) 672 672 r = radeon_read_bios(rdev); 673 - if (r == false) 673 + if (!r) 674 674 r = radeon_read_disabled_bios(rdev); 675 - if (r == false) 675 + if (!r) 676 676 r = radeon_read_platform_bios(rdev); 677 - if (r == false || rdev->bios == NULL) { 677 + if (!r || rdev->bios == NULL) { 678 678 DRM_ERROR("Unable to locate a BIOS ROM\n"); 679 679 rdev->bios = NULL; 680 680 return false;
+3 -3
drivers/gpu/drm/radeon/radeon_connectors.c
··· 440 440 if (radeon_conflict->use_digital) 441 441 continue; 442 442 443 - if (priority == true) { 443 + if (priority) { 444 444 DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", 445 445 conflict->name); 446 446 DRM_DEBUG_KMS("in favor of %s\n", ··· 700 700 else 701 701 ret = radeon_legacy_get_tmds_info_from_combios(radeon_encoder, tmds); 702 702 } 703 - if (val == 1 || ret == false) { 703 + if (val == 1 || !ret) 704 704 radeon_legacy_get_tmds_info_from_table(radeon_encoder, tmds); 705 - } 705 + 706 706 radeon_property_change_mode(&radeon_encoder->base); 707 707 } 708 708
+2 -2
drivers/gpu/drm/radeon/radeon_display.c
··· 847 847 if (rdev->bios) { 848 848 if (rdev->is_atom_bios) { 849 849 ret = radeon_get_atom_connector_info_from_supported_devices_table(dev); 850 - if (ret == false) 850 + if (!ret) 851 851 ret = radeon_get_atom_connector_info_from_object_table(dev); 852 852 } else { 853 853 ret = radeon_get_legacy_connector_info_from_bios(dev); 854 - if (ret == false) 854 + if (!ret) 855 855 ret = radeon_get_legacy_connector_info_from_table(dev); 856 856 } 857 857 } else {
+2 -2
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
··· 1712 1712 else 1713 1713 ret = radeon_legacy_get_tmds_info_from_combios(encoder, tmds); 1714 1714 1715 - if (ret == false) 1715 + if (!ret) 1716 1716 radeon_legacy_get_tmds_info_from_table(encoder, tmds); 1717 1717 1718 1718 return tmds; ··· 1735 1735 1736 1736 ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds); 1737 1737 1738 - if (ret == false) 1738 + if (!ret) 1739 1739 radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds); 1740 1740 1741 1741 return tmds;
+1 -1
drivers/gpu/drm/radeon/radeon_pm.c
··· 1789 1789 u32 stat_crtc = 0; 1790 1790 bool in_vbl = radeon_pm_in_vbl(rdev); 1791 1791 1792 - if (in_vbl == false) 1792 + if (!in_vbl) 1793 1793 DRM_DEBUG_DRIVER("not in vbl for pm change %08x at %s\n", stat_crtc, 1794 1794 finish ? "exit" : "entry"); 1795 1795 return in_vbl;
+2 -2
drivers/gpu/drm/radeon/radeon_vce.c
··· 388 388 ib.ptr[i] = cpu_to_le32(0x0); 389 389 390 390 r = radeon_ib_schedule(rdev, &ib, NULL, false); 391 - if (r) { 391 + if (r) 392 392 DRM_ERROR("radeon: failed to schedule ib (%d).\n", r); 393 - } 393 + 394 394 395 395 if (fence) 396 396 *fence = radeon_fence_ref(ib.fence);
+8 -8
drivers/gpu/drm/radeon/radeon_vm.c
··· 296 296 struct radeon_bo_va *bo_va; 297 297 298 298 list_for_each_entry(bo_va, &bo->va, bo_list) { 299 - if (bo_va->vm == vm) { 299 + if (bo_va->vm == vm) 300 300 return bo_va; 301 - } 301 + 302 302 } 303 303 return NULL; 304 304 } ··· 323 323 struct radeon_bo_va *bo_va; 324 324 325 325 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL); 326 - if (bo_va == NULL) { 326 + if (bo_va == NULL) 327 327 return NULL; 328 - } 328 + 329 329 bo_va->vm = vm; 330 330 bo_va->bo = bo; 331 331 bo_va->it.start = 0; ··· 947 947 948 948 if (mem) { 949 949 addr = (u64)mem->start << PAGE_SHIFT; 950 - if (mem->mem_type != TTM_PL_SYSTEM) { 950 + if (mem->mem_type != TTM_PL_SYSTEM) 951 951 bo_va->flags |= RADEON_VM_PAGE_VALID; 952 - } 952 + 953 953 if (mem->mem_type == TTM_PL_TT) { 954 954 bo_va->flags |= RADEON_VM_PAGE_SYSTEM; 955 955 if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC))) ··· 1233 1233 struct radeon_bo_va *bo_va, *tmp; 1234 1234 int i, r; 1235 1235 1236 - if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { 1236 + if (!RB_EMPTY_ROOT(&vm->va.rb_root)) 1237 1237 dev_err(rdev->dev, "still active bo inside vm\n"); 1238 - } 1238 + 1239 1239 rbtree_postorder_for_each_entry_safe(bo_va, tmp, 1240 1240 &vm->va.rb_root, it.rb) { 1241 1241 interval_tree_remove(&bo_va->it, &vm->va);
+1 -1
drivers/gpu/drm/radeon/rv770.c
··· 1703 1703 * there. So it is pointless to try to go through that code 1704 1704 * hence why we disable uvd here. 1705 1705 */ 1706 - rdev->has_uvd = 0; 1706 + rdev->has_uvd = false; 1707 1707 return; 1708 1708 } 1709 1709 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+2 -2
drivers/gpu/drm/radeon/si.c
··· 6472 6472 * there. So it is pointless to try to go through that code 6473 6473 * hence why we disable uvd here. 6474 6474 */ 6475 - rdev->has_uvd = 0; 6475 + rdev->has_uvd = false; 6476 6476 return; 6477 6477 } 6478 6478 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL; ··· 6539 6539 * there. So it is pointless to try to go through that code 6540 6540 * hence why we disable vce here. 6541 6541 */ 6542 - rdev->has_vce = 0; 6542 + rdev->has_vce = false; 6543 6543 return; 6544 6544 } 6545 6545 rdev->ring[TN_RING_TYPE_VCE1_INDEX].ring_obj = NULL;
+26 -53
drivers/gpu/drm/scheduler/sched_entity.c
··· 38 38 * submit to HW ring. 39 39 * 40 40 * @entity: scheduler entity to init 41 - * @rq_list: the list of run queue on which jobs from this 41 + * @priority: priority of the entity 42 + * @sched_list: the list of drm scheds on which jobs from this 42 43 * entity can be submitted 43 - * @num_rq_list: number of run queue in rq_list 44 + * @num_sched_list: number of drm sched in sched_list 44 45 * @guilty: atomic_t set to 1 when a job on this queue 45 46 * is found to be guilty causing a timeout 46 47 * ··· 51 50 * Returns 0 on success or a negative error code on failure. 52 51 */ 53 52 int drm_sched_entity_init(struct drm_sched_entity *entity, 54 - struct drm_sched_rq **rq_list, 55 - unsigned int num_rq_list, 53 + enum drm_sched_priority priority, 54 + struct drm_gpu_scheduler **sched_list, 55 + unsigned int num_sched_list, 56 56 atomic_t *guilty) 57 57 { 58 - int i; 59 - 60 - if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0]))) 58 + if (!(entity && sched_list && (num_sched_list == 0 || sched_list[0]))) 61 59 return -EINVAL; 62 60 63 61 memset(entity, 0, sizeof(struct drm_sched_entity)); 64 62 INIT_LIST_HEAD(&entity->list); 65 63 entity->rq = NULL; 66 64 entity->guilty = guilty; 67 - entity->num_rq_list = num_rq_list; 68 - entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *), 69 - GFP_KERNEL); 70 - if (!entity->rq_list) 71 - return -ENOMEM; 65 + entity->num_sched_list = num_sched_list; 66 + entity->priority = priority; 67 + entity->sched_list = num_sched_list > 1 ? 
sched_list : NULL; 68 + entity->last_scheduled = NULL; 69 + 70 + if(num_sched_list) 71 + entity->rq = &sched_list[0]->sched_rq[entity->priority]; 72 72 73 73 init_completion(&entity->entity_idle); 74 - 75 - for (i = 0; i < num_rq_list; ++i) 76 - entity->rq_list[i] = rq_list[i]; 77 - 78 - if (num_rq_list) 79 - entity->rq = rq_list[0]; 80 - 81 - entity->last_scheduled = NULL; 82 74 83 75 spin_lock_init(&entity->rq_lock); 84 76 spsc_queue_init(&entity->job_queue); ··· 133 139 unsigned int min_jobs = UINT_MAX, num_jobs; 134 140 int i; 135 141 136 - for (i = 0; i < entity->num_rq_list; ++i) { 137 - struct drm_gpu_scheduler *sched = entity->rq_list[i]->sched; 142 + for (i = 0; i < entity->num_sched_list; ++i) { 143 + struct drm_gpu_scheduler *sched = entity->sched_list[i]; 138 144 139 - if (!entity->rq_list[i]->sched->ready) { 145 + if (!entity->sched_list[i]->ready) { 140 146 DRM_WARN("sched%s is not ready, skipping", sched->name); 141 147 continue; 142 148 } ··· 144 150 num_jobs = atomic_read(&sched->num_jobs); 145 151 if (num_jobs < min_jobs) { 146 152 min_jobs = num_jobs; 147 - rq = entity->rq_list[i]; 153 + rq = &entity->sched_list[i]->sched_rq[entity->priority]; 148 154 } 149 155 } 150 156 ··· 302 308 303 309 dma_fence_put(entity->last_scheduled); 304 310 entity->last_scheduled = NULL; 305 - kfree(entity->rq_list); 306 311 } 307 312 EXPORT_SYMBOL(drm_sched_entity_fini); 308 313 ··· 347 354 } 348 355 349 356 /** 350 - * drm_sched_entity_set_rq_priority - helper for drm_sched_entity_set_priority 351 - */ 352 - static void drm_sched_entity_set_rq_priority(struct drm_sched_rq **rq, 353 - enum drm_sched_priority priority) 354 - { 355 - *rq = &(*rq)->sched->sched_rq[priority]; 356 - } 357 - 358 - /** 359 357 * drm_sched_entity_set_priority - Sets priority of the entity 360 358 * 361 359 * @entity: scheduler entity ··· 357 373 void drm_sched_entity_set_priority(struct drm_sched_entity *entity, 358 374 enum drm_sched_priority priority) 359 375 { 360 - unsigned int i; 361 - 
362 376 spin_lock(&entity->rq_lock); 363 - 364 - for (i = 0; i < entity->num_rq_list; ++i) 365 - drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority); 366 - 367 - if (entity->rq) { 368 - drm_sched_rq_remove_entity(entity->rq, entity); 369 - drm_sched_entity_set_rq_priority(&entity->rq, priority); 370 - drm_sched_rq_add_entity(entity->rq, entity); 371 - } 372 - 377 + entity->priority = priority; 373 378 spin_unlock(&entity->rq_lock); 374 379 } 375 380 EXPORT_SYMBOL(drm_sched_entity_set_priority); ··· 463 490 struct dma_fence *fence; 464 491 struct drm_sched_rq *rq; 465 492 466 - if (spsc_queue_count(&entity->job_queue) || entity->num_rq_list <= 1) 493 + if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1) 467 494 return; 468 495 469 496 fence = READ_ONCE(entity->last_scheduled); 470 497 if (fence && !dma_fence_is_signaled(fence)) 471 498 return; 472 499 473 - rq = drm_sched_entity_get_free_sched(entity); 474 - if (rq == entity->rq) 475 - return; 476 - 477 500 spin_lock(&entity->rq_lock); 478 - drm_sched_rq_remove_entity(entity->rq, entity); 479 - entity->rq = rq; 501 + rq = drm_sched_entity_get_free_sched(entity); 502 + if (rq != entity->rq) { 503 + drm_sched_rq_remove_entity(entity->rq, entity); 504 + entity->rq = rq; 505 + } 506 + 480 507 spin_unlock(&entity->rq_lock); 481 508 } 482 509
+5 -3
drivers/gpu/drm/v3d/v3d_drv.c
··· 140 140 { 141 141 struct v3d_dev *v3d = to_v3d_dev(dev); 142 142 struct v3d_file_priv *v3d_priv; 143 - struct drm_sched_rq *rq; 143 + struct drm_gpu_scheduler *sched; 144 144 int i; 145 145 146 146 v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL); ··· 150 150 v3d_priv->v3d = v3d; 151 151 152 152 for (i = 0; i < V3D_MAX_QUEUES; i++) { 153 - rq = &v3d->queue[i].sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL]; 154 - drm_sched_entity_init(&v3d_priv->sched_entity[i], &rq, 1, NULL); 153 + sched = &v3d->queue[i].sched; 154 + drm_sched_entity_init(&v3d_priv->sched_entity[i], 155 + DRM_SCHED_PRIORITY_NORMAL, &sched, 156 + 1, NULL); 155 157 } 156 158 157 159 file->driver_priv = v3d_priv;
+5 -3
include/drm/gpu_scheduler.h
··· 81 81 struct drm_sched_entity { 82 82 struct list_head list; 83 83 struct drm_sched_rq *rq; 84 - struct drm_sched_rq **rq_list; 85 - unsigned int num_rq_list; 84 + unsigned int num_sched_list; 85 + struct drm_gpu_scheduler **sched_list; 86 + enum drm_sched_priority priority; 86 87 spinlock_t rq_lock; 87 88 88 89 struct spsc_queue job_queue; ··· 313 312 struct drm_sched_entity *entity); 314 313 315 314 int drm_sched_entity_init(struct drm_sched_entity *entity, 316 - struct drm_sched_rq **rq_list, 315 + enum drm_sched_priority priority, 316 + struct drm_gpu_scheduler **sched_list, 317 317 unsigned int num_rq_list, 318 318 atomic_t *guilty); 319 319 long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+107
include/drm/task_barrier.h
··· 1 + /* 2 + * Copyright 2019 Advanced Micro Devices, Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + * 22 + */ 23 + #include <linux/semaphore.h> 24 + #include <linux/atomic.h> 25 + 26 + /* 27 + * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks. 28 + * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/ 29 + */ 30 + 31 + 32 + 33 + #ifndef DRM_TASK_BARRIER_H_ 34 + #define DRM_TASK_BARRIER_H_ 35 + 36 + /* 37 + * Represents an instance of a task barrier. 
38 + */ 39 + struct task_barrier { 40 + unsigned int n; 41 + atomic_t count; 42 + struct semaphore enter_turnstile; 43 + struct semaphore exit_turnstile; 44 + }; 45 + 46 + static inline void task_barrier_signal_turnstile(struct semaphore *turnstile, 47 + unsigned int n) 48 + { 49 + int i; 50 + 51 + for (i = 0 ; i < n; i++) 52 + up(turnstile); 53 + } 54 + 55 + static inline void task_barrier_init(struct task_barrier *tb) 56 + { 57 + tb->n = 0; 58 + atomic_set(&tb->count, 0); 59 + sema_init(&tb->enter_turnstile, 0); 60 + sema_init(&tb->exit_turnstile, 0); 61 + } 62 + 63 + static inline void task_barrier_add_task(struct task_barrier *tb) 64 + { 65 + tb->n++; 66 + } 67 + 68 + static inline void task_barrier_rem_task(struct task_barrier *tb) 69 + { 70 + tb->n--; 71 + } 72 + 73 + /* 74 + * Lines up all the threads BEFORE the critical point. 75 + * 76 + * When all thread passed this code the entry barrier is back to locked state. 77 + */ 78 + static inline void task_barrier_enter(struct task_barrier *tb) 79 + { 80 + if (atomic_inc_return(&tb->count) == tb->n) 81 + task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n); 82 + 83 + down(&tb->enter_turnstile); 84 + } 85 + 86 + /* 87 + * Lines up all the threads AFTER the critical point. 88 + * 89 + * This function is used to avoid any one thread running ahead if the barrier is 90 + * used repeatedly . 91 + */ 92 + static inline void task_barrier_exit(struct task_barrier *tb) 93 + { 94 + if (atomic_dec_return(&tb->count) == 0) 95 + task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n); 96 + 97 + down(&tb->exit_turnstile); 98 + } 99 + 100 + /* Convinieince function when nothing to be done in between entry and exit */ 101 + static inline void task_barrier_full(struct task_barrier *tb) 102 + { 103 + task_barrier_enter(tb); 104 + task_barrier_exit(tb); 105 + } 106 + 107 + #endif