
Merge tag 'amd-drm-next-6.18-2025-09-19' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-6.18-2025-09-19:

amdgpu:
- Fence driver cleanup fix
- DPC fixes
- Misc display fixes
- Support the MMIO remap page as a ttm pool
- JPEG parser updates
- UserQ updates
- VCN ctx handling fixes
- Documentation updates
- Misc cleanups
- SMU 13.0.x updates
- SI DPM updates
- GC 11.x cleaner shader updates
- DMCUB updates
- DML fixes
- Improve fallback handling for pixel encoding
- VCN reset improvements
- DCE6 DC updates
- DSC fixes
- Use devm for i2c buses
- GPUVM locking updates
- GPUVM documentation improvements
- Drop non-DC DCE11 code
- S0ix fixes
- Backlight fix
- SR-IOV fixes

amdkfd:
- SVM updates

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://lore.kernel.org/r/20250919193354.2989255-1-alexander.deucher@amd.com

+2100 -5201
+1
Documentation/gpu/amdgpu/apu-asic-info-table.csv
··· 13 13 Ryzen 7x40 series, Phoenix, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11, 13.0.4 / 13.0.11 14 14 Ryzen 8x40 series, Hawk Point, 3.1.4, 11.0.1 / 11.0.4, 4.0.2, 6.0.1, 13.0.4 / 13.0.11, 13.0.4 / 13.0.11 15 15 Ryzen AI 300 series, Strix Point, 3.5.0, 11.5.0, 4.0.5, 6.1.0, 14.0.0, 14.0.0 16 + Ryzen AI 330 series, Krackan Point, 3.6.0, 11.5.3, 4.0.5, 6.1.3, 14.0.5, 14.0.5 16 17 Ryzen AI 350 series, Krackan Point, 3.5.0, 11.5.2, 4.0.5, 6.1.2, 14.0.4, 14.0.4 17 18 Ryzen AI Max 300 series, Strix Halo, 3.5.1, 11.5.1, 4.0.6, 6.1.1, 14.0.1, 14.0.1
+1 -1
Documentation/gpu/amdgpu/driver-core.rst
··· 210 210 :doc: IP Blocks 211 211 212 212 .. kernel-doc:: drivers/gpu/drm/amd/include/amd_shared.h 213 - :identifiers: amd_ip_block_type amd_ip_funcs DC_DEBUG_MASK 213 + :identifiers: amd_ip_block_type amd_ip_funcs DC_FEATURE_MASK DC_DEBUG_MASK
-1
drivers/gpu/drm/amd/amdgpu/Makefile
··· 138 138 # add DCE block 139 139 amdgpu-y += \ 140 140 dce_v10_0.o \ 141 - dce_v11_0.o \ 142 141 amdgpu_vkms.o 143 142 144 143 # add GFX block
+4 -3
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 63 63 #include "kgd_pp_interface.h" 64 64 65 65 #include "amd_shared.h" 66 + #include "amdgpu_utils.h" 66 67 #include "amdgpu_mode.h" 67 68 #include "amdgpu_ih.h" 68 69 #include "amdgpu_irq.h" ··· 435 434 uint32_t default_mclk; 436 435 uint32_t default_sclk; 437 436 uint32_t default_dispclk; 438 - uint32_t current_dispclk; 439 437 uint32_t dp_extclk; 440 438 uint32_t max_pixel_clock; 441 439 }; ··· 545 545 * this value can be accessed directly by using the offset as an index. 546 546 * For the GPU address, it is necessary to use gpu_addr and the offset. 547 547 */ 548 - volatile uint32_t *wb; 548 + uint32_t *wb; 549 549 550 550 /** 551 551 * @gpu_addr: ··· 721 721 /* VRAM scratch page for HDP bug, default vram page */ 722 722 struct amdgpu_mem_scratch { 723 723 struct amdgpu_bo *robj; 724 - volatile uint32_t *ptr; 724 + uint32_t *ptr; 725 725 u64 gpu_addr; 726 726 }; 727 727 ··· 752 752 struct amdgpu_mmio_remap { 753 753 u32 reg_offset; 754 754 resource_size_t bus_addr; 755 + struct amdgpu_bo *bo; 755 756 }; 756 757 757 758 /* Define the HW IP blocks will be used in driver , add more if necessary */
+12 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
··· 250 250 251 251 void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool suspend_proc) 252 252 { 253 - if (adev->kfd.dev) 254 - kgd2kfd_suspend(adev->kfd.dev, suspend_proc); 253 + if (adev->kfd.dev) { 254 + if (adev->in_s0ix) 255 + kgd2kfd_stop_sched_all_nodes(adev->kfd.dev); 256 + else 257 + kgd2kfd_suspend(adev->kfd.dev, suspend_proc); 258 + } 255 259 } 256 260 257 261 int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool resume_proc) 258 262 { 259 263 int r = 0; 260 264 261 - if (adev->kfd.dev) 262 - r = kgd2kfd_resume(adev->kfd.dev, resume_proc); 265 + if (adev->kfd.dev) { 266 + if (adev->in_s0ix) 267 + r = kgd2kfd_start_sched_all_nodes(adev->kfd.dev); 268 + else 269 + r = kgd2kfd_resume(adev->kfd.dev, resume_proc); 270 + } 263 271 264 272 return r; 265 273 }
+12
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
··· 428 428 int kgd2kfd_check_and_lock_kfd(struct kfd_dev *kfd); 429 429 void kgd2kfd_unlock_kfd(struct kfd_dev *kfd); 430 430 int kgd2kfd_start_sched(struct kfd_dev *kfd, uint32_t node_id); 431 + int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd); 431 432 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id); 433 + int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd); 432 434 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id); 433 435 bool kgd2kfd_vmfault_fast_path(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry, 434 436 bool retry_fault); ··· 520 518 return 0; 521 519 } 522 520 521 + static inline int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd) 522 + { 523 + return 0; 524 + } 525 + 523 526 static inline int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) 527 + { 528 + return 0; 529 + } 530 + 531 + static inline int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd) 524 532 { 525 533 return 0; 526 534 }
-1
drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
··· 706 706 } 707 707 adev->clock.dp_extclk = 708 708 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); 709 - adev->clock.current_dispclk = adev->clock.default_dispclk; 710 709 711 710 adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock); 712 711 if (adev->clock.max_pixel_clock == 0)
+17 -24
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
··· 184 184 int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in, 185 185 struct drm_amdgpu_bo_list_entry **info_param) 186 186 { 187 - const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr); 188 187 const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry); 188 + const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr); 189 + const uint32_t bo_info_size = in->bo_info_size; 190 + const uint32_t bo_number = in->bo_number; 189 191 struct drm_amdgpu_bo_list_entry *info; 190 - int r; 191 - 192 - info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL); 193 - if (!info) 194 - return -ENOMEM; 195 192 196 193 /* copy the handle array from userspace to a kernel buffer */ 197 - r = -EFAULT; 198 - if (likely(info_size == in->bo_info_size)) { 199 - unsigned long bytes = in->bo_number * 200 - in->bo_info_size; 201 - 202 - if (copy_from_user(info, uptr, bytes)) 203 - goto error_free; 204 - 194 + if (likely(info_size == bo_info_size)) { 195 + info = vmemdup_array_user(uptr, bo_number, info_size); 196 + if (IS_ERR(info)) 197 + return PTR_ERR(info); 205 198 } else { 206 - unsigned long bytes = min(in->bo_info_size, info_size); 199 + const uint32_t bytes = min(bo_info_size, info_size); 207 200 unsigned i; 208 201 209 - memset(info, 0, in->bo_number * info_size); 210 - for (i = 0; i < in->bo_number; ++i) { 211 - if (copy_from_user(&info[i], uptr, bytes)) 212 - goto error_free; 202 + info = kvmalloc_array(bo_number, info_size, GFP_KERNEL); 203 + if (!info) 204 + return -ENOMEM; 213 205 214 - uptr += in->bo_info_size; 206 + memset(info, 0, bo_number * info_size); 207 + for (i = 0; i < bo_number; ++i, uptr += bo_info_size) { 208 + if (copy_from_user(&info[i], uptr, bytes)) { 209 + kvfree(info); 210 + return -EFAULT; 211 + } 215 212 } 216 213 } 217 214 218 215 *info_param = info; 219 216 return 0; 220 - 221 - error_free: 222 - kvfree(info); 223 - return r; 224 217 } 225 218 226 219 int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
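
Note: this hunk (and the amdgpu_cs.c and amdgpu_psp_ta.c hunks below) replaces open-coded allocation + copy_from_user() pairs with the memdup_user() family, which folds overflow-checked size math, allocation, and the user copy into one call. A minimal sketch of the pattern, with illustrative names (struct item, copy_items) that are not from the driver:

    #include <linux/err.h>
    #include <linux/string.h>   /* memdup_array_user(), vmemdup_array_user() */
    #include <linux/mm.h>       /* kvfree() for the vmemdup variant */

    struct item {
        u32 handle;
        u32 flags;
    };

    static int copy_items(const void __user *uptr, u32 count, struct item **out)
    {
        /* checked count * size math, kvmalloc allocation and user copy in one call */
        struct item *items = vmemdup_array_user(uptr, count, sizeof(*items));

        if (IS_ERR(items))
            return PTR_ERR(items);  /* -EFAULT on a faulting copy, -ENOMEM, ... */

        *out = items;   /* caller releases it with kvfree() */
        return 0;
    }
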
+16 -38
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 178 178 struct amdgpu_fpriv *fpriv = p->filp->driver_priv; 179 179 unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { }; 180 180 struct amdgpu_vm *vm = &fpriv->vm; 181 - uint64_t *chunk_array_user; 182 181 uint64_t *chunk_array; 183 182 uint32_t uf_offset = 0; 184 183 size_t size; 185 184 int ret; 186 185 int i; 187 186 188 - chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), 189 - GFP_KERNEL); 190 - if (!chunk_array) 191 - return -ENOMEM; 192 - 193 - /* get chunks */ 194 - chunk_array_user = u64_to_user_ptr(cs->in.chunks); 195 - if (copy_from_user(chunk_array, chunk_array_user, 196 - sizeof(uint64_t)*cs->in.num_chunks)) { 197 - ret = -EFAULT; 198 - goto free_chunk; 199 - } 187 + chunk_array = memdup_array_user(u64_to_user_ptr(cs->in.chunks), 188 + cs->in.num_chunks, 189 + sizeof(uint64_t)); 190 + if (IS_ERR(chunk_array)) 191 + return PTR_ERR(chunk_array); 200 192 201 193 p->nchunks = cs->in.num_chunks; 202 194 p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk), ··· 201 209 for (i = 0; i < p->nchunks; i++) { 202 210 struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL; 203 211 struct drm_amdgpu_cs_chunk user_chunk; 204 - uint32_t __user *cdata; 205 212 206 213 chunk_ptr = u64_to_user_ptr(chunk_array[i]); 207 214 if (copy_from_user(&user_chunk, chunk_ptr, ··· 213 222 p->chunks[i].length_dw = user_chunk.length_dw; 214 223 215 224 size = p->chunks[i].length_dw; 216 - cdata = u64_to_user_ptr(user_chunk.chunk_data); 217 225 218 - p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), 219 - GFP_KERNEL); 220 - if (p->chunks[i].kdata == NULL) { 221 - ret = -ENOMEM; 226 + p->chunks[i].kdata = vmemdup_array_user(u64_to_user_ptr(user_chunk.chunk_data), 227 + size, 228 + sizeof(uint32_t)); 229 + if (IS_ERR(p->chunks[i].kdata)) { 230 + ret = PTR_ERR(p->chunks[i].kdata); 222 231 i--; 223 232 goto free_partial_kdata; 224 233 } 225 234 size *= sizeof(uint32_t); 226 - if (copy_from_user(p->chunks[i].kdata, cdata, size)) { 227 - ret = -EFAULT; 228 - goto free_partial_kdata; 229 - } 230 235 231 236 /* Assume the worst on the following checks */ 232 237 ret = -EINVAL; ··· 273 286 } 274 287 } 275 288 276 - if (!p->gang_size) { 289 + if (!p->gang_size || (amdgpu_sriov_vf(p->adev) && p->gang_size > 1)) { 277 290 ret = -EINVAL; 278 291 goto free_all_kdata; 279 292 } ··· 1754 1767 { 1755 1768 struct amdgpu_device *adev = drm_to_adev(dev); 1756 1769 union drm_amdgpu_wait_fences *wait = data; 1757 - uint32_t fence_count = wait->in.fence_count; 1758 - struct drm_amdgpu_fence *fences_user; 1759 1770 struct drm_amdgpu_fence *fences; 1760 1771 int r; 1761 1772 1762 1773 /* Get the fences from userspace */ 1763 - fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence), 1764 - GFP_KERNEL); 1765 - if (fences == NULL) 1766 - return -ENOMEM; 1767 - 1768 - fences_user = u64_to_user_ptr(wait->in.fences); 1769 - if (copy_from_user(fences, fences_user, 1770 - sizeof(struct drm_amdgpu_fence) * fence_count)) { 1771 - r = -EFAULT; 1772 - goto err_free_fences; 1773 - } 1774 + fences = memdup_array_user(u64_to_user_ptr(wait->in.fences), 1775 + wait->in.fence_count, 1776 + sizeof(struct drm_amdgpu_fence)); 1777 + if (IS_ERR(fences)) 1778 + return PTR_ERR(fences); 1774 1779 1775 1780 if (wait->in.wait_all) 1776 1781 r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences); 1777 1782 else 1778 1783 r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences); 1779 1784 1780 - err_free_fences: 1781 1785 kfree(fences); 1782 1786 1783 1787 return r;
+18 -19
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 5072 5072 if (!adev->in_s4 && (adev->flags & AMD_IS_APU)) 5073 5073 return 0; 5074 5074 5075 + /* No need to evict when going to S5 through S4 callbacks */ 5076 + if (system_state == SYSTEM_POWER_OFF) 5077 + return 0; 5078 + 5075 5079 ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); 5076 5080 if (ret) { 5077 5081 dev_warn(adev->dev, "evicting device resources failed\n"); ··· 5200 5196 adev->in_suspend = true; 5201 5197 5202 5198 if (amdgpu_sriov_vf(adev)) { 5203 - if (!adev->in_s0ix && !adev->in_runpm) 5199 + if (!adev->in_runpm) 5204 5200 amdgpu_amdkfd_suspend_process(adev); 5205 5201 amdgpu_virt_fini_data_exchange(adev); 5206 5202 r = amdgpu_virt_request_full_gpu(adev, false); ··· 5220 5216 5221 5217 amdgpu_device_ip_suspend_phase1(adev); 5222 5218 5223 - if (!adev->in_s0ix) { 5224 - amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 5225 - amdgpu_userq_suspend(adev); 5226 - } 5219 + amdgpu_amdkfd_suspend(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 5220 + amdgpu_userq_suspend(adev); 5227 5221 5228 5222 r = amdgpu_device_evict_resources(adev); 5229 5223 if (r) ··· 5316 5314 goto exit; 5317 5315 } 5318 5316 5319 - if (!adev->in_s0ix) { 5320 - r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 5321 - if (r) 5322 - goto exit; 5317 + r = amdgpu_amdkfd_resume(adev, !amdgpu_sriov_vf(adev) && !adev->in_runpm); 5318 + if (r) 5319 + goto exit; 5323 5320 5324 - r = amdgpu_userq_resume(adev); 5325 - if (r) 5326 - goto exit; 5327 - } 5321 + r = amdgpu_userq_resume(adev); 5322 + if (r) 5323 + goto exit; 5328 5324 5329 5325 r = amdgpu_device_ip_late_init(adev); 5330 5326 if (r) ··· 5335 5335 amdgpu_virt_init_data_exchange(adev); 5336 5336 amdgpu_virt_release_full_gpu(adev, true); 5337 5337 5338 - if (!adev->in_s0ix && !r && !adev->in_runpm) 5338 + if (!r && !adev->in_runpm) 5339 5339 r = amdgpu_amdkfd_resume_process(adev); 5340 5340 } 5341 5341 ··· 6937 6937 { 6938 6938 struct drm_device *dev = pci_get_drvdata(pdev); 6939 6939 struct amdgpu_device *adev = drm_to_adev(dev); 6940 - struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 6940 + struct amdgpu_hive_info *hive __free(xgmi_put_hive) = 6941 + amdgpu_get_xgmi_hive(adev); 6941 6942 struct amdgpu_reset_context reset_context; 6942 6943 struct list_head device_list; 6943 6944 ··· 6977 6976 amdgpu_device_recovery_get_reset_lock(adev, &device_list); 6978 6977 amdgpu_device_halt_activities(adev, NULL, &reset_context, &device_list, 6979 6978 hive, false); 6980 - if (hive) { 6979 + if (hive) 6981 6980 mutex_unlock(&hive->hive_lock); 6982 - amdgpu_put_xgmi_hive(hive); 6983 - } 6984 6981 return PCI_ERS_RESULT_NEED_RESET; 6985 6982 case pci_channel_io_perm_failure: 6986 6983 /* Permanent error, prepare for device removal */ ··· 7160 7161 struct pci_dev *parent = pci_upstream_bridge(adev->pdev); 7161 7162 int r; 7162 7163 7163 - if (parent->vendor != PCI_VENDOR_ID_ATI) 7164 + if (!parent || parent->vendor != PCI_VENDOR_ID_ATI) 7164 7165 return; 7165 7166 7166 7167 /* If already saved, return */
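
Note: the DPC error-detected hunk above switches the XGMI hive reference to the scope-based cleanup attribute from <linux/cleanup.h>, so the reference is dropped automatically on every exit path instead of via a manual amdgpu_put_xgmi_hive() call. A minimal sketch of the idiom with illustrative names (thing, get_thing(), put_thing(); the driver defines the equivalent hook for xgmi_put_hive in its own headers):

    #include <linux/cleanup.h>
    #include <linux/errno.h>

    struct thing;
    struct thing *get_thing(void);   /* illustrative: returns a counted reference */
    void put_thing(struct thing *t); /* illustrative: drops the reference */

    /* Teach cleanup.h how to release the pointer when it leaves scope. */
    DEFINE_FREE(put_thing, struct thing *, if (_T) put_thing(_T))

    static int use_thing(void)
    {
        struct thing *t __free(put_thing) = get_thing();

        if (!t)
            return -ENODEV;
        /* any return from here on releases the reference automatically */
        return 0;
    }
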
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
··· 70 70 [AMDGPU_PL_GWS] = "gws", 71 71 [AMDGPU_PL_OA] = "oa", 72 72 [AMDGPU_PL_DOORBELL] = "doorbell", 73 + [AMDGPU_PL_MMIO_REMAP] = "mmioremap", 73 74 }; 74 75 unsigned int hw_ip, i; 75 76
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
··· 458 458 /* always clear VRAM */ 459 459 flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED; 460 460 461 + if (args->in.domains & AMDGPU_GEM_DOMAIN_MMIO_REMAP) 462 + return -EINVAL; 463 + 461 464 /* create a gem object to contain this object in */ 462 465 if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS | 463 466 AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
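
Note: this makes the new domain kernel-only; the singleton remap BO is created by the driver itself (see the amdgpu_ttm.c hunk further down). A sketch of the user-visible effect, assuming the matching AMDGPU_GEM_DOMAIN_MMIO_REMAP uapi flag from this series and the usual libdrm ioctl wrapper:

    #include <errno.h>
    #include <xf86drm.h>
    #include <drm/amdgpu_drm.h>

    static int try_mmio_remap_alloc(int fd)
    {
        union drm_amdgpu_gem_create args = {
            .in.bo_size = 4096,
            .in.domains = AMDGPU_GEM_DOMAIN_MMIO_REMAP,
        };

        /* rejected by the hunk above: returns -1 with errno == EINVAL */
        return drmIoctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args);
    }
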
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
··· 2280 2280 * Return: 2281 2281 * return the latest index. 2282 2282 */ 2283 - u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer) 2283 + u32 amdgpu_gfx_csb_preamble_start(u32 *buffer) 2284 2284 { 2285 2285 u32 count = 0; 2286 2286 ··· 2304 2304 * Return: 2305 2305 * return the latest index. 2306 2306 */ 2307 - u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count) 2307 + u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count) 2308 2308 { 2309 2309 const struct cs_section_def *sect = NULL; 2310 2310 const struct cs_extent_def *ext = NULL; ··· 2331 2331 * @buffer: This is an output variable that gets the PACKET3 preamble end. 2332 2332 * @count: Index to start set the preemble end. 2333 2333 */ 2334 - void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count) 2334 + void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count) 2335 2335 { 2336 2336 buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 2337 2337 buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
··· 642 642 void amdgpu_gfx_profile_idle_work_handler(struct work_struct *work); 643 643 void amdgpu_gfx_profile_ring_begin_use(struct amdgpu_ring *ring); 644 644 void amdgpu_gfx_profile_ring_end_use(struct amdgpu_ring *ring); 645 - u32 amdgpu_gfx_csb_preamble_start(volatile u32 *buffer); 646 - u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, volatile u32 *buffer, u32 count); 647 - void amdgpu_gfx_csb_preamble_end(volatile u32 *buffer, u32 count); 645 + u32 amdgpu_gfx_csb_preamble_start(u32 *buffer); 646 + u32 amdgpu_gfx_csb_data_parser(struct amdgpu_device *adev, u32 *buffer, u32 count); 647 + void amdgpu_gfx_csb_preamble_end(u32 *buffer, u32 count); 648 648 649 649 void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev); 650 650 void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev);
+3 -15
drivers/gpu/drm/amd/amdgpu/amdgpu_i2c.c
··· 184 184 snprintf(i2c->adapter.name, sizeof(i2c->adapter.name), 185 185 "AMDGPU i2c hw bus %s", name); 186 186 i2c->adapter.algo = &amdgpu_atombios_i2c_algo; 187 - ret = i2c_add_adapter(&i2c->adapter); 187 + ret = devm_i2c_add_adapter(dev->dev, &i2c->adapter); 188 188 if (ret) 189 189 goto out_free; 190 190 } else { ··· 215 215 216 216 } 217 217 218 - void amdgpu_i2c_destroy(struct amdgpu_i2c_chan *i2c) 219 - { 220 - if (!i2c) 221 - return; 222 - WARN_ON(i2c->has_aux); 223 - i2c_del_adapter(&i2c->adapter); 224 - kfree(i2c); 225 - } 226 - 227 218 void amdgpu_i2c_init(struct amdgpu_device *adev) 228 219 { 229 220 if (!adev->is_atom_fw) { ··· 239 248 { 240 249 int i; 241 250 242 - for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) { 243 - if (adev->i2c_bus[i]) { 244 - amdgpu_i2c_destroy(adev->i2c_bus[i]); 251 + for (i = 0; i < AMDGPU_MAX_I2C_BUS; i++) 252 + if (adev->i2c_bus[i]) 245 253 adev->i2c_bus[i] = NULL; 246 - } 247 - } 248 254 } 249 255 250 256 /* looks up bus based on id */
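
Note: devm_i2c_add_adapter() ties the adapter's lifetime to the underlying struct device, so it is deleted automatically at teardown; that is what lets amdgpu_i2c_destroy() and its explicit i2c_del_adapter() call go away. A minimal sketch of the device-managed pattern (foo_register_bus is illustrative):

    #include <linux/device.h>
    #include <linux/i2c.h>

    static int foo_register_bus(struct device *dev, struct i2c_adapter *adap)
    {
        /* on success the adapter is unregistered for us when dev goes away;
         * no i2c_del_adapter() needed in any error or fini path */
        return devm_i2c_add_adapter(dev, adap);
    }
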
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
··· 56 56 bool use_bus_addr; 57 57 58 58 struct amdgpu_bo *ring_obj; 59 - volatile uint32_t *ring; 59 + uint32_t *ring; 60 60 uint64_t gpu_addr; 61 61 62 62 uint64_t wptr_addr; 63 - volatile uint32_t *wptr_cpu; 63 + uint32_t *wptr_cpu; 64 64 65 65 uint64_t rptr_addr; 66 - volatile uint32_t *rptr_cpu; 66 + uint32_t *rptr_cpu; 67 67 68 68 bool enabled; 69 69 unsigned rptr;
+65
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c
··· 540 540 drm_printf(p, "\nInactive Instance:JPEG%d\n", i); 541 541 } 542 542 } 543 + 544 + static inline bool amdgpu_jpeg_reg_valid(u32 reg) 545 + { 546 + if (reg < JPEG_REG_RANGE_START || reg > JPEG_REG_RANGE_END || 547 + (reg >= JPEG_ATOMIC_RANGE_START && reg <= JPEG_ATOMIC_RANGE_END)) 548 + return false; 549 + else 550 + return true; 551 + } 552 + 553 + /** 554 + * amdgpu_jpeg_dec_parse_cs - command submission parser 555 + * 556 + * @parser: Command submission parser context 557 + * @job: the job to parse 558 + * @ib: the IB to parse 559 + * 560 + * Parse the command stream, return -EINVAL for invalid packet, 561 + * 0 otherwise 562 + */ 563 + 564 + int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser, 565 + struct amdgpu_job *job, 566 + struct amdgpu_ib *ib) 567 + { 568 + u32 i, reg, res, cond, type; 569 + struct amdgpu_device *adev = parser->adev; 570 + 571 + for (i = 0; i < ib->length_dw ; i += 2) { 572 + reg = CP_PACKETJ_GET_REG(ib->ptr[i]); 573 + res = CP_PACKETJ_GET_RES(ib->ptr[i]); 574 + cond = CP_PACKETJ_GET_COND(ib->ptr[i]); 575 + type = CP_PACKETJ_GET_TYPE(ib->ptr[i]); 576 + 577 + if (res) /* only support 0 at the moment */ 578 + return -EINVAL; 579 + 580 + switch (type) { 581 + case PACKETJ_TYPE0: 582 + if (cond != PACKETJ_CONDITION_CHECK0 || 583 + !amdgpu_jpeg_reg_valid(reg)) { 584 + dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); 585 + return -EINVAL; 586 + } 587 + break; 588 + case PACKETJ_TYPE3: 589 + if (cond != PACKETJ_CONDITION_CHECK3 || 590 + !amdgpu_jpeg_reg_valid(reg)) { 591 + dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); 592 + return -EINVAL; 593 + } 594 + break; 595 + case PACKETJ_TYPE6: 596 + if (ib->ptr[i] == CP_PACKETJ_NOP) 597 + continue; 598 + dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); 599 + return -EINVAL; 600 + default: 601 + dev_err(adev->dev, "Unknown packet type %d !\n", type); 602 + return -EINVAL; 603 + } 604 + } 605 + 606 + return 0; 607 + }
+10
drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.h
··· 25 25 #define __AMDGPU_JPEG_H__ 26 26 27 27 #include "amdgpu_ras.h" 28 + #include "amdgpu_cs.h" 28 29 29 30 #define AMDGPU_MAX_JPEG_INSTANCES 4 30 31 #define AMDGPU_MAX_JPEG_RINGS 10 31 32 #define AMDGPU_MAX_JPEG_RINGS_4_0_3 8 33 + 34 + #define JPEG_REG_RANGE_START 0x4000 35 + #define JPEG_REG_RANGE_END 0x41c2 36 + #define JPEG_ATOMIC_RANGE_START 0x4120 37 + #define JPEG_ATOMIC_RANGE_END 0x412A 38 + 32 39 33 40 #define AMDGPU_JPEG_HARVEST_JPEG0 (1 << 0) 34 41 #define AMDGPU_JPEG_HARVEST_JPEG1 (1 << 1) ··· 177 170 const struct amdgpu_hwip_reg_entry *reg, u32 count); 178 171 void amdgpu_jpeg_dump_ip_state(struct amdgpu_ip_block *ip_block); 179 172 void amdgpu_jpeg_print_ip_state(struct amdgpu_ip_block *ip_block, struct drm_printer *p); 173 + int amdgpu_jpeg_dec_parse_cs(struct amdgpu_cs_parser *parser, 174 + struct amdgpu_job *job, 175 + struct amdgpu_ib *ib); 180 176 181 177 #endif /*__AMDGPU_JPEG_H__*/
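
Note: the parser admits only TYPE0/TYPE3 register writes that land inside the JPEG register window and outside its atomic sub-range, plus TYPE6 NOPs. A few worked values against the bounds defined above (a sketch, not driver code; amdgpu_jpeg_reg_valid() itself is local to amdgpu_jpeg.c):

    /* JPEG_REG_RANGE_START 0x4000, JPEG_REG_RANGE_END 0x41c2,
     * atomic hole 0x4120..0x412A */
    static void jpeg_reg_valid_examples(void)
    {
        WARN_ON(!amdgpu_jpeg_reg_valid(0x4080)); /* in window, outside hole: accepted */
        WARN_ON(amdgpu_jpeg_reg_valid(0x4124));  /* inside atomic hole: rejected */
        WARN_ON(amdgpu_jpeg_reg_valid(0x3fff));  /* below the window: rejected */
        WARN_ON(amdgpu_jpeg_reg_valid(0x5000));  /* above the window: rejected */
    }
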
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 939 939 if (adev->gfx.config.ta_cntl2_truncate_coord_mode) 940 940 dev_info->ids_flags |= AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD; 941 941 942 + /* Gang submit is not supported under SRIOV currently */ 943 + if (!amdgpu_sriov_vf(adev)) 944 + dev_info->ids_flags |= AMDGPU_IDS_FLAGS_GANG_SUBMIT; 945 + 942 946 if (amdgpu_passthrough(adev)) 943 947 dev_info->ids_flags |= (AMDGPU_IDS_FLAGS_MODE_PT << 944 948 AMDGPU_IDS_FLAGS_MODE_SHIFT) &
+13
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 153 153 c++; 154 154 } 155 155 156 + if (domain & AMDGPU_GEM_DOMAIN_MMIO_REMAP) { 157 + places[c].fpfn = 0; 158 + places[c].lpfn = 0; 159 + places[c].mem_type = AMDGPU_PL_MMIO_REMAP; 160 + places[c].flags = 0; 161 + c++; 162 + } 163 + 156 164 if (domain & AMDGPU_GEM_DOMAIN_GTT) { 157 165 places[c].fpfn = 0; 158 166 places[c].lpfn = 0; ··· 1554 1546 return AMDGPU_PL_OA; 1555 1547 case AMDGPU_GEM_DOMAIN_DOORBELL: 1556 1548 return AMDGPU_PL_DOORBELL; 1549 + case AMDGPU_GEM_DOMAIN_MMIO_REMAP: 1550 + return AMDGPU_PL_MMIO_REMAP; 1557 1551 default: 1558 1552 return TTM_PL_SYSTEM; 1559 1553 } ··· 1638 1628 break; 1639 1629 case AMDGPU_PL_DOORBELL: 1640 1630 placement = "DOORBELL"; 1631 + break; 1632 + case AMDGPU_PL_MMIO_REMAP: 1633 + placement = "MMIO REMAP"; 1641 1634 break; 1642 1635 case TTM_PL_SYSTEM: 1643 1636 default:
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
··· 167 167 return AMDGPU_GEM_DOMAIN_OA; 168 168 case AMDGPU_PL_DOORBELL: 169 169 return AMDGPU_GEM_DOMAIN_DOORBELL; 170 + case AMDGPU_PL_MMIO_REMAP: 171 + return AMDGPU_GEM_DOMAIN_MMIO_REMAP; 170 172 default: 171 173 break; 172 174 }
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 506 506 } 507 507 508 508 ret = amdgpu_bo_create_kernel(adev, PSP_1_MEG, PSP_1_MEG, 509 - AMDGPU_GEM_DOMAIN_VRAM, 509 + (amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ? 510 + AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT, 510 511 &psp->fw_pri_bo, 511 512 &psp->fw_pri_mc_addr, 512 513 &psp->fw_pri_buf);
+6 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
··· 171 171 172 172 copy_pos += sizeof(uint32_t); 173 173 174 - ta_bin = kzalloc(ta_bin_len, GFP_KERNEL); 175 - if (!ta_bin) 176 - return -ENOMEM; 177 - if (copy_from_user((void *)ta_bin, &buf[copy_pos], ta_bin_len)) { 178 - ret = -EFAULT; 179 - goto err_free_bin; 180 - } 174 + ta_bin = memdup_user(&buf[copy_pos], ta_bin_len); 175 + if (IS_ERR(ta_bin)) 176 + return PTR_ERR(ta_bin); 181 177 182 178 /* Set TA context and functions */ 183 179 set_ta_context_funcs(psp, ta_type, &context); ··· 323 327 return -EFAULT; 324 328 copy_pos += sizeof(uint32_t); 325 329 326 - shared_buf = kzalloc(shared_buf_len, GFP_KERNEL); 327 - if (!shared_buf) 328 - return -ENOMEM; 329 - if (copy_from_user((void *)shared_buf, &buf[copy_pos], shared_buf_len)) { 330 - ret = -EFAULT; 331 - goto err_free_shared_buf; 332 - } 330 + shared_buf = memdup_user(&buf[copy_pos], shared_buf_len); 331 + if (IS_ERR(shared_buf)) 332 + return PTR_ERR(shared_buf); 333 333 334 334 set_ta_context_funcs(psp, ta_type, &context); 335 335
+15
drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
··· 219 219 struct amdgpu_vram_block_info blk_info; 220 220 uint64_t page_pfns[32] = {0}; 221 221 int i, ret, count; 222 + bool hit = false; 222 223 223 224 if (amdgpu_ip_version(adev, UMC_HWIP, 0) < IP_VERSION(12, 0, 0)) 224 225 return 0; 226 + 227 + if (amdgpu_sriov_vf(adev)) { 228 + if (amdgpu_virt_check_vf_critical_region(adev, address, &hit)) 229 + return -EPERM; 230 + return hit ? -EACCES : 0; 231 + } 225 232 226 233 if ((address >= adev->gmc.mc_vram_size) || 227 234 (address >= RAS_UMC_INJECT_ADDR_LIMIT)) ··· 2709 2702 struct amdgpu_device *adev = ras->adev; 2710 2703 struct list_head device_list, *device_list_handle = NULL; 2711 2704 struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); 2705 + unsigned int error_query_mode; 2712 2706 enum ras_event_type type; 2713 2707 2714 2708 if (hive) { ··· 2736 2728 INIT_LIST_HEAD(&device_list); 2737 2729 list_add_tail(&adev->gmc.xgmi.head, &device_list); 2738 2730 device_list_handle = &device_list; 2731 + } 2732 + 2733 + if (amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) { 2734 + if (error_query_mode == AMDGPU_RAS_FIRMWARE_ERROR_QUERY) { 2735 + /* wait 500ms to ensure pmfw polling mca bank info done */ 2736 + msleep(500); 2737 + } 2739 2738 } 2740 2739 2741 2740 type = amdgpu_ras_get_fatal_error_event(adev);
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h
··· 91 91 break; 92 92 case TTM_PL_TT: 93 93 case AMDGPU_PL_DOORBELL: 94 + case AMDGPU_PL_MMIO_REMAP: 94 95 node = to_ttm_range_mgr_node(res)->mm_nodes; 95 96 while (start >= node->size << PAGE_SHIFT) 96 97 start -= node++->size << PAGE_SHIFT; ··· 154 153 break; 155 154 case TTM_PL_TT: 156 155 case AMDGPU_PL_DOORBELL: 156 + case AMDGPU_PL_MMIO_REMAP: 157 157 node = cur->node; 158 158 159 159 cur->node = ++node;
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
··· 364 364 365 365 /* Allocate ring buffer */ 366 366 if (ring->ring_obj == NULL) { 367 - r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_dw, PAGE_SIZE, 367 + r = amdgpu_bo_create_kernel(adev, ring->ring_size + ring->funcs->extra_bytes, 368 + PAGE_SIZE, 368 369 AMDGPU_GEM_DOMAIN_GTT, 369 370 &ring->ring_obj, 370 371 &ring->gpu_addr,
+19 -11
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
··· 114 114 */ 115 115 struct amdgpu_fence_driver { 116 116 uint64_t gpu_addr; 117 - volatile uint32_t *cpu_addr; 117 + uint32_t *cpu_addr; 118 118 /* sync_seq is protected by ring emission lock */ 119 119 uint32_t sync_seq; 120 120 atomic_t last_seq; ··· 211 211 bool support_64bit_ptrs; 212 212 bool no_user_fence; 213 213 bool secure_submission_supported; 214 - unsigned extra_dw; 214 + 215 + /** 216 + * @extra_bytes: 217 + * 218 + * Optional extra space in bytes that is added to the ring size 219 + * when allocating the BO that holds the contents of the ring. 220 + * This space isn't used for command submission to the ring, 221 + * but is just there to satisfy some hardware requirements or 222 + * implement workarounds. It's up to the implementation of each 223 + * specific ring to initialize this space. 224 + */ 225 + unsigned extra_bytes; 215 226 216 227 /* ring read/write ptr handling */ 217 228 u64 (*get_rptr)(struct amdgpu_ring *ring); ··· 309 298 unsigned int ring_backup_entries_to_copy; 310 299 unsigned rptr_offs; 311 300 u64 rptr_gpu_addr; 312 - volatile u32 *rptr_cpu_addr; 301 + u32 *rptr_cpu_addr; 313 302 314 303 /** 315 304 * @wptr: ··· 389 378 * This is the CPU address pointer in the writeback slot. This is used 390 379 * to commit changes to the GPU. 391 380 */ 392 - volatile u32 *wptr_cpu_addr; 381 + u32 *wptr_cpu_addr; 393 382 unsigned fence_offs; 394 383 u64 fence_gpu_addr; 395 - volatile u32 *fence_cpu_addr; 384 + u32 *fence_cpu_addr; 396 385 uint64_t current_ctx; 397 386 char name[16]; 398 387 u32 trail_seq; 399 388 unsigned trail_fence_offs; 400 389 u64 trail_fence_gpu_addr; 401 - volatile u32 *trail_fence_cpu_addr; 390 + u32 *trail_fence_cpu_addr; 402 391 unsigned cond_exe_offs; 403 392 u64 cond_exe_gpu_addr; 404 - volatile u32 *cond_exe_cpu_addr; 393 + u32 *cond_exe_cpu_addr; 405 394 unsigned int set_q_mode_offs; 406 395 u32 *set_q_mode_ptr; 407 396 u64 set_q_mode_token; ··· 481 470 482 471 static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring) 483 472 { 484 - int i = 0; 485 - while (i <= ring->buf_mask) 486 - ring->ring[i++] = ring->funcs->nop; 487 - 473 + memset32(ring->ring, ring->funcs->nop, ring->buf_mask + 1); 488 474 } 489 475 490 476 static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
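
Note: the ring hunks rename extra_dw to extra_bytes and document it: the extra space only grows the BO backing the ring (see the amdgpu_ring.c hunk above) and is never used for command submission; amdgpu_ring_clear_ring() now also fills the ring with NOPs via a single memset32(). A hypothetical user of the field (foo_ring_funcs is illustrative):

    static const struct amdgpu_ring_funcs foo_ring_funcs = {
        /* ... the usual callbacks ... */
        /* 64 trailing bytes for a HW requirement: the backing BO is
         * allocated as ring_size + 64, the usable ring is unchanged */
        .extra_bytes = 64,
    };
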
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.c
··· 89 89 int amdgpu_gfx_rlc_init_sr(struct amdgpu_device *adev, u32 dws) 90 90 { 91 91 const u32 *src_ptr; 92 - volatile u32 *dst_ptr; 92 + u32 *dst_ptr; 93 93 u32 i; 94 94 int r; 95 95 ··· 189 189 void amdgpu_gfx_rlc_setup_cp_table(struct amdgpu_device *adev) 190 190 { 191 191 const __le32 *fw_data; 192 - volatile u32 *dst_ptr; 192 + u32 *dst_ptr; 193 193 int me, i, max_me; 194 194 u32 bo_offset = 0; 195 195 u32 table_offset, table_size;
+4 -4
drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
··· 251 251 * and it also provides a pointer to it which is used by the firmware 252 252 * to load the clear state in some cases. 253 253 */ 254 - void (*get_csb_buffer)(struct amdgpu_device *adev, volatile u32 *buffer); 254 + void (*get_csb_buffer)(struct amdgpu_device *adev, u32 *buffer); 255 255 int (*get_cp_table_num)(struct amdgpu_device *adev); 256 256 int (*resume)(struct amdgpu_device *adev); 257 257 void (*stop)(struct amdgpu_device *adev); ··· 275 275 /* for power gating */ 276 276 struct amdgpu_bo *save_restore_obj; 277 277 uint64_t save_restore_gpu_addr; 278 - volatile uint32_t *sr_ptr; 278 + uint32_t *sr_ptr; 279 279 const u32 *reg_list; 280 280 u32 reg_list_size; 281 281 /* for clear state */ 282 282 struct amdgpu_bo *clear_state_obj; 283 283 uint64_t clear_state_gpu_addr; 284 - volatile uint32_t *cs_ptr; 284 + uint32_t *cs_ptr; 285 285 const struct cs_section_def *cs_data; 286 286 u32 clear_state_size; 287 287 /* for cp tables */ 288 288 struct amdgpu_bo *cp_table_obj; 289 289 uint64_t cp_table_gpu_addr; 290 - volatile uint32_t *cp_table_ptr; 290 + uint32_t *cp_table_ptr; 291 291 u32 cp_table_size; 292 292 293 293 /* safe mode for updating CG/PG state */
+93 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
··· 123 123 case AMDGPU_PL_GWS: 124 124 case AMDGPU_PL_OA: 125 125 case AMDGPU_PL_DOORBELL: 126 + case AMDGPU_PL_MMIO_REMAP: 126 127 placement->num_placement = 0; 127 128 return; 128 129 ··· 449 448 return false; 450 449 451 450 if (res->mem_type == TTM_PL_SYSTEM || res->mem_type == TTM_PL_TT || 452 - res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL) 451 + res->mem_type == AMDGPU_PL_PREEMPT || res->mem_type == AMDGPU_PL_DOORBELL || 452 + res->mem_type == AMDGPU_PL_MMIO_REMAP) 453 453 return true; 454 454 455 455 if (res->mem_type != TTM_PL_VRAM) ··· 541 539 old_mem->mem_type == AMDGPU_PL_GWS || 542 540 old_mem->mem_type == AMDGPU_PL_OA || 543 541 old_mem->mem_type == AMDGPU_PL_DOORBELL || 544 542 old_mem->mem_type == AMDGPU_PL_MMIO_REMAP || 545 543 new_mem->mem_type == AMDGPU_PL_GDS || 545 544 new_mem->mem_type == AMDGPU_PL_GWS || 546 545 new_mem->mem_type == AMDGPU_PL_OA || 547 - new_mem->mem_type == AMDGPU_PL_DOORBELL) { 546 + new_mem->mem_type == AMDGPU_PL_DOORBELL || 547 + new_mem->mem_type == AMDGPU_PL_MMIO_REMAP) { 548 548 /* Nothing to save here */ 549 549 amdgpu_bo_move_notify(bo, evict, new_mem); 550 550 ttm_bo_move_null(bo, new_mem); ··· 634 630 mem->bus.is_iomem = true; 635 631 mem->bus.caching = ttm_uncached; 636 632 break; 633 + case AMDGPU_PL_MMIO_REMAP: 634 + mem->bus.offset = mem->start << PAGE_SHIFT; 635 + mem->bus.offset += adev->rmmio_remap.bus_addr; 636 + mem->bus.is_iomem = true; 637 + mem->bus.caching = ttm_uncached; 638 + break; 637 639 default: 638 640 return -EINVAL; 639 641 } ··· 657 647 658 648 if (bo->resource->mem_type == AMDGPU_PL_DOORBELL) 659 649 return ((uint64_t)(adev->doorbell.base + cursor.start)) >> PAGE_SHIFT; 650 + else if (bo->resource->mem_type == AMDGPU_PL_MMIO_REMAP) 651 + return ((uint64_t)(adev->rmmio_remap.bus_addr + cursor.start)) >> PAGE_SHIFT; 660 652 661 653 return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT; 662 654 } ··· 1368 1356 1369 1357 if (mem && (mem->mem_type == TTM_PL_TT || 1370 1358 mem->mem_type == AMDGPU_PL_DOORBELL || 1371 - mem->mem_type == AMDGPU_PL_PREEMPT)) { 1359 + mem->mem_type == AMDGPU_PL_PREEMPT || 1360 + mem->mem_type == AMDGPU_PL_MMIO_REMAP)) { 1372 1361 flags |= AMDGPU_PTE_SYSTEM; 1373 1362 1374 1363 if (ttm->caching == ttm_cached) ··· 1856 1843 adev->mman.ttm_pools = NULL; 1857 1844 } 1858 1845 1846 + /** 1847 + * amdgpu_ttm_mmio_remap_bo_init - Allocate the singleton 4K MMIO_REMAP BO 1848 + * @adev: amdgpu device 1849 + * 1850 + * Allocates a one-page (4K) GEM BO in AMDGPU_GEM_DOMAIN_MMIO_REMAP when the 1851 + * hardware exposes a remap base (adev->rmmio_remap.bus_addr) and the host 1852 + * PAGE_SIZE is <= AMDGPU_GPU_PAGE_SIZE (4K). The BO is created as a regular 1853 + * GEM object (amdgpu_bo_create). 1854 + * 1855 + * Return: 1856 + * * 0 on success or intentional skip (feature not present/unsupported) 1857 + * * negative errno on allocation failure 1858 + */ 1859 + static int amdgpu_ttm_mmio_remap_bo_init(struct amdgpu_device *adev) 1860 + { 1861 + struct amdgpu_bo_param bp; 1862 + int r; 1863 + 1864 + /* Skip if HW doesn't expose remap, or if PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE (4K). */ 1865 + if (!adev->rmmio_remap.bus_addr || PAGE_SIZE > AMDGPU_GPU_PAGE_SIZE) 1866 + return 0; 1867 + 1868 + memset(&bp, 0, sizeof(bp)); 1869 + 1870 + /* Create exactly one GEM BO in the MMIO_REMAP domain.
*/ 1871 + bp.type = ttm_bo_type_device; /* userspace-mappable GEM */ 1872 + bp.size = AMDGPU_GPU_PAGE_SIZE; /* 4K */ 1873 + bp.byte_align = AMDGPU_GPU_PAGE_SIZE; 1874 + bp.domain = AMDGPU_GEM_DOMAIN_MMIO_REMAP; 1875 + bp.flags = 0; 1876 + bp.resv = NULL; 1877 + bp.bo_ptr_size = sizeof(struct amdgpu_bo); 1878 + 1879 + r = amdgpu_bo_create(adev, &bp, &adev->rmmio_remap.bo); 1880 + if (r) 1881 + return r; 1882 + 1883 + return 0; 1884 + } 1885 + 1886 + /** 1887 + * amdgpu_ttm_mmio_remap_bo_fini - Free the singleton MMIO_REMAP BO 1888 + * @adev: amdgpu device 1889 + * 1890 + * Frees the kernel-owned MMIO_REMAP BO if it was allocated by 1891 + * amdgpu_ttm_mmio_remap_bo_init(). 1892 + */ 1893 + static void amdgpu_ttm_mmio_remap_bo_fini(struct amdgpu_device *adev) 1894 + { 1895 + amdgpu_bo_unref(&adev->rmmio_remap.bo); 1896 + adev->rmmio_remap.bo = NULL; 1897 + } 1898 + 1859 1899 /* 1860 1900 * amdgpu_ttm_init - Init the memory management (ttm) as well as various 1861 1901 * gtt/vram related fields. ··· 1945 1879 } 1946 1880 adev->mman.initialized = true; 1947 1881 1948 - /* Initialize VRAM pool with all of VRAM divided into pages */ 1949 - r = amdgpu_vram_mgr_init(adev); 1950 - if (r) { 1951 - dev_err(adev->dev, "Failed initializing VRAM heap.\n"); 1952 - return r; 1882 + if (!adev->gmc.is_app_apu) { 1883 + /* Initialize VRAM pool with all of VRAM divided into pages */ 1884 + r = amdgpu_vram_mgr_init(adev); 1885 + if (r) { 1886 + dev_err(adev->dev, "Failed initializing VRAM heap.\n"); 1887 + return r; 1888 + } 1953 1889 } 1954 1890 1955 1891 /* Change the size here instead of the init above so only lpfn is affected */ ··· 2078 2010 return r; 2079 2011 } 2080 2012 2013 + /* Initialize MMIO-remap pool (single page 4K) */ 2014 + r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_MMIO_REMAP, 1); 2015 + if (r) { 2016 + dev_err(adev->dev, "Failed initializing MMIO-remap heap.\n"); 2017 + return r; 2018 + } 2019 + 2020 + /* Allocate the singleton MMIO_REMAP BO (4K) if supported */ 2021 + r = amdgpu_ttm_mmio_remap_bo_init(adev); 2022 + if (r) 2023 + return r; 2024 + 2081 2025 /* Initialize preemptible memory pool */ 2082 2026 r = amdgpu_preempt_mgr_init(adev); 2083 2027 if (r) { ··· 2152 2072 } 2153 2073 amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, 2154 2074 &adev->mman.sdma_access_ptr); 2075 + 2076 + amdgpu_ttm_mmio_remap_bo_fini(adev); 2155 2077 amdgpu_ttm_fw_reserve_vram_fini(adev); 2156 2078 amdgpu_ttm_drv_reserve_vram_fini(adev); 2157 2079 ··· 2166 2084 drm_dev_exit(idx); 2167 2085 } 2168 2086 2169 - amdgpu_vram_mgr_fini(adev); 2087 + if (!adev->gmc.is_app_apu) 2088 + amdgpu_vram_mgr_fini(adev); 2170 2089 amdgpu_gtt_mgr_fini(adev); 2171 2090 amdgpu_preempt_mgr_fini(adev); 2172 2091 amdgpu_doorbell_fini(adev); ··· 2176 2093 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); 2177 2094 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); 2178 2095 ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_DOORBELL); 2096 + ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_MMIO_REMAP); 2179 2097 ttm_device_fini(&adev->mman.bdev); 2180 2098 adev->mman.initialized = false; 2181 2099 dev_info(adev->dev, "amdgpu: ttm finalized\n");
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 34 34 #define AMDGPU_PL_OA (TTM_PL_PRIV + 2) 35 35 #define AMDGPU_PL_PREEMPT (TTM_PL_PRIV + 3) 36 36 #define AMDGPU_PL_DOORBELL (TTM_PL_PRIV + 4) 37 - #define __AMDGPU_PL_NUM (TTM_PL_PRIV + 5) 37 + #define AMDGPU_PL_MMIO_REMAP (TTM_PL_PRIV + 5) 38 + #define __AMDGPU_PL_NUM (TTM_PL_PRIV + 6) 38 39 39 40 #define AMDGPU_GTT_MAX_TRANSFER_SIZE 512 40 41 #define AMDGPU_GTT_NUM_TRANSFER_WINDOWS 2
+235 -128
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
··· 44 44 return userq_ip_mask; 45 45 } 46 46 47 + int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, 48 + u64 expected_size) 49 + { 50 + struct amdgpu_bo_va_mapping *va_map; 51 + u64 user_addr; 52 + u64 size; 53 + int r = 0; 54 + 55 + user_addr = (addr & AMDGPU_GMC_HOLE_MASK) >> AMDGPU_GPU_PAGE_SHIFT; 56 + size = expected_size >> AMDGPU_GPU_PAGE_SHIFT; 57 + 58 + r = amdgpu_bo_reserve(vm->root.bo, false); 59 + if (r) 60 + return r; 61 + 62 + va_map = amdgpu_vm_bo_lookup_mapping(vm, user_addr); 63 + if (!va_map) { 64 + r = -EINVAL; 65 + goto out_err; 66 + } 67 + /* Only validate the userq whether resident in the VM mapping range */ 68 + if (user_addr >= va_map->start && 69 + va_map->last - user_addr + 1 >= size) { 70 + amdgpu_bo_unreserve(vm->root.bo); 71 + return 0; 72 + } 73 + 74 + out_err: 75 + amdgpu_bo_unreserve(vm->root.bo); 76 + return r; 77 + } 78 + 79 + static int 80 + amdgpu_userq_preempt_helper(struct amdgpu_userq_mgr *uq_mgr, 81 + struct amdgpu_usermode_queue *queue) 82 + { 83 + struct amdgpu_device *adev = uq_mgr->adev; 84 + const struct amdgpu_userq_funcs *userq_funcs = 85 + adev->userq_funcs[queue->queue_type]; 86 + int r = 0; 87 + 88 + if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { 89 + r = userq_funcs->preempt(uq_mgr, queue); 90 + if (r) { 91 + queue->state = AMDGPU_USERQ_STATE_HUNG; 92 + } else { 93 + queue->state = AMDGPU_USERQ_STATE_PREEMPTED; 94 + } 95 + } 96 + 97 + return r; 98 + } 99 + 100 + static int 101 + amdgpu_userq_restore_helper(struct amdgpu_userq_mgr *uq_mgr, 102 + struct amdgpu_usermode_queue *queue) 103 + { 104 + struct amdgpu_device *adev = uq_mgr->adev; 105 + const struct amdgpu_userq_funcs *userq_funcs = 106 + adev->userq_funcs[queue->queue_type]; 107 + int r = 0; 108 + 109 + if (queue->state == AMDGPU_USERQ_STATE_PREEMPTED) { 110 + r = userq_funcs->restore(uq_mgr, queue); 111 + if (r) { 112 + queue->state = AMDGPU_USERQ_STATE_HUNG; 113 + } else { 114 + queue->state = AMDGPU_USERQ_STATE_MAPPED; 115 + } 116 + } 117 + 118 + return r; 119 + } 120 + 47 121 static int 48 122 amdgpu_userq_unmap_helper(struct amdgpu_userq_mgr *uq_mgr, 49 123 struct amdgpu_usermode_queue *queue) ··· 127 53 adev->userq_funcs[queue->queue_type]; 128 54 int r = 0; 129 55 130 - if (queue->state == AMDGPU_USERQ_STATE_MAPPED) { 56 + if ((queue->state == AMDGPU_USERQ_STATE_MAPPED) || 57 + (queue->state == AMDGPU_USERQ_STATE_PREEMPTED)) { 131 58 r = userq_funcs->unmap(uq_mgr, queue); 132 59 if (r) 133 60 queue->state = AMDGPU_USERQ_STATE_HUNG; ··· 185 110 amdgpu_userq_fence_driver_free(queue); 186 111 idr_remove(&uq_mgr->userq_idr, queue_id); 187 112 kfree(queue); 188 - } 189 - 190 - int 191 - amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr) 192 - { 193 - struct amdgpu_usermode_queue *queue; 194 - int queue_id; 195 - int ret = 0; 196 - 197 - mutex_lock(&uq_mgr->userq_mutex); 198 - /* Resume all the queues for this process */ 199 - idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) 200 - ret += queue->state == AMDGPU_USERQ_STATE_MAPPED; 201 - 202 - mutex_unlock(&uq_mgr->userq_mutex); 203 - return ret; 204 113 } 205 114 206 115 static struct amdgpu_usermode_queue * ··· 382 323 debugfs_remove_recursive(queue->debugfs_queue); 383 324 #endif 384 325 r = amdgpu_userq_unmap_helper(uq_mgr, queue); 326 + /*TODO: It requires a reset for userq hw unmap error*/ 327 + if (unlikely(r != AMDGPU_USERQ_STATE_UNMAPPED)) { 328 + drm_warn(adev_to_drm(uq_mgr->adev), "trying to destroy a HW mapping userq\n"); 329 + queue->state = AMDGPU_USERQ_STATE_HUNG; 330 + } 385 331
amdgpu_userq_cleanup(uq_mgr, queue, queue_id); 386 332 mutex_unlock(&uq_mgr->userq_mutex); 387 333 ··· 468 404 (args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK) >> 469 405 AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_SHIFT; 470 406 471 - /* Usermode queues are only supported for GFX IP as of now */ 472 - if (args->in.ip_type != AMDGPU_HW_IP_GFX && 473 - args->in.ip_type != AMDGPU_HW_IP_DMA && 474 - args->in.ip_type != AMDGPU_HW_IP_COMPUTE) { 475 - drm_file_err(uq_mgr->file, "Usermode queue doesn't support IP type %u\n", 476 - args->in.ip_type); 477 - return -EINVAL; 478 - } 479 - 480 407 r = amdgpu_userq_priority_permit(filp, priority); 481 408 if (r) 482 409 return r; 483 - 484 - if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && 485 - (args->in.ip_type != AMDGPU_HW_IP_GFX) && 486 - (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && 487 - !amdgpu_is_tmz(adev)) { 488 - drm_file_err(uq_mgr->file, "Secure only supported on GFX/Compute queues\n"); 489 - return -EINVAL; 490 - } 491 410 492 411 r = pm_runtime_get_sync(adev_to_drm(adev)->dev); 493 412 if (r < 0) { ··· 501 454 if (!queue) { 502 455 drm_file_err(uq_mgr->file, "Failed to allocate memory for queue\n"); 503 456 r = -ENOMEM; 457 + goto unlock; 458 + } 459 + 460 + /* Validate the userq virtual address.*/ 461 + if (amdgpu_userq_input_va_validate(&fpriv->vm, args->in.queue_va, args->in.queue_size) || 462 + amdgpu_userq_input_va_validate(&fpriv->vm, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) || 463 + amdgpu_userq_input_va_validate(&fpriv->vm, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) { 464 + kfree(queue); 504 465 goto unlock; 505 466 } 506 467 queue->doorbell_handle = args->in.doorbell_handle; ··· 598 543 return r; 599 544 } 600 545 601 - int amdgpu_userq_ioctl(struct drm_device *dev, void *data, 602 - struct drm_file *filp) 546 + static int amdgpu_userq_input_args_validate(struct drm_device *dev, 547 + union drm_amdgpu_userq *args, 548 + struct drm_file *filp) 603 549 { 604 - union drm_amdgpu_userq *args = data; 605 - int r; 550 + struct amdgpu_device *adev = drm_to_adev(dev); 606 551 607 552 switch (args->in.op) { 608 553 case AMDGPU_USERQ_OP_CREATE: 609 554 if (args->in.flags & ~(AMDGPU_USERQ_CREATE_FLAGS_QUEUE_PRIORITY_MASK | 610 555 AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE)) 611 556 return -EINVAL; 612 - r = amdgpu_userq_create(filp, args); 613 - if (r) 614 - drm_file_err(filp, "Failed to create usermode queue\n"); 615 - break; 557 + /* Usermode queues are only supported for GFX IP as of now */ 558 + if (args->in.ip_type != AMDGPU_HW_IP_GFX && 559 + args->in.ip_type != AMDGPU_HW_IP_DMA && 560 + args->in.ip_type != AMDGPU_HW_IP_COMPUTE) { 561 + drm_file_err(filp, "Usermode queue doesn't support IP type %u\n", 562 + args->in.ip_type); 563 + return -EINVAL; 564 + } 616 565 566 + if ((args->in.flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE) && 567 + (args->in.ip_type != AMDGPU_HW_IP_GFX) && 568 + (args->in.ip_type != AMDGPU_HW_IP_COMPUTE) && 569 + !amdgpu_is_tmz(adev)) { 570 + drm_file_err(filp, "Secure only supported on GFX/Compute queues\n"); 571 + return -EINVAL; 572 + } 573 + 574 + if (args->in.queue_va == AMDGPU_BO_INVALID_OFFSET || 575 + args->in.queue_va == 0 || 576 + args->in.queue_size == 0) { 577 + drm_file_err(filp, "invalidate userq queue va or size\n"); 578 + return -EINVAL; 579 + } 580 + if (!args->in.wptr_va || !args->in.rptr_va) { 581 + drm_file_err(filp, "invalidate userq queue rptr or wptr\n"); 582 + return -EINVAL; 583 + } 584 + break; 617 585 case AMDGPU_USERQ_OP_FREE: 618 586 if (args->in.ip_type || 619
587 args->in.doorbell_handle || ··· 649 571 args->in.mqd || 650 572 args->in.mqd_size) 651 573 return -EINVAL; 574 + break; 575 + default: 576 + return -EINVAL; 577 + } 578 + 579 + return 0; 580 + } 581 + 582 + int amdgpu_userq_ioctl(struct drm_device *dev, void *data, 583 + struct drm_file *filp) 584 + { 585 + union drm_amdgpu_userq *args = data; 586 + int r; 587 + 588 + if (amdgpu_userq_input_args_validate(dev, args, filp) < 0) 589 + return -EINVAL; 590 + 591 + switch (args->in.op) { 592 + case AMDGPU_USERQ_OP_CREATE: 593 + r = amdgpu_userq_create(filp, args); 594 + if (r) 595 + drm_file_err(filp, "Failed to create usermode queue\n"); 596 + break; 597 + 598 + case AMDGPU_USERQ_OP_FREE: 652 599 r = amdgpu_userq_destroy(filp, args->in.queue_id); 653 600 if (r) 654 601 drm_file_err(filp, "Failed to destroy usermode queue\n"); ··· 696 593 697 594 /* Resume all the queues for this process */ 698 595 idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { 699 - r = amdgpu_userq_map_helper(uq_mgr, queue); 596 + r = amdgpu_userq_restore_helper(uq_mgr, queue); 700 597 if (r) 701 598 ret = r; 702 599 } ··· 706 603 return ret; 707 604 } 708 605 709 - static int 710 - amdgpu_userq_validate_vm_bo(void *_unused, struct amdgpu_bo *bo) 606 + static int amdgpu_userq_validate_vm(void *param, struct amdgpu_bo *bo) 711 607 { 712 608 struct ttm_operation_ctx ctx = { false, false }; 713 - int ret; 714 609 715 610 amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); 716 - 717 - ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 718 - if (ret) 719 - DRM_ERROR("Fail to validate\n"); 720 - 721 - return ret; 611 + return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 722 612 } 723 613 614 + /* Handle all BOs on the invalidated list, validate them and update the PTs */ 724 615 static int 725 616 amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec, 617 + struct amdgpu_vm *vm) 618 + { 619 + struct ttm_operation_ctx ctx = { false, false }; 620 + struct amdgpu_bo_va *bo_va; 621 + struct amdgpu_bo *bo; 622 + int ret; 623 + 624 + spin_lock(&vm->invalidated_lock); 625 + while (!list_empty(&vm->invalidated)) { 626 + bo_va = list_first_entry(&vm->invalidated, 627 + struct amdgpu_bo_va, 628 + base.vm_status); 629 + spin_unlock(&vm->invalidated_lock); 630 + 631 + bo = bo_va->base.bo; 632 + ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2); 633 + if (unlikely(ret)) 634 + return ret; 635 + 636 + amdgpu_bo_placement_from_domain(bo, bo->allowed_domains); 637 + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 638 + if (ret) 639 + return ret; 640 + 641 + /* This moves the bo_va to the done list */ 642 + ret = amdgpu_vm_bo_update(adev, bo_va, false); 643 + if (ret) 644 + return ret; 645 + 646 + spin_lock(&vm->invalidated_lock); 647 + } 648 + spin_unlock(&vm->invalidated_lock); 649 + 650 + return 0; 651 + } 652 + 653 + /* Make sure the whole VM is ready to be used */ 654 + static int 655 + amdgpu_userq_vm_validate(struct amdgpu_userq_mgr *uq_mgr) 726 656 { 727 657 struct amdgpu_fpriv *fpriv = uq_mgr_to_fpriv(uq_mgr); 728 - struct amdgpu_vm *vm = &fpriv->vm; 729 658 struct amdgpu_device *adev = uq_mgr->adev; 659 + struct amdgpu_vm *vm = &fpriv->vm; 730 660 struct amdgpu_bo_va *bo_va; 731 - struct ww_acquire_ctx *ticket; 732 661 struct drm_exec exec; 733 - struct amdgpu_bo *bo; 734 - struct dma_resv *resv; 735 - bool clear, unlock; 736 - int ret = 0; 662 + int ret; 737 663 738 664 drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES,
0); 739 665 drm_exec_until_all_locked(&exec) { 740 - ret = amdgpu_vm_lock_pd(vm, &exec, 2); 666 + ret = amdgpu_vm_lock_pd(vm, &exec, 1); 741 667 drm_exec_retry_on_contention(&exec); 742 - if (unlikely(ret)) { 743 - drm_file_err(uq_mgr->file, "Failed to lock PD\n"); 668 + if (unlikely(ret)) 744 669 goto unlock_all; 745 - } 746 670 747 - /* Lock the done list */ 748 - list_for_each_entry(bo_va, &vm->done, base.vm_status) { 749 - bo = bo_va->base.bo; 750 - if (!bo) 751 - continue; 671 + ret = amdgpu_vm_lock_done_list(vm, &exec, 1); 672 + drm_exec_retry_on_contention(&exec); 673 + if (unlikely(ret)) 674 + goto unlock_all; 752 675 753 - ret = drm_exec_lock_obj(&exec, &bo->tbo.base); 754 - drm_exec_retry_on_contention(&exec); 755 - if (unlikely(ret)) 756 - goto unlock_all; 757 - } 676 + /* This validates PDs, PTs and per VM BOs */ 677 + ret = amdgpu_vm_validate(adev, vm, NULL, 678 + amdgpu_userq_validate_vm, 679 + NULL); 680 + if (unlikely(ret)) 681 + goto unlock_all; 682 + 683 + /* This locks and validates the remaining evicted BOs */ 684 + ret = amdgpu_userq_bo_validate(adev, &exec, vm); 685 + drm_exec_retry_on_contention(&exec); 686 + if (unlikely(ret)) 687 + goto unlock_all; 758 688 } 759 689 760 - spin_lock(&vm->status_lock); 761 - while (!list_empty(&vm->moved)) { 762 - bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, 763 - base.vm_status); 764 - spin_unlock(&vm->status_lock); 690 + ret = amdgpu_vm_handle_moved(adev, vm, NULL); 691 + if (ret) 692 + goto unlock_all; 765 693 766 - /* Per VM BOs never need to bo cleared in the page tables */ 767 - ret = amdgpu_vm_bo_update(adev, bo_va, false); 768 - if (ret) 769 - goto unlock_all; 770 - spin_lock(&vm->status_lock); 771 - } 694 + ret = amdgpu_vm_update_pdes(adev, vm, false); 695 + if (ret) 696 + goto unlock_all; 772 697 773 - ticket = &exec.ticket; 774 - while (!list_empty(&vm->invalidated)) { 775 - bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, 776 - base.vm_status); 777 - resv = bo_va->base.bo->tbo.base.resv; 778 - spin_unlock(&vm->status_lock); 779 - 780 - bo = bo_va->base.bo; 781 - ret = amdgpu_userq_validate_vm_bo(NULL, bo); 782 - if (ret) { 783 - drm_file_err(uq_mgr->file, "Failed to validate BO\n"); 784 - goto unlock_all; 785 - } 786 - 787 - /* Try to reserve the BO to avoid clearing its ptes */ 788 - if (!adev->debug_vm && dma_resv_trylock(resv)) { 789 - clear = false; 790 - unlock = true; 791 - /* The caller is already holding the reservation lock */ 792 - } else if (dma_resv_locking_ctx(resv) == ticket) { 793 - clear = false; 794 - unlock = false; 795 - /* Somebody else is using the BO right now */ 796 - } else { 797 - clear = true; 798 - unlock = false; 799 - } 800 - 801 - ret = amdgpu_vm_bo_update(adev, bo_va, clear); 802 - 803 - if (unlock) 804 - dma_resv_unlock(resv); 805 - if (ret) 806 - goto unlock_all; 807 - 808 - spin_lock(&vm->status_lock); 809 - } 810 - spin_unlock(&vm->status_lock); 698 + /* 699 + * We need to wait for all VM updates to finish before restarting the 700 + * queues. Using the done list like that is now ok since everything is 701 + * locked in place.
702 + */ 703 + list_for_each_entry(bo_va, &vm->done, base.vm_status) 704 + dma_fence_wait(bo_va->last_pt_update, false); 705 + dma_fence_wait(vm->last_update, false); 811 706 812 707 ret = amdgpu_eviction_fence_replace_fence(&fpriv->evf_mgr, &exec); 813 708 if (ret) ··· 826 725 827 726 mutex_lock(&uq_mgr->userq_mutex); 828 727 829 - ret = amdgpu_userq_validate_bos(uq_mgr); 728 + ret = amdgpu_userq_vm_validate(uq_mgr); 830 729 if (ret) { 831 730 drm_file_err(uq_mgr->file, "Failed to validate BOs to restore\n"); 832 731 goto unlock; ··· 851 750 852 751 /* Try to unmap all the queues in this process ctx */ 853 752 idr_for_each_entry(&uq_mgr->userq_idr, queue, queue_id) { 854 - r = amdgpu_userq_unmap_helper(uq_mgr, queue); 753 + r = amdgpu_userq_preempt_helper(uq_mgr, queue); 855 754 if (r) 856 755 ret = r; 857 756 } ··· 977 876 cancel_delayed_work_sync(&uqm->resume_work); 978 877 mutex_lock(&uqm->userq_mutex); 979 878 idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { 980 - r = amdgpu_userq_unmap_helper(uqm, queue); 879 + if (adev->in_s0ix) 880 + r = amdgpu_userq_preempt_helper(uqm, queue); 881 + else 882 + r = amdgpu_userq_unmap_helper(uqm, queue); 981 883 if (r) 982 884 ret = r; 983 885 } ··· 1005 901 list_for_each_entry_safe(uqm, tmp, &adev->userq_mgr_list, list) { 1006 902 mutex_lock(&uqm->userq_mutex); 1007 903 idr_for_each_entry(&uqm->userq_idr, queue, queue_id) { 1008 - r = amdgpu_userq_map_helper(uqm, queue); 904 + if (adev->in_s0ix) 905 + r = amdgpu_userq_restore_helper(uqm, queue); 906 + else 907 + r = amdgpu_userq_map_helper(uqm, queue); 1009 908 if (r) 1010 909 ret = r; 1011 910 } ··· 1042 935 if (((queue->queue_type == AMDGPU_HW_IP_GFX) || 1043 936 (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && 1044 937 (queue->xcp_id == idx)) { 1045 - r = amdgpu_userq_unmap_helper(uqm, queue); 938 + r = amdgpu_userq_preempt_helper(uqm, queue); 1046 939 if (r) 1047 940 ret = r; 1048 941 } ··· 1076 969 if (((queue->queue_type == AMDGPU_HW_IP_GFX) || 1077 970 (queue->queue_type == AMDGPU_HW_IP_COMPUTE)) && 1078 971 (queue->xcp_id == idx)) { 1079 - r = amdgpu_userq_map_helper(uqm, queue); 972 + r = amdgpu_userq_restore_helper(uqm, queue); 1080 973 if (r) 1081 974 ret = r; 1082 975 }
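
Note: amdgpu_userq_vm_validate() above is built on the drm_exec transaction loop: any contended reservation rolls the whole transaction back and retries until every lock is held. A minimal sketch of that idiom in isolation (lock_pair and the two objects are illustrative):

    #include <drm/drm_exec.h>

    static int lock_pair(struct drm_gem_object *a, struct drm_gem_object *b)
    {
        struct drm_exec exec;
        int ret;

        drm_exec_init(&exec, DRM_EXEC_IGNORE_DUPLICATES, 0);
        drm_exec_until_all_locked(&exec) {
            ret = drm_exec_lock_obj(&exec, a);
            drm_exec_retry_on_contention(&exec); /* on -EDEADLK: drop all, retry */
            if (ret)
                goto out;

            ret = drm_exec_lock_obj(&exec, b);
            drm_exec_retry_on_contention(&exec);
            if (ret)
                goto out;
        }
        /* both reservations are held here */
    out:
        drm_exec_fini(&exec);
        return ret;
    }
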
+2 -2
drivers/gpu/drm/amd/amdgpu/amdgpu_userq.h
··· 120 120 void amdgpu_userq_evict(struct amdgpu_userq_mgr *uq_mgr, 121 121 struct amdgpu_eviction_fence *ev_fence); 122 122 123 - int amdgpu_userq_active(struct amdgpu_userq_mgr *uq_mgr); 124 - 125 123 void amdgpu_userq_ensure_ev_fence(struct amdgpu_userq_mgr *userq_mgr, 126 124 struct amdgpu_eviction_fence_mgr *evf_mgr); 127 125 ··· 137 139 int amdgpu_userq_start_sched_for_enforce_isolation(struct amdgpu_device *adev, 138 140 u32 idx); 139 141 142 + int amdgpu_userq_input_va_validate(struct amdgpu_vm *vm, u64 addr, 143 + u64 expected_size); 140 144 #endif
+91
drivers/gpu/drm/amd/amdgpu/amdgpu_utils.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright 2025 Advanced Micro Devices, Inc. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the "Software"), 7 + * to deal in the Software without restriction, including without limitation 8 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 9 + * and/or sell copies of the Software, and to permit persons to whom the 10 + * Software is furnished to do so, subject to the following conditions: 11 + * 12 + * The above copyright notice and this permission notice shall be included in 13 + * all copies or substantial portions of the Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 19 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 20 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 21 + * OTHER DEALINGS IN THE SOFTWARE. 22 + * 23 + */ 24 + 25 + #ifndef AMDGPU_UTILS_H_ 26 + #define AMDGPU_UTILS_H_ 27 + 28 + /* ---------- Generic 2-bit capability attribute encoding ---------- 29 + * 00 INVALID, 01 RO, 10 WO, 11 RW 30 + */ 31 + enum amdgpu_cap_attr { 32 + AMDGPU_CAP_ATTR_INVALID = 0, 33 + AMDGPU_CAP_ATTR_RO = 1 << 0, 34 + AMDGPU_CAP_ATTR_WO = 1 << 1, 35 + AMDGPU_CAP_ATTR_RW = (AMDGPU_CAP_ATTR_RO | AMDGPU_CAP_ATTR_WO), 36 + }; 37 + 38 + #define AMDGPU_CAP_ATTR_BITS 2 39 + #define AMDGPU_CAP_ATTR_MAX ((1U << AMDGPU_CAP_ATTR_BITS) - 1) 40 + 41 + /* Internal helper to build helpers for a given enum NAME */ 42 + #define DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) \ 43 + enum { NAME##_BITMAP_BITS = NAME##_COUNT * AMDGPU_CAP_ATTR_BITS }; \ 44 + struct NAME##_caps { \ 45 + DECLARE_BITMAP(bmap, NAME##_BITMAP_BITS); \ 46 + }; \ 47 + static inline unsigned int NAME##_ATTR_START(enum NAME##_cap_id cap) \ 48 + { return (unsigned int)cap * AMDGPU_CAP_ATTR_BITS; } \ 49 + static inline void NAME##_attr_init(struct NAME##_caps *c) \ 50 + { if (c) bitmap_zero(c->bmap, NAME##_BITMAP_BITS); } \ 51 + static inline int NAME##_attr_set(struct NAME##_caps *c, \ 52 + enum NAME##_cap_id cap, enum amdgpu_cap_attr attr) \ 53 + { \ 54 + if (!c) \ 55 + return -EINVAL; \ 56 + if (cap >= NAME##_COUNT) \ 57 + return -EINVAL; \ 58 + if ((unsigned int)attr > AMDGPU_CAP_ATTR_MAX) \ 59 + return -EINVAL; \ 60 + bitmap_write(c->bmap, (unsigned long)attr, \ 61 + NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \ 62 + return 0; \ 63 + } \ 64 + static inline int NAME##_attr_get(const struct NAME##_caps *c, \ 65 + enum NAME##_cap_id cap, enum amdgpu_cap_attr *out) \ 66 + { \ 67 + unsigned long v; \ 68 + if (!c || !out) \ 69 + return -EINVAL; \ 70 + if (cap >= NAME##_COUNT) \ 71 + return -EINVAL; \ 72 + v = bitmap_read(c->bmap, NAME##_ATTR_START(cap), AMDGPU_CAP_ATTR_BITS); \ 73 + *out = (enum amdgpu_cap_attr)v; \ 74 + return 0; \ 75 + } \ 76 + static inline bool NAME##_cap_is_ro(const struct NAME##_caps *c, enum NAME##_cap_id id) \ 77 + { enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RO; } \ 78 + static inline bool NAME##_cap_is_wo(const struct NAME##_caps *c, enum NAME##_cap_id id) \ 79 + { enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_WO; } \ 80 + static inline
bool NAME##_cap_is_rw(const struct NAME##_caps *c, enum NAME##_cap_id id) \ 81 + { enum amdgpu_cap_attr a; return !NAME##_attr_get(c, id, &a) && a == AMDGPU_CAP_ATTR_RW; } 82 + 83 + /* Element expander for enum creation */ 84 + #define _CAP_ENUM_ELEM(x) x, 85 + 86 + /* Public macro: declare enum + helpers from an X‑macro list */ 87 + #define DECLARE_ATTR_CAP_CLASS(NAME, LIST_MACRO) \ 88 + enum NAME##_cap_id { LIST_MACRO(_CAP_ENUM_ELEM) NAME##_COUNT }; \ 89 + DECLARE_ATTR_CAP_CLASS_HELPERS(NAME) 90 + 91 + #endif /* AMDGPU_UTILS_H_ */
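The new header is pure macro machinery: DECLARE_ATTR_CAP_CLASS() turns an X-macro list into an enum of capability IDs plus a bitmap-backed struct with init/set/get/query helpers, two bits per capability. A minimal usage sketch follows; the "demo" class and its two capability names are made up for illustration (the real instantiation is in amdgpu_virt.h below):

#define DEMO_CAPS_LIST(X) \
	X(DEMO_CAP_FOO)   \
	X(DEMO_CAP_BAR)

DECLARE_ATTR_CAP_CLASS(demo, DEMO_CAPS_LIST);
/* expands to:
 *   enum demo_cap_id { DEMO_CAP_FOO, DEMO_CAP_BAR, demo_COUNT };
 *   struct demo_caps { DECLARE_BITMAP(bmap, demo_COUNT * 2); };
 *   plus demo_attr_init/set/get() and demo_cap_is_ro/wo/rw()
 */

static void demo_caps_example(void)
{
	struct demo_caps caps;

	demo_attr_init(&caps);	/* every cap starts as AMDGPU_CAP_ATTR_INVALID */
	demo_attr_set(&caps, DEMO_CAP_FOO, AMDGPU_CAP_ATTR_RO);

	if (demo_cap_is_ro(&caps, DEMO_CAP_FOO)) {
		/* FOO's 2-bit field now reads back as RO (01);
		 * BAR is still INVALID (00) */
	}
}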
+7 -9
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
··· 257 257 return 0; 258 258 } 259 259 260 - int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i) 260 + void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i) 261 261 { 262 262 int j; 263 263 264 264 if (adev->vcn.harvest_config & (1 << i)) 265 - return 0; 265 + return; 266 266 267 267 amdgpu_bo_free_kernel( 268 268 &adev->vcn.inst[i].dpg_sram_bo, ··· 292 292 293 293 mutex_destroy(&adev->vcn.inst[i].vcn_pg_lock); 294 294 mutex_destroy(&adev->vcn.inst[i].vcn1_jpeg1_workaround); 295 - 296 - return 0; 297 295 } 298 296 299 297 bool amdgpu_vcn_is_disabled_vcn(struct amdgpu_device *adev, enum vcn_ring_type type, uint32_t vcn_instance) ··· 1157 1159 { 1158 1160 struct amdgpu_vcn_inst *vcn; 1159 1161 void *log_buf; 1160 - volatile struct amdgpu_vcn_fwlog *plog; 1162 + struct amdgpu_vcn_fwlog *plog; 1161 1163 unsigned int read_pos, write_pos, available, i, read_bytes = 0; 1162 1164 unsigned int read_num[2] = {0}; 1163 1165 ··· 1170 1172 1171 1173 log_buf = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; 1172 1174 1173 - plog = (volatile struct amdgpu_vcn_fwlog *)log_buf; 1175 + plog = (struct amdgpu_vcn_fwlog *)log_buf; 1174 1176 read_pos = plog->rptr; 1175 1177 write_pos = plog->wptr; 1176 1178 ··· 1237 1239 void amdgpu_vcn_fwlog_init(struct amdgpu_vcn_inst *vcn) 1238 1240 { 1239 1241 #if defined(CONFIG_DEBUG_FS) 1240 - volatile uint32_t *flag = vcn->fw_shared.cpu_addr; 1242 + uint32_t *flag = vcn->fw_shared.cpu_addr; 1241 1243 void *fw_log_cpu_addr = vcn->fw_shared.cpu_addr + vcn->fw_shared.mem_size; 1242 1244 uint64_t fw_log_gpu_addr = vcn->fw_shared.gpu_addr + vcn->fw_shared.mem_size; 1243 - volatile struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr; 1244 - volatile struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr 1245 + struct amdgpu_vcn_fwlog *log_buf = fw_log_cpu_addr; 1246 + struct amdgpu_fw_shared_fw_logging *fw_log = vcn->fw_shared.cpu_addr 1245 1247 + vcn->fw_shared.log_offset; 1246 1248 *flag |= cpu_to_le32(AMDGPU_VCN_FW_LOGGING_FLAG); 1247 1249 fw_log->is_enabled = 1;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
··· 516 516 517 517 int amdgpu_vcn_early_init(struct amdgpu_device *adev, int i); 518 518 int amdgpu_vcn_sw_init(struct amdgpu_device *adev, int i); 519 - int amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i); 519 + void amdgpu_vcn_sw_fini(struct amdgpu_device *adev, int i); 520 520 int amdgpu_vcn_suspend(struct amdgpu_device *adev, int i); 521 521 int amdgpu_vcn_resume(struct amdgpu_device *adev, int i); 522 522 void amdgpu_vcn_ring_begin_use(struct amdgpu_ring *ring);
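The int-to-void conversion reflects that amdgpu_vcn_sw_fini() has no failure path left; harvested instances simply return early. A hypothetical per-IP sw_fini callback (not taken from this diff) would shrink accordingly:

static int hypothetical_vcn_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i;

	/* before: each iteration had to check and propagate an error code;
	 * after: the call cannot fail, so teardown is fire-and-forget */
	for (i = 0; i < adev->vcn.num_vcn_inst; ++i)
		amdgpu_vcn_sw_fini(adev, i);

	return 0;
}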
+55
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
··· 828 828 { 829 829 ratelimit_state_init(&adev->virt.ras.ras_error_cnt_rs, 5 * HZ, 1); 830 830 ratelimit_state_init(&adev->virt.ras.ras_cper_dump_rs, 5 * HZ, 1); 831 + ratelimit_state_init(&adev->virt.ras.ras_chk_criti_rs, 5 * HZ, 1); 831 832 832 833 ratelimit_set_flags(&adev->virt.ras.ras_error_cnt_rs, 833 834 RATELIMIT_MSG_ON_RELEASE); 834 835 ratelimit_set_flags(&adev->virt.ras.ras_cper_dump_rs, 836 + RATELIMIT_MSG_ON_RELEASE); 837 + ratelimit_set_flags(&adev->virt.ras.ras_chk_criti_rs, 835 838 RATELIMIT_MSG_ON_RELEASE); 836 839 837 840 mutex_init(&adev->virt.ras.ras_telemetry_mutex); ··· 1503 1500 1504 1501 if (virt->ops && virt->ops->req_bad_pages) 1505 1502 virt->ops->req_bad_pages(adev); 1503 + } 1504 + 1505 + static int amdgpu_virt_cache_chk_criti_hit(struct amdgpu_device *adev, 1506 + struct amdsriov_ras_telemetry *host_telemetry, 1507 + bool *hit) 1508 + { 1509 + struct amd_sriov_ras_chk_criti *tmp = NULL; 1510 + uint32_t checksum, used_size; 1511 + 1512 + checksum = host_telemetry->header.checksum; 1513 + used_size = host_telemetry->header.used_size; 1514 + 1515 + if (used_size > (AMD_SRIOV_RAS_TELEMETRY_SIZE_KB << 10)) 1516 + return 0; 1517 + 1518 + tmp = kmemdup(&host_telemetry->body.chk_criti, used_size, GFP_KERNEL); 1519 + if (!tmp) 1520 + return -ENOMEM; 1521 + 1522 + if (checksum != amd_sriov_msg_checksum(tmp, used_size, 0, 0)) 1523 + goto out; 1524 + 1525 + if (hit) 1526 + *hit = tmp->hit ? true : false; 1527 + 1528 + out: 1529 + kfree(tmp); 1530 + 1531 + return 0; 1532 + } 1533 + 1534 + int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit) 1535 + { 1536 + struct amdgpu_virt *virt = &adev->virt; 1537 + int r = -EPERM; 1538 + 1539 + if (!virt->ops || !virt->ops->req_ras_chk_criti) 1540 + return -EOPNOTSUPP; 1541 + 1542 + /* Host allows 15 ras telemetry requests per 60 seconds. After that, the Host 1543 + * will ignore incoming guest messages. Ratelimit the guest messages to 1544 + * prevent guest self-DoS. 1545 + */ 1546 + if (__ratelimit(&virt->ras.ras_chk_criti_rs)) { 1547 + mutex_lock(&virt->ras.ras_telemetry_mutex); 1548 + if (!virt->ops->req_ras_chk_criti(adev, addr)) 1549 + r = amdgpu_virt_cache_chk_criti_hit( 1550 + adev, virt->fw_reserve.ras_telemetry, hit); 1551 + mutex_unlock(&virt->ras.ras_telemetry_mutex); 1552 + } 1553 + 1554 + return r; 1506 1555 }
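A sketch of how a consumer might use the new query; the fault-handler framing and the fault_addr variable are illustrative, not part of this diff:

bool hit = false;
int r = amdgpu_virt_check_vf_critical_region(adev, fault_addr, &hit);

if (r == -EOPNOTSUPP) {
	/* host has no req_ras_chk_criti op, fall back to existing handling */
} else if (r == -EPERM) {
	/* rate limited, or the host request failed; try again later */
} else if (!r && hit) {
	/* fault_addr falls inside a host-critical region */
}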
+8
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
··· 98 98 int (*req_ras_err_count)(struct amdgpu_device *adev); 99 99 int (*req_ras_cper_dump)(struct amdgpu_device *adev, u64 vf_rptr); 100 100 int (*req_bad_pages)(struct amdgpu_device *adev); 101 + int (*req_ras_chk_criti)(struct amdgpu_device *adev, u64 addr); 101 102 }; 102 103 103 104 /* ··· 253 252 struct amdgpu_virt_ras { 254 253 struct ratelimit_state ras_error_cnt_rs; 255 254 struct ratelimit_state ras_cper_dump_rs; 255 + struct ratelimit_state ras_chk_criti_rs; 256 256 struct mutex ras_telemetry_mutex; 257 257 uint64_t cper_rptr; 258 258 }; 259 + 260 + #define AMDGPU_VIRT_CAPS_LIST(X) X(AMDGPU_VIRT_CAP_POWER_LIMIT) 261 + 262 + DECLARE_ATTR_CAP_CLASS(amdgpu_virt, AMDGPU_VIRT_CAPS_LIST); 259 263 260 264 /* GPU virtualization */ 261 265 struct amdgpu_virt { ··· 280 274 const struct amdgpu_virt_ops *ops; 281 275 struct amdgpu_vf_error_buffer vf_errors; 282 276 struct amdgpu_virt_fw_reserve fw_reserve; 277 + struct amdgpu_virt_caps virt_caps; 283 278 uint32_t gim_feature; 284 279 uint32_t reg_access_mode; 285 280 int req_init_data_ver; ··· 455 448 bool amdgpu_virt_ras_telemetry_block_en(struct amdgpu_device *adev, 456 449 enum amdgpu_ras_block block); 457 450 void amdgpu_virt_request_bad_pages(struct amdgpu_device *adev); 451 + int amdgpu_virt_check_vf_critical_region(struct amdgpu_device *adev, u64 addr, bool *hit); 458 452 #endif
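For the amdgpu_virt class the macro expands to enum amdgpu_virt_cap_id { AMDGPU_VIRT_CAP_POWER_LIMIT, amdgpu_virt_COUNT }, struct amdgpu_virt_caps, and the amdgpu_virt_attr_*()/amdgpu_virt_cap_is_*() helpers. The call sites below are hypothetical sketches of the intended pattern, not code from this diff:

/* an init-data parse path could record that the VF may adjust its power limit */
amdgpu_virt_attr_set(&adev->virt.virt_caps,
		     AMDGPU_VIRT_CAP_POWER_LIMIT, AMDGPU_CAP_ATTR_RW);

/* a pm handler could then gate VF power-limit writes on the capability */
if (amdgpu_sriov_vf(adev) &&
    !amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT))
	return -EOPNOTSUPP;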
-8
drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c
··· 14 14 #include "dce_v8_0.h" 15 15 #endif 16 16 #include "dce_v10_0.h" 17 - #include "dce_v11_0.h" 18 17 #include "ivsrcid/ivsrcid_vislands30.h" 19 18 #include "amdgpu_vkms.h" 20 19 #include "amdgpu_display.h" ··· 579 580 case CHIP_FIJI: 580 581 case CHIP_TONGA: 581 582 dce_v10_0_disable_dce(adev); 582 - break; 583 - case CHIP_CARRIZO: 584 - case CHIP_STONEY: 585 - case CHIP_POLARIS10: 586 - case CHIP_POLARIS11: 587 - case CHIP_VEGAM: 588 - dce_v11_0_disable_dce(adev); 589 583 break; 590 584 case CHIP_TOPAZ: 591 585 #ifdef CONFIG_DRM_AMDGPU_SI
+111 -94
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 128 128 }; 129 129 130 130 /** 131 + * amdgpu_vm_assert_locked - check if VM is correctly locked 132 + * @vm: the VM which should be tested 133 + * 134 + * Asserts that the VM root PD is locked. 135 + */ 136 + static void amdgpu_vm_assert_locked(struct amdgpu_vm *vm) 137 + { 138 + dma_resv_assert_held(vm->root.bo->tbo.base.resv); 139 + } 140 + 141 + /** 131 142 * amdgpu_vm_set_pasid - manage pasid and vm ptr mapping 132 143 * 133 144 * @adev: amdgpu_device pointer ··· 153 142 u32 pasid) 154 143 { 155 144 int r; 145 + 146 + amdgpu_vm_assert_locked(vm); 156 147 157 148 if (vm->pasid == pasid) 158 149 return 0; ··· 194 181 struct amdgpu_bo *bo = vm_bo->bo; 195 182 196 183 vm_bo->moved = true; 197 - spin_lock(&vm_bo->vm->status_lock); 184 + amdgpu_vm_assert_locked(vm); 198 185 if (bo->tbo.type == ttm_bo_type_kernel) 199 186 list_move(&vm_bo->vm_status, &vm->evicted); 200 187 else 201 188 list_move_tail(&vm_bo->vm_status, &vm->evicted); 202 - spin_unlock(&vm_bo->vm->status_lock); 203 189 } 204 190 /** 205 191 * amdgpu_vm_bo_moved - vm_bo is moved ··· 210 198 */ 211 199 static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo) 212 200 { 213 - spin_lock(&vm_bo->vm->status_lock); 201 + amdgpu_vm_assert_locked(vm_bo->vm); 214 202 list_move(&vm_bo->vm_status, &vm_bo->vm->moved); 215 - spin_unlock(&vm_bo->vm->status_lock); 216 203 } 217 204 218 205 /** ··· 224 213 */ 225 214 static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo) 226 215 { 227 - spin_lock(&vm_bo->vm->status_lock); 216 + amdgpu_vm_assert_locked(vm_bo->vm); 228 217 list_move(&vm_bo->vm_status, &vm_bo->vm->idle); 229 - spin_unlock(&vm_bo->vm->status_lock); 230 218 vm_bo->moved = false; 231 219 } 232 220 ··· 239 229 */ 240 230 static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo) 241 231 { 242 - spin_lock(&vm_bo->vm->status_lock); 232 + spin_lock(&vm_bo->vm->invalidated_lock); 243 233 list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated); 244 - spin_unlock(&vm_bo->vm->status_lock); 234 + spin_unlock(&vm_bo->vm->invalidated_lock); 245 235 } 246 236 247 237 /** ··· 254 244 */ 255 245 static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo) 256 246 { 247 + amdgpu_vm_assert_locked(vm_bo->vm); 257 248 vm_bo->moved = true; 258 - spin_lock(&vm_bo->vm->status_lock); 259 249 list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user); 260 - spin_unlock(&vm_bo->vm->status_lock); 261 250 } 262 251 263 252 /** ··· 269 260 */ 270 261 static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo) 271 262 { 272 - if (vm_bo->bo->parent) { 273 - spin_lock(&vm_bo->vm->status_lock); 263 + amdgpu_vm_assert_locked(vm_bo->vm); 264 + if (vm_bo->bo->parent) 274 265 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); 275 - spin_unlock(&vm_bo->vm->status_lock); 276 - } else { 266 + else 277 267 amdgpu_vm_bo_idle(vm_bo); 278 - } 279 268 } 280 269 281 270 /** ··· 286 279 */ 287 280 static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo) 288 281 { 289 - spin_lock(&vm_bo->vm->status_lock); 282 + amdgpu_vm_assert_locked(vm_bo->vm); 290 283 list_move(&vm_bo->vm_status, &vm_bo->vm->done); 291 - spin_unlock(&vm_bo->vm->status_lock); 292 284 } 293 285 294 286 /** ··· 301 295 { 302 296 struct amdgpu_vm_bo_base *vm_bo, *tmp; 303 297 304 - spin_lock(&vm->status_lock); 298 + spin_lock(&vm->invalidated_lock); 305 299 list_splice_init(&vm->done, &vm->invalidated); 306 300 list_for_each_entry(vm_bo, &vm->invalidated, vm_status) 307 301 vm_bo->moved = true; 302 + spin_unlock(&vm->invalidated_lock); 303 + 304 +
amdgpu_vm_assert_locked(vm); 308 305 list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) { 309 306 struct amdgpu_bo *bo = vm_bo->bo; 310 307 ··· 317 308 else if (bo->parent) 318 309 list_move(&vm_bo->vm_status, &vm_bo->vm->relocated); 319 310 } 320 - spin_unlock(&vm->status_lock); 321 311 } 322 312 323 313 /** 324 314 * amdgpu_vm_update_shared - helper to update shared memory stat 325 315 * @base: base structure for tracking BO usage in a VM 326 316 * 327 - * Takes the vm status_lock and updates the shared memory stat. If the basic 317 + * Takes the vm stats_lock and updates the shared memory stat. If the basic 328 318 stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called 329 319 as well. 330 320 */ ··· 335 327 uint32_t bo_memtype = amdgpu_bo_mem_stats_placement(bo); 336 328 bool shared; 337 329 338 - spin_lock(&vm->status_lock); 330 + dma_resv_assert_held(bo->tbo.base.resv); 331 + spin_lock(&vm->stats_lock); 339 332 shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); 340 333 if (base->shared != shared) { 341 334 base->shared = shared; ··· 348 339 vm->stats[bo_memtype].drm.private += size; 349 340 } 350 341 } 351 - spin_unlock(&vm->status_lock); 342 + spin_unlock(&vm->stats_lock); 352 343 } 353 344 354 345 /** ··· 373 364 * be bo->tbo.resource 374 365 * @sign: if we should add (+1) or subtract (-1) from the stat 375 366 * 376 - * Caller need to have the vm status_lock held. Useful for when multiple update 367 + * Caller needs to hold the vm stats_lock. Useful when multiple updates 377 368 need to happen at the same time. 378 369 */ 379 370 static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base, 380 - struct ttm_resource *res, int sign) 371 + struct ttm_resource *res, int sign) 381 372 { 382 373 struct amdgpu_vm *vm = base->vm; 383 374 struct amdgpu_bo *bo = base->bo; ··· 401 392 */ 402 393 if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) 403 394 vm->stats[res_memtype].drm.purgeable += size; 404 - if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype))) 395 + if (!(bo->preferred_domains & 396 + amdgpu_mem_type_to_domain(res_memtype))) 405 397 vm->stats[bo_memtype].evicted += size; 406 398 } 407 399 } ··· 421 411 { 422 412 struct amdgpu_vm *vm = base->vm; 423 413 424 - spin_lock(&vm->status_lock); 414 + spin_lock(&vm->stats_lock); 425 415 amdgpu_vm_update_stats_locked(base, res, sign); 426 - spin_unlock(&vm->status_lock); 416 + spin_unlock(&vm->stats_lock); 427 417 } 428 418 429 419 /** ··· 449 439 base->next = bo->vm_bo; 450 440 bo->vm_bo = base; 451 441 452 - spin_lock(&vm->status_lock); 442 + spin_lock(&vm->stats_lock); 453 443 base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base); 454 444 amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1); 455 - spin_unlock(&vm->status_lock); 445 + spin_unlock(&vm->stats_lock); 456 446 457 447 if (!amdgpu_vm_is_bo_always_valid(vm, bo)) 458 448 return; ··· 492 482 /* We need at least two fences for the VM PD/PT updates */ 493 483 return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, 494 484 2 + num_fences); 485 + } 486 + 487 + /** 488 + * amdgpu_vm_lock_done_list - lock all BOs on the done list 489 + * @vm: vm providing the BOs 490 + * @exec: drm execution context 491 + * @num_fences: number of extra fences to reserve 492 + * 493 + * Lock the BOs on the done list in the DRM execution context.
494 + */ 495 + int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, 496 + unsigned int num_fences) 497 + { 498 + struct list_head *prev = &vm->done; 499 + struct amdgpu_bo_va *bo_va; 500 + struct amdgpu_bo *bo; 501 + int ret; 502 + 503 + /* We can only trust prev->next while holding the lock */ 504 + spin_lock(&vm->invalidated_lock); 505 + while (!list_is_head(prev->next, &vm->done)) { 506 + bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status); 507 + spin_unlock(&vm->invalidated_lock); 508 + 509 + bo = bo_va->base.bo; 510 + if (bo) { 511 + ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1); 512 + if (unlikely(ret)) 513 + return ret; 514 + } 515 + spin_lock(&vm->invalidated_lock); 516 + prev = prev->next; 517 + } 518 + spin_unlock(&vm->invalidated_lock); 519 + 520 + return 0; 495 521 } 496 522 497 523 /** ··· 621 575 void *param) 622 576 { 623 577 uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm); 624 - struct amdgpu_vm_bo_base *bo_base; 578 + struct amdgpu_vm_bo_base *bo_base, *tmp; 625 579 struct amdgpu_bo *bo; 626 580 int r; 627 581 ··· 634 588 return r; 635 589 } 636 590 637 - spin_lock(&vm->status_lock); 638 - while (!list_empty(&vm->evicted)) { 639 - bo_base = list_first_entry(&vm->evicted, 640 - struct amdgpu_vm_bo_base, 641 - vm_status); 642 - spin_unlock(&vm->status_lock); 643 - 591 + list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) { 644 592 bo = bo_base->bo; 645 593 646 594 r = validate(param, bo); ··· 647 607 vm->update_funcs->map_table(to_amdgpu_bo_vm(bo)); 648 608 amdgpu_vm_bo_relocated(bo_base); 649 609 } 650 - spin_lock(&vm->status_lock); 651 610 } 652 - while (ticket && !list_empty(&vm->evicted_user)) { 653 - bo_base = list_first_entry(&vm->evicted_user, 654 - struct amdgpu_vm_bo_base, 655 - vm_status); 656 - spin_unlock(&vm->status_lock); 657 611 658 - bo = bo_base->bo; 612 + if (ticket) { 613 + list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user, 614 + vm_status) { 615 + bo = bo_base->bo; 616 + dma_resv_assert_held(bo->tbo.base.resv); 659 617 660 - if (dma_resv_locking_ctx(bo->tbo.base.resv) != ticket) { 661 - struct amdgpu_task_info *ti = amdgpu_vm_get_task_info_vm(vm); 618 + r = validate(param, bo); 619 + if (r) 620 + return r; 662 621 663 - pr_warn_ratelimited("Evicted user BO is not reserved\n"); 664 - if (ti) { 665 - pr_warn_ratelimited("pid %d\n", ti->task.pid); 666 - amdgpu_vm_put_task_info(ti); 667 - } 668 - 669 - return -EINVAL; 622 + amdgpu_vm_bo_invalidated(bo_base); 670 623 } 671 - 672 - r = validate(param, bo); 673 - if (r) 674 - return r; 675 - 676 - amdgpu_vm_bo_invalidated(bo_base); 677 - 678 - spin_lock(&vm->status_lock); 679 624 } 680 - spin_unlock(&vm->status_lock); 681 625 682 626 amdgpu_vm_eviction_lock(vm); 683 627 vm->evicting = false; ··· 684 660 { 685 661 bool ret; 686 662 663 + amdgpu_vm_assert_locked(vm); 664 + 687 665 amdgpu_vm_eviction_lock(vm); 688 666 ret = !vm->evicting; 689 667 amdgpu_vm_eviction_unlock(vm); 690 668 691 - spin_lock(&vm->status_lock); 692 669 ret &= list_empty(&vm->evicted); 693 - spin_unlock(&vm->status_lock); 694 670 695 671 spin_lock(&vm->immediate.lock); 696 672 ret &= !vm->immediate.stopped; ··· 981 957 struct amdgpu_vm *vm, bool immediate) 982 958 { 983 959 struct amdgpu_vm_update_params params; 984 - struct amdgpu_vm_bo_base *entry; 960 + struct amdgpu_vm_bo_base *entry, *tmp; 985 961 bool flush_tlb_needed = false; 986 - LIST_HEAD(relocated); 987 962 int r, idx; 988 963 989 - spin_lock(&vm->status_lock); 990 - list_splice_init(&vm->relocated, &relocated); 
991 - spin_unlock(&vm->status_lock); 964 + amdgpu_vm_assert_locked(vm); 992 965 993 - if (list_empty(&relocated)) 966 + if (list_empty(&vm->relocated)) 994 967 return 0; 995 968 996 969 if (!drm_dev_enter(adev_to_drm(adev), &idx)) ··· 1003 982 if (r) 1004 983 goto error; 1005 984 1006 - list_for_each_entry(entry, &relocated, vm_status) { 985 + list_for_each_entry(entry, &vm->relocated, vm_status) { 1007 986 /* vm_flush_needed after updating moved PDEs */ 1008 987 flush_tlb_needed |= entry->moved; 1009 988 ··· 1019 998 if (flush_tlb_needed) 1020 999 atomic64_inc(&vm->tlb_seq); 1021 1000 1022 - while (!list_empty(&relocated)) { 1023 - entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base, 1024 - vm_status); 1001 + list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) { 1025 1002 amdgpu_vm_bo_idle(entry); 1026 1003 } 1027 1004 ··· 1246 1227 void amdgpu_vm_get_memory(struct amdgpu_vm *vm, 1247 1228 struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]) 1248 1229 { 1249 - spin_lock(&vm->status_lock); 1230 + spin_lock(&vm->stats_lock); 1250 1231 memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM); 1251 - spin_unlock(&vm->status_lock); 1232 + spin_unlock(&vm->stats_lock); 1252 1233 } 1253 1234 1254 1235 /** ··· 1615 1596 struct amdgpu_vm *vm, 1616 1597 struct ww_acquire_ctx *ticket) 1617 1598 { 1618 - struct amdgpu_bo_va *bo_va; 1599 + struct amdgpu_bo_va *bo_va, *tmp; 1619 1600 struct dma_resv *resv; 1620 1601 bool clear, unlock; 1621 1602 int r; 1622 1603 1623 - spin_lock(&vm->status_lock); 1624 - while (!list_empty(&vm->moved)) { 1625 - bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va, 1626 - base.vm_status); 1627 - spin_unlock(&vm->status_lock); 1628 - 1604 + list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { 1629 1605 /* Per VM BOs never need to bo cleared in the page tables */ 1630 1606 r = amdgpu_vm_bo_update(adev, bo_va, false); 1631 1607 if (r) 1632 1608 return r; 1633 - spin_lock(&vm->status_lock); 1634 1609 } 1635 1610 1611 + spin_lock(&vm->invalidated_lock); 1636 1612 while (!list_empty(&vm->invalidated)) { 1637 1613 bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va, 1638 1614 base.vm_status); 1639 1615 resv = bo_va->base.bo->tbo.base.resv; 1640 - spin_unlock(&vm->status_lock); 1616 + spin_unlock(&vm->invalidated_lock); 1641 1617 1642 1618 /* Try to reserve the BO to avoid clearing its ptes */ 1643 1619 if (!adev->debug_vm && dma_resv_trylock(resv)) { ··· 1664 1650 bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM)) 1665 1651 amdgpu_vm_bo_evicted_user(&bo_va->base); 1666 1652 1667 - spin_lock(&vm->status_lock); 1653 + spin_lock(&vm->invalidated_lock); 1668 1654 } 1669 - spin_unlock(&vm->status_lock); 1655 + spin_unlock(&vm->invalidated_lock); 1670 1656 1671 1657 return 0; 1672 1658 } ··· 2195 2181 } 2196 2182 } 2197 2183 2198 - spin_lock(&vm->status_lock); 2184 + spin_lock(&vm->invalidated_lock); 2199 2185 list_del(&bo_va->base.vm_status); 2200 - spin_unlock(&vm->status_lock); 2186 + spin_unlock(&vm->invalidated_lock); 2201 2187 2202 2188 list_for_each_entry_safe(mapping, next, &bo_va->valids, list) { 2203 2189 list_del(&mapping->list); ··· 2305 2291 for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) { 2306 2292 struct amdgpu_vm *vm = bo_base->vm; 2307 2293 2308 - spin_lock(&vm->status_lock); 2294 + spin_lock(&vm->stats_lock); 2309 2295 amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1); 2310 2296 amdgpu_vm_update_stats_locked(bo_base, new_mem, +1); 2311 - spin_unlock(&vm->status_lock); 2297 + 
spin_unlock(&vm->stats_lock); 2312 2298 } 2313 2299 2314 2300 amdgpu_vm_bo_invalidate(bo, evicted); ··· 2575 2561 INIT_LIST_HEAD(&vm->relocated); 2576 2562 INIT_LIST_HEAD(&vm->moved); 2577 2563 INIT_LIST_HEAD(&vm->idle); 2564 + spin_lock_init(&vm->invalidated_lock); 2578 2565 INIT_LIST_HEAD(&vm->invalidated); 2579 - spin_lock_init(&vm->status_lock); 2580 2566 INIT_LIST_HEAD(&vm->freed); 2581 2567 INIT_LIST_HEAD(&vm->done); 2582 2568 INIT_KFIFO(vm->faults); 2569 + spin_lock_init(&vm->stats_lock); 2583 2570 2584 2571 r = amdgpu_vm_init_entities(adev, vm); 2585 2572 if (r) ··· 3045 3030 unsigned int total_done_objs = 0; 3046 3031 unsigned int id = 0; 3047 3032 3048 - spin_lock(&vm->status_lock); 3033 + amdgpu_vm_assert_locked(vm); 3034 + 3049 3035 seq_puts(m, "\tIdle BOs:\n"); 3050 3036 list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) { 3051 3037 if (!bo_va->base.bo) ··· 3084 3068 id = 0; 3085 3069 3086 3070 seq_puts(m, "\tInvalidated BOs:\n"); 3071 + spin_lock(&vm->invalidated_lock); 3087 3072 list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) { 3088 3073 if (!bo_va->base.bo) 3089 3074 continue; 3090 3075 total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m); 3091 3076 } 3077 + spin_unlock(&vm->invalidated_lock); 3092 3078 total_invalidated_objs = id; 3093 3079 id = 0; 3094 3080 ··· 3100 3082 continue; 3101 3083 total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m); 3102 3084 } 3103 - spin_unlock(&vm->status_lock); 3104 3085 total_done_objs = id; 3105 3086 3106 3087 seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
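Taken together with amdgpu_vm_lock_pd(), the new helper lets a caller bring the root PD and every done-list BO under one drm_exec ww-acquire loop before walking the state lists that are now protected by the root PD reservation. A minimal sketch, with error handling trimmed (the surrounding function is hypothetical):

struct drm_exec exec;
int r;

drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
drm_exec_until_all_locked(&exec) {
	r = amdgpu_vm_lock_pd(vm, &exec, 2);
	drm_exec_retry_on_contention(&exec);
	if (!r)
		r = amdgpu_vm_lock_done_list(vm, &exec, 2);
	drm_exec_retry_on_contention(&exec);
	if (r)
		break;
}
/* ... with everything locked, the PD/PT and per-VM BO lists may be walked ... */
drm_exec_fini(&exec);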
+35 -12
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
··· 203 203 /* protected by bo being reserved */ 204 204 struct amdgpu_vm_bo_base *next; 205 205 206 - /* protected by vm status_lock */ 206 + /* protected by vm reservation and invalidated_lock */ 207 207 struct list_head vm_status; 208 208 209 209 /* if the bo is counted as shared in mem stats 210 - * protected by vm status_lock */ 210 + * protected by the vm BO being reserved */ 211 211 bool shared; 212 212 213 213 /* protected by the BO being reserved */ ··· 343 343 bool evicting; 344 344 unsigned int saved_flags; 345 345 346 - /* Lock to protect vm_bo add/del/move on all lists of vm */ 347 - spinlock_t status_lock; 348 - 349 - /* Memory statistics for this vm, protected by status_lock */ 346 + /* Memory statistics for this vm, protected by stats_lock */ 347 + spinlock_t stats_lock; 350 348 struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM]; 349 + 350 + /* 351 + * The following lists contain amdgpu_vm_bo_base objects for either 352 + * PDs, PTs or per VM BOs. The state transitions are: 353 + * 354 + * evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle 355 + * 356 + * Lists are protected by the root PD dma_resv lock. 357 + */ 351 358 352 359 /* Per-VM and PT BOs who needs a validation */ 353 360 struct list_head evicted; 354 - 355 - /* BOs for user mode queues that need a validation */ 356 - struct list_head evicted_user; 357 361 358 362 /* PT BOs which relocated and their parent need an update */ 359 363 struct list_head relocated; ··· 368 364 /* All BOs of this VM not currently in the state machine */ 369 365 struct list_head idle; 370 366 367 + /* 368 + * The following lists contain amdgpu_vm_bo_base objects for BOs which 369 + * have their own dma_resv object and do not depend on the root PD. Their 370 + * state transitions are: 371 + * 372 + * evicted_user or invalidated -> done 373 + * 374 + * Lists are protected by the invalidated_lock. 375 + */ 376 + spinlock_t invalidated_lock; 377 + 378 + /* BOs for user mode queues that need a validation */ 379 + struct list_head evicted_user; 380 + 371 381 /* regular invalidated BOs, but not yet updated in the PT */ 372 382 struct list_head invalidated; 373 383 374 - /* BO mappings freed, but not yet updated in the PT */ 375 - struct list_head freed; 376 - 377 384 /* BOs which are invalidated, has been updated in the PTs */ 378 385 struct list_head done; 386 + 387 + /* 388 + * This list contains amdgpu_bo_va_mapping objects which have been freed 389 + * but not updated in the PTs 390 + */ 391 + struct list_head freed; 379 392 380 393 /* contains the page directory */ 381 394 struct amdgpu_vm_bo_base root; ··· 512 491 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); 513 492 int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, 514 493 unsigned int num_fences); 494 + int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec, 495 + unsigned int num_fences); 515 496 bool amdgpu_vm_ready(struct amdgpu_vm *vm); 516 497 uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm); 517 498 int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-4
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
··· 543 543 entry->bo->vm_bo = NULL; 544 544 ttm_bo_set_bulk_move(&entry->bo->tbo, NULL); 545 545 546 - spin_lock(&entry->vm->status_lock); 547 546 list_del(&entry->vm_status); 548 - spin_unlock(&entry->vm->status_lock); 549 547 amdgpu_bo_unref(&entry->bo); 550 548 } 551 549 ··· 587 589 struct amdgpu_vm_pt_cursor seek; 588 590 struct amdgpu_vm_bo_base *entry; 589 591 590 - spin_lock(&params->vm->status_lock); 591 592 for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) { 592 593 if (entry && entry->bo) 593 594 list_move(&entry->vm_status, &params->tlb_flush_waitlist); ··· 594 597 595 598 /* enter start node now */ 596 599 list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist); 597 - spin_unlock(&params->vm->status_lock); 598 600 } 599 601 600 602 /**
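Both hunks already run with the root PD reservation held, which after this series is what protects the PD/PT state lists, so the spinlock was pure overhead. Under lockdep the invariant can still be made explicit; a sketch of the equivalent check (amdgpu_vm.c spells it as amdgpu_vm_assert_locked()):

/* illustrative only: document the lock that replaces status_lock here */
dma_resv_assert_held(entry->vm->root.bo->tbo.base.resv);
list_del(&entry->vm_status);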
+4 -57
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 425 425 return ret; 426 426 } 427 427 428 - static void amdgpu_dummy_vram_mgr_debug(struct ttm_resource_manager *man, 429 - struct drm_printer *printer) 430 - { 431 - DRM_DEBUG_DRIVER("Dummy vram mgr debug\n"); 432 - } 433 - 434 - static bool amdgpu_dummy_vram_mgr_compatible(struct ttm_resource_manager *man, 435 - struct ttm_resource *res, 436 - const struct ttm_place *place, 437 - size_t size) 438 - { 439 - DRM_DEBUG_DRIVER("Dummy vram mgr compatible\n"); 440 - return false; 441 - } 442 - 443 - static bool amdgpu_dummy_vram_mgr_intersects(struct ttm_resource_manager *man, 444 - struct ttm_resource *res, 445 - const struct ttm_place *place, 446 - size_t size) 447 - { 448 - DRM_DEBUG_DRIVER("Dummy vram mgr intersects\n"); 449 - return true; 450 - } 451 - 452 - static void amdgpu_dummy_vram_mgr_del(struct ttm_resource_manager *man, 453 - struct ttm_resource *res) 454 - { 455 - DRM_DEBUG_DRIVER("Dummy vram mgr deleted\n"); 456 - } 457 - 458 - static int amdgpu_dummy_vram_mgr_new(struct ttm_resource_manager *man, 459 - struct ttm_buffer_object *tbo, 460 - const struct ttm_place *place, 461 - struct ttm_resource **res) 462 - { 463 - DRM_DEBUG_DRIVER("Dummy vram mgr new\n"); 464 - return -ENOSPC; 465 - } 466 - 467 428 /** 468 429 * amdgpu_vram_mgr_new - allocate new ranges 469 430 * ··· 893 932 mutex_unlock(&mgr->lock); 894 933 } 895 934 896 - static const struct ttm_resource_manager_func amdgpu_dummy_vram_mgr_func = { 897 - .alloc = amdgpu_dummy_vram_mgr_new, 898 - .free = amdgpu_dummy_vram_mgr_del, 899 - .intersects = amdgpu_dummy_vram_mgr_intersects, 900 - .compatible = amdgpu_dummy_vram_mgr_compatible, 901 - .debug = amdgpu_dummy_vram_mgr_debug 902 - }; 903 - 904 935 static const struct ttm_resource_manager_func amdgpu_vram_mgr_func = { 905 936 .alloc = amdgpu_vram_mgr_new, 906 937 .free = amdgpu_vram_mgr_del, ··· 926 973 INIT_LIST_HEAD(&mgr->allocated_vres_list); 927 974 mgr->default_page_size = PAGE_SIZE; 928 975 929 - if (!adev->gmc.is_app_apu) { 930 - man->func = &amdgpu_vram_mgr_func; 931 - 932 - err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); 933 - if (err) 934 - return err; 935 - } else { 936 - man->func = &amdgpu_dummy_vram_mgr_func; 937 - DRM_INFO("Setup dummy vram mgr\n"); 938 - } 976 + man->func = &amdgpu_vram_mgr_func; 977 + err = drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); 978 + if (err) 979 + return err; 939 980 940 981 ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_VRAM, &mgr->manager); 941 982 ttm_resource_manager_set_used(man, true);
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
··· 126 126 127 127 void amgpu_xgmi_set_max_speed_width(struct amdgpu_device *adev, 128 128 uint16_t max_speed, uint8_t max_width); 129 + 130 + /* Cleanup macro for use with __free(xgmi_put_hive) */ 131 + DEFINE_FREE(xgmi_put_hive, struct amdgpu_hive_info *, if (_T) amdgpu_put_xgmi_hive(_T)) 132 + 129 133 #endif
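DEFINE_FREE() registers amdgpu_put_xgmi_hive() with the scope-based cleanup helpers from linux/cleanup.h, so a hive reference annotated with __free(xgmi_put_hive) is dropped automatically on every exit path. A minimal sketch (the function itself is hypothetical):

#include <linux/cleanup.h>

static int xgmi_do_something(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive __free(xgmi_put_hive) =
		amdgpu_get_xgmi_hive(adev);

	if (!hive)
		return -ENODEV;

	/* use hive; amdgpu_put_xgmi_hive(hive) runs when it goes out of
	 * scope, including on early returns */
	return 0;
}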
+5
drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
··· 405 405 uint32_t buf[]; 406 406 }; 407 407 408 + struct amd_sriov_ras_chk_criti { 409 + uint32_t hit; 410 + }; 411 + 408 412 struct amdsriov_ras_telemetry { 409 413 struct amd_sriov_ras_telemetry_header header; 410 414 411 415 union { 412 416 struct amd_sriov_ras_telemetry_error_count error_count; 413 417 struct amd_sriov_ras_cper_dump cper_dump; 418 + struct amd_sriov_ras_chk_criti chk_criti; 414 419 } body; 415 420 }; 416 421
+4
drivers/gpu/drm/amd/amdgpu/atom.c
··· 1246 1246 ectx.last_jump_jiffies = 0; 1247 1247 if (ws) { 1248 1248 ectx.ws = kcalloc(4, ws, GFP_KERNEL); 1249 + if (!ectx.ws) { 1250 + ret = -ENOMEM; 1251 + goto free; 1252 + } 1249 1253 ectx.ws_size = ws; 1250 1254 } else { 1251 1255 ectx.ws = NULL;
-3817
drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
··· 1 - /* 2 - * Copyright 2014 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 21 - * 22 - */ 23 - 24 - #include <drm/drm_edid.h> 25 - #include <drm/drm_fourcc.h> 26 - #include <drm/drm_modeset_helper.h> 27 - #include <drm/drm_modeset_helper_vtables.h> 28 - #include <drm/drm_vblank.h> 29 - 30 - #include "amdgpu.h" 31 - #include "amdgpu_pm.h" 32 - #include "amdgpu_i2c.h" 33 - #include "vid.h" 34 - #include "atom.h" 35 - #include "amdgpu_atombios.h" 36 - #include "atombios_crtc.h" 37 - #include "atombios_encoders.h" 38 - #include "amdgpu_pll.h" 39 - #include "amdgpu_connectors.h" 40 - #include "amdgpu_display.h" 41 - #include "dce_v11_0.h" 42 - 43 - #include "dce/dce_11_0_d.h" 44 - #include "dce/dce_11_0_sh_mask.h" 45 - #include "dce/dce_11_0_enum.h" 46 - #include "oss/oss_3_0_d.h" 47 - #include "oss/oss_3_0_sh_mask.h" 48 - #include "gmc/gmc_8_1_d.h" 49 - #include "gmc/gmc_8_1_sh_mask.h" 50 - 51 - #include "ivsrcid/ivsrcid_vislands30.h" 52 - 53 - static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev); 54 - static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev); 55 - static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, int hpd); 56 - 57 - static const u32 crtc_offsets[] = 58 - { 59 - CRTC0_REGISTER_OFFSET, 60 - CRTC1_REGISTER_OFFSET, 61 - CRTC2_REGISTER_OFFSET, 62 - CRTC3_REGISTER_OFFSET, 63 - CRTC4_REGISTER_OFFSET, 64 - CRTC5_REGISTER_OFFSET, 65 - CRTC6_REGISTER_OFFSET 66 - }; 67 - 68 - static const u32 hpd_offsets[] = 69 - { 70 - HPD0_REGISTER_OFFSET, 71 - HPD1_REGISTER_OFFSET, 72 - HPD2_REGISTER_OFFSET, 73 - HPD3_REGISTER_OFFSET, 74 - HPD4_REGISTER_OFFSET, 75 - HPD5_REGISTER_OFFSET 76 - }; 77 - 78 - static const uint32_t dig_offsets[] = { 79 - DIG0_REGISTER_OFFSET, 80 - DIG1_REGISTER_OFFSET, 81 - DIG2_REGISTER_OFFSET, 82 - DIG3_REGISTER_OFFSET, 83 - DIG4_REGISTER_OFFSET, 84 - DIG5_REGISTER_OFFSET, 85 - DIG6_REGISTER_OFFSET, 86 - DIG7_REGISTER_OFFSET, 87 - DIG8_REGISTER_OFFSET 88 - }; 89 - 90 - static const struct { 91 - uint32_t reg; 92 - uint32_t vblank; 93 - uint32_t vline; 94 - uint32_t hpd; 95 - 96 - } interrupt_status_offsets[] = { { 97 - .reg = mmDISP_INTERRUPT_STATUS, 98 - .vblank = DISP_INTERRUPT_STATUS__LB_D1_VBLANK_INTERRUPT_MASK, 99 - .vline = DISP_INTERRUPT_STATUS__LB_D1_VLINE_INTERRUPT_MASK, 100 - .hpd = DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 101 - }, { 102 - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE, 103 - .vblank = 
DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VBLANK_INTERRUPT_MASK, 104 - .vline = DISP_INTERRUPT_STATUS_CONTINUE__LB_D2_VLINE_INTERRUPT_MASK, 105 - .hpd = DISP_INTERRUPT_STATUS_CONTINUE__DC_HPD2_INTERRUPT_MASK 106 - }, { 107 - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE2, 108 - .vblank = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VBLANK_INTERRUPT_MASK, 109 - .vline = DISP_INTERRUPT_STATUS_CONTINUE2__LB_D3_VLINE_INTERRUPT_MASK, 110 - .hpd = DISP_INTERRUPT_STATUS_CONTINUE2__DC_HPD3_INTERRUPT_MASK 111 - }, { 112 - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE3, 113 - .vblank = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VBLANK_INTERRUPT_MASK, 114 - .vline = DISP_INTERRUPT_STATUS_CONTINUE3__LB_D4_VLINE_INTERRUPT_MASK, 115 - .hpd = DISP_INTERRUPT_STATUS_CONTINUE3__DC_HPD4_INTERRUPT_MASK 116 - }, { 117 - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE4, 118 - .vblank = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VBLANK_INTERRUPT_MASK, 119 - .vline = DISP_INTERRUPT_STATUS_CONTINUE4__LB_D5_VLINE_INTERRUPT_MASK, 120 - .hpd = DISP_INTERRUPT_STATUS_CONTINUE4__DC_HPD5_INTERRUPT_MASK 121 - }, { 122 - .reg = mmDISP_INTERRUPT_STATUS_CONTINUE5, 123 - .vblank = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VBLANK_INTERRUPT_MASK, 124 - .vline = DISP_INTERRUPT_STATUS_CONTINUE5__LB_D6_VLINE_INTERRUPT_MASK, 125 - .hpd = DISP_INTERRUPT_STATUS_CONTINUE5__DC_HPD6_INTERRUPT_MASK 126 - } }; 127 - 128 - static const u32 cz_golden_settings_a11[] = 129 - { 130 - mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, 131 - mmFBC_MISC, 0x1f311fff, 0x14300000, 132 - }; 133 - 134 - static const u32 cz_mgcg_cgcg_init[] = 135 - { 136 - mmXDMA_CLOCK_GATING_CNTL, 0xffffffff, 0x00000100, 137 - mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000, 138 - }; 139 - 140 - static const u32 stoney_golden_settings_a11[] = 141 - { 142 - mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000, 143 - mmFBC_MISC, 0x1f311fff, 0x14302000, 144 - }; 145 - 146 - static const u32 polaris11_golden_settings_a11[] = 147 - { 148 - mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 149 - mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, 150 - mmFBC_DEBUG1, 0xffffffff, 0x00000008, 151 - mmFBC_MISC, 0x9f313fff, 0x14302008, 152 - mmHDMI_CONTROL, 0x313f031f, 0x00000011, 153 - }; 154 - 155 - static const u32 polaris10_golden_settings_a11[] = 156 - { 157 - mmDCI_CLK_CNTL, 0x00000080, 0x00000000, 158 - mmFBC_DEBUG_COMP, 0x000000f0, 0x00000070, 159 - mmFBC_MISC, 0x9f313fff, 0x14302008, 160 - mmHDMI_CONTROL, 0x313f031f, 0x00000011, 161 - }; 162 - 163 - static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev) 164 - { 165 - switch (adev->asic_type) { 166 - case CHIP_CARRIZO: 167 - amdgpu_device_program_register_sequence(adev, 168 - cz_mgcg_cgcg_init, 169 - ARRAY_SIZE(cz_mgcg_cgcg_init)); 170 - amdgpu_device_program_register_sequence(adev, 171 - cz_golden_settings_a11, 172 - ARRAY_SIZE(cz_golden_settings_a11)); 173 - break; 174 - case CHIP_STONEY: 175 - amdgpu_device_program_register_sequence(adev, 176 - stoney_golden_settings_a11, 177 - ARRAY_SIZE(stoney_golden_settings_a11)); 178 - break; 179 - case CHIP_POLARIS11: 180 - case CHIP_POLARIS12: 181 - amdgpu_device_program_register_sequence(adev, 182 - polaris11_golden_settings_a11, 183 - ARRAY_SIZE(polaris11_golden_settings_a11)); 184 - break; 185 - case CHIP_POLARIS10: 186 - case CHIP_VEGAM: 187 - amdgpu_device_program_register_sequence(adev, 188 - polaris10_golden_settings_a11, 189 - ARRAY_SIZE(polaris10_golden_settings_a11)); 190 - break; 191 - default: 192 - break; 193 - } 194 - } 195 - 196 - static u32 dce_v11_0_audio_endpt_rreg(struct amdgpu_device *adev, 197 - u32 
block_offset, u32 reg) 198 - { 199 - unsigned long flags; 200 - u32 r; 201 - 202 - spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); 203 - WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 204 - r = RREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset); 205 - spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 206 - 207 - return r; 208 - } 209 - 210 - static void dce_v11_0_audio_endpt_wreg(struct amdgpu_device *adev, 211 - u32 block_offset, u32 reg, u32 v) 212 - { 213 - unsigned long flags; 214 - 215 - spin_lock_irqsave(&adev->audio_endpt_idx_lock, flags); 216 - WREG32(mmAZALIA_F0_CODEC_ENDPOINT_INDEX + block_offset, reg); 217 - WREG32(mmAZALIA_F0_CODEC_ENDPOINT_DATA + block_offset, v); 218 - spin_unlock_irqrestore(&adev->audio_endpt_idx_lock, flags); 219 - } 220 - 221 - static u32 dce_v11_0_vblank_get_counter(struct amdgpu_device *adev, int crtc) 222 - { 223 - if (crtc < 0 || crtc >= adev->mode_info.num_crtc) 224 - return 0; 225 - else 226 - return RREG32(mmCRTC_STATUS_FRAME_COUNT + crtc_offsets[crtc]); 227 - } 228 - 229 - static void dce_v11_0_pageflip_interrupt_init(struct amdgpu_device *adev) 230 - { 231 - unsigned i; 232 - 233 - /* Enable pflip interrupts */ 234 - for (i = 0; i < adev->mode_info.num_crtc; i++) 235 - amdgpu_irq_get(adev, &adev->pageflip_irq, i); 236 - } 237 - 238 - static void dce_v11_0_pageflip_interrupt_fini(struct amdgpu_device *adev) 239 - { 240 - unsigned i; 241 - 242 - /* Disable pflip interrupts */ 243 - for (i = 0; i < adev->mode_info.num_crtc; i++) 244 - amdgpu_irq_put(adev, &adev->pageflip_irq, i); 245 - } 246 - 247 - /** 248 - * dce_v11_0_page_flip - pageflip callback. 249 - * 250 - * @adev: amdgpu_device pointer 251 - * @crtc_id: crtc to cleanup pageflip on 252 - * @crtc_base: new address of the crtc (GPU MC address) 253 - * @async: asynchronous flip 254 - * 255 - * Triggers the actual pageflip by updating the primary 256 - * surface base address. 257 - */ 258 - static void dce_v11_0_page_flip(struct amdgpu_device *adev, 259 - int crtc_id, u64 crtc_base, bool async) 260 - { 261 - struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 262 - struct drm_framebuffer *fb = amdgpu_crtc->base.primary->fb; 263 - u32 tmp; 264 - 265 - /* flip immediate for async, default is vsync */ 266 - tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); 267 - tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, 268 - GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0); 269 - WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 270 - /* update pitch */ 271 - WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, 272 - fb->pitches[0] / fb->format->cpp[0]); 273 - /* update the scanout addresses */ 274 - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 275 - upper_32_bits(crtc_base)); 276 - /* writing to the low address triggers the update */ 277 - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 278 - lower_32_bits(crtc_base)); 279 - /* post the write */ 280 - RREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset); 281 - } 282 - 283 - static int dce_v11_0_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, 284 - u32 *vbl, u32 *position) 285 - { 286 - if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) 287 - return -EINVAL; 288 - 289 - *vbl = RREG32(mmCRTC_V_BLANK_START_END + crtc_offsets[crtc]); 290 - *position = RREG32(mmCRTC_STATUS_POSITION + crtc_offsets[crtc]); 291 - 292 - return 0; 293 - } 294 - 295 - /** 296 - * dce_v11_0_hpd_sense - hpd sense callback. 
297 - * 298 - * @adev: amdgpu_device pointer 299 - * @hpd: hpd (hotplug detect) pin 300 - * 301 - * Checks if a digital monitor is connected (evergreen+). 302 - * Returns true if connected, false if not connected. 303 - */ 304 - static bool dce_v11_0_hpd_sense(struct amdgpu_device *adev, 305 - enum amdgpu_hpd_id hpd) 306 - { 307 - bool connected = false; 308 - 309 - if (hpd >= adev->mode_info.num_hpd) 310 - return connected; 311 - 312 - if (RREG32(mmDC_HPD_INT_STATUS + hpd_offsets[hpd]) & 313 - DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) 314 - connected = true; 315 - 316 - return connected; 317 - } 318 - 319 - /** 320 - * dce_v11_0_hpd_set_polarity - hpd set polarity callback. 321 - * 322 - * @adev: amdgpu_device pointer 323 - * @hpd: hpd (hotplug detect) pin 324 - * 325 - * Set the polarity of the hpd pin (evergreen+). 326 - */ 327 - static void dce_v11_0_hpd_set_polarity(struct amdgpu_device *adev, 328 - enum amdgpu_hpd_id hpd) 329 - { 330 - u32 tmp; 331 - bool connected = dce_v11_0_hpd_sense(adev, hpd); 332 - 333 - if (hpd >= adev->mode_info.num_hpd) 334 - return; 335 - 336 - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 337 - if (connected) 338 - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 0); 339 - else 340 - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_POLARITY, 1); 341 - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 342 - } 343 - 344 - /** 345 - * dce_v11_0_hpd_init - hpd setup callback. 346 - * 347 - * @adev: amdgpu_device pointer 348 - * 349 - * Setup the hpd pins used by the card (evergreen+). 350 - * Enable the pin, set the polarity, and enable the hpd interrupts. 351 - */ 352 - static void dce_v11_0_hpd_init(struct amdgpu_device *adev) 353 - { 354 - struct drm_device *dev = adev_to_drm(adev); 355 - struct drm_connector *connector; 356 - struct drm_connector_list_iter iter; 357 - u32 tmp; 358 - 359 - drm_connector_list_iter_begin(dev, &iter); 360 - drm_for_each_connector_iter(connector, &iter) { 361 - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 362 - 363 - if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) 364 - continue; 365 - 366 - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || 367 - connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { 368 - /* don't try to enable hpd on eDP or LVDS avoid breaking the 369 - * aux dp channel on imac and help (but not completely fix) 370 - * https://bugzilla.redhat.com/show_bug.cgi?id=726143 371 - * also avoid interrupt storms during dpms. 
372 - */ 373 - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); 374 - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); 375 - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); 376 - continue; 377 - } 378 - 379 - tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); 380 - tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 1); 381 - WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); 382 - 383 - tmp = RREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd]); 384 - tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, 385 - DC_HPD_CONNECT_INT_DELAY, 386 - AMDGPU_HPD_CONNECT_INT_DELAY_IN_MS); 387 - tmp = REG_SET_FIELD(tmp, DC_HPD_TOGGLE_FILT_CNTL, 388 - DC_HPD_DISCONNECT_INT_DELAY, 389 - AMDGPU_HPD_DISCONNECT_INT_DELAY_IN_MS); 390 - WREG32(mmDC_HPD_TOGGLE_FILT_CNTL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); 391 - 392 - dce_v11_0_hpd_int_ack(adev, amdgpu_connector->hpd.hpd); 393 - dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); 394 - amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); 395 - } 396 - drm_connector_list_iter_end(&iter); 397 - } 398 - 399 - /** 400 - * dce_v11_0_hpd_fini - hpd tear down callback. 401 - * 402 - * @adev: amdgpu_device pointer 403 - * 404 - * Tear down the hpd pins used by the card (evergreen+). 405 - * Disable the hpd interrupts. 406 - */ 407 - static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) 408 - { 409 - struct drm_device *dev = adev_to_drm(adev); 410 - struct drm_connector *connector; 411 - struct drm_connector_list_iter iter; 412 - u32 tmp; 413 - 414 - drm_connector_list_iter_begin(dev, &iter); 415 - drm_for_each_connector_iter(connector, &iter) { 416 - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 417 - 418 - if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) 419 - continue; 420 - 421 - tmp = RREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd]); 422 - tmp = REG_SET_FIELD(tmp, DC_HPD_CONTROL, DC_HPD_EN, 0); 423 - WREG32(mmDC_HPD_CONTROL + hpd_offsets[amdgpu_connector->hpd.hpd], tmp); 424 - 425 - amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); 426 - } 427 - drm_connector_list_iter_end(&iter); 428 - } 429 - 430 - static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev) 431 - { 432 - return mmDC_GPIO_HPD_A; 433 - } 434 - 435 - static bool dce_v11_0_is_display_hung(struct amdgpu_device *adev) 436 - { 437 - u32 crtc_hung = 0; 438 - u32 crtc_status[6]; 439 - u32 i, j, tmp; 440 - 441 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 442 - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); 443 - if (REG_GET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN)) { 444 - crtc_status[i] = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); 445 - crtc_hung |= (1 << i); 446 - } 447 - } 448 - 449 - for (j = 0; j < 10; j++) { 450 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 451 - if (crtc_hung & (1 << i)) { 452 - tmp = RREG32(mmCRTC_STATUS_HV_COUNT + crtc_offsets[i]); 453 - if (tmp != crtc_status[i]) 454 - crtc_hung &= ~(1 << i); 455 - } 456 - } 457 - if (crtc_hung == 0) 458 - return false; 459 - udelay(100); 460 - } 461 - 462 - return true; 463 - } 464 - 465 - static void dce_v11_0_set_vga_render_state(struct amdgpu_device *adev, 466 - bool render) 467 - { 468 - u32 tmp; 469 - 470 - /* Lockout access through VGA aperture*/ 471 - tmp = RREG32(mmVGA_HDP_CONTROL); 472 - if (render) 473 - tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 0); 474 
- else 475 - tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); 476 - WREG32(mmVGA_HDP_CONTROL, tmp); 477 - 478 - /* disable VGA render */ 479 - tmp = RREG32(mmVGA_RENDER_CONTROL); 480 - if (render) 481 - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 1); 482 - else 483 - tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); 484 - WREG32(mmVGA_RENDER_CONTROL, tmp); 485 - } 486 - 487 - static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev) 488 - { 489 - int num_crtc = 0; 490 - 491 - switch (adev->asic_type) { 492 - case CHIP_CARRIZO: 493 - num_crtc = 3; 494 - break; 495 - case CHIP_STONEY: 496 - num_crtc = 2; 497 - break; 498 - case CHIP_POLARIS10: 499 - case CHIP_VEGAM: 500 - num_crtc = 6; 501 - break; 502 - case CHIP_POLARIS11: 503 - case CHIP_POLARIS12: 504 - num_crtc = 5; 505 - break; 506 - default: 507 - num_crtc = 0; 508 - } 509 - return num_crtc; 510 - } 511 - 512 - void dce_v11_0_disable_dce(struct amdgpu_device *adev) 513 - { 514 - /*Disable VGA render and enabled crtc, if has DCE engine*/ 515 - if (amdgpu_atombios_has_dce_engine_info(adev)) { 516 - u32 tmp; 517 - int crtc_enabled, i; 518 - 519 - dce_v11_0_set_vga_render_state(adev, false); 520 - 521 - /*Disable crtc*/ 522 - for (i = 0; i < dce_v11_0_get_num_crtc(adev); i++) { 523 - crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), 524 - CRTC_CONTROL, CRTC_MASTER_EN); 525 - if (crtc_enabled) { 526 - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); 527 - tmp = RREG32(mmCRTC_CONTROL + crtc_offsets[i]); 528 - tmp = REG_SET_FIELD(tmp, CRTC_CONTROL, CRTC_MASTER_EN, 0); 529 - WREG32(mmCRTC_CONTROL + crtc_offsets[i], tmp); 530 - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); 531 - } 532 - } 533 - } 534 - } 535 - 536 - static void dce_v11_0_program_fmt(struct drm_encoder *encoder) 537 - { 538 - struct drm_device *dev = encoder->dev; 539 - struct amdgpu_device *adev = drm_to_adev(dev); 540 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 541 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 542 - struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 543 - int bpc = 0; 544 - u32 tmp = 0; 545 - enum amdgpu_connector_dither dither = AMDGPU_FMT_DITHER_DISABLE; 546 - 547 - if (connector) { 548 - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 549 - bpc = amdgpu_connector_get_monitor_bpc(connector); 550 - dither = amdgpu_connector->dither; 551 - } 552 - 553 - /* LVDS/eDP FMT is set up by atom */ 554 - if (amdgpu_encoder->devices & ATOM_DEVICE_LCD_SUPPORT) 555 - return; 556 - 557 - /* not needed for analog */ 558 - if ((amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1) || 559 - (amdgpu_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2)) 560 - return; 561 - 562 - if (bpc == 0) 563 - return; 564 - 565 - switch (bpc) { 566 - case 6: 567 - if (dither == AMDGPU_FMT_DITHER_ENABLE) { 568 - /* XXX sort out optimal dither settings */ 569 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); 570 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); 571 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); 572 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 0); 573 - } else { 574 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); 575 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 0); 576 - } 577 - break; 578 - case 8: 579 
- if (dither == AMDGPU_FMT_DITHER_ENABLE) { 580 - /* XXX sort out optimal dither settings */ 581 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); 582 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); 583 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); 584 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); 585 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 1); 586 - } else { 587 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); 588 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 1); 589 - } 590 - break; 591 - case 10: 592 - if (dither == AMDGPU_FMT_DITHER_ENABLE) { 593 - /* XXX sort out optimal dither settings */ 594 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_FRAME_RANDOM_ENABLE, 1); 595 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_HIGHPASS_RANDOM_ENABLE, 1); 596 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_RGB_RANDOM_ENABLE, 1); 597 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_EN, 1); 598 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_SPATIAL_DITHER_DEPTH, 2); 599 - } else { 600 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_EN, 1); 601 - tmp = REG_SET_FIELD(tmp, FMT_BIT_DEPTH_CONTROL, FMT_TRUNCATE_DEPTH, 2); 602 - } 603 - break; 604 - default: 605 - /* not needed */ 606 - break; 607 - } 608 - 609 - WREG32(mmFMT_BIT_DEPTH_CONTROL + amdgpu_crtc->crtc_offset, tmp); 610 - } 611 - 612 - 613 - /* display watermark setup */ 614 - /** 615 - * dce_v11_0_line_buffer_adjust - Set up the line buffer 616 - * 617 - * @adev: amdgpu_device pointer 618 - * @amdgpu_crtc: the selected display controller 619 - * @mode: the current display mode on the selected display 620 - * controller 621 - * 622 - * Setup up the line buffer allocation for 623 - * the selected display controller (CIK). 624 - * Returns the line buffer size in pixels. 625 - */ 626 - static u32 dce_v11_0_line_buffer_adjust(struct amdgpu_device *adev, 627 - struct amdgpu_crtc *amdgpu_crtc, 628 - struct drm_display_mode *mode) 629 - { 630 - u32 tmp, buffer_alloc, i, mem_cfg; 631 - u32 pipe_offset = amdgpu_crtc->crtc_id; 632 - /* 633 - * Line Buffer Setup 634 - * There are 6 line buffers, one for each display controllers. 635 - * There are 3 partitions per LB. Select the number of partitions 636 - * to enable based on the display width. For display widths larger 637 - * than 4096, you need use to use 2 display controllers and combine 638 - * them using the stereo blender. 639 - */ 640 - if (amdgpu_crtc->base.enabled && mode) { 641 - if (mode->crtc_hdisplay < 1920) { 642 - mem_cfg = 1; 643 - buffer_alloc = 2; 644 - } else if (mode->crtc_hdisplay < 2560) { 645 - mem_cfg = 2; 646 - buffer_alloc = 2; 647 - } else if (mode->crtc_hdisplay < 4096) { 648 - mem_cfg = 0; 649 - buffer_alloc = (adev->flags & AMD_IS_APU) ? 2 : 4; 650 - } else { 651 - DRM_DEBUG_KMS("Mode too big for LB!\n"); 652 - mem_cfg = 0; 653 - buffer_alloc = (adev->flags & AMD_IS_APU) ? 
2 : 4; 654 - } 655 - } else { 656 - mem_cfg = 1; 657 - buffer_alloc = 0; 658 - } 659 - 660 - tmp = RREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset); 661 - tmp = REG_SET_FIELD(tmp, LB_MEMORY_CTRL, LB_MEMORY_CONFIG, mem_cfg); 662 - WREG32(mmLB_MEMORY_CTRL + amdgpu_crtc->crtc_offset, tmp); 663 - 664 - tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); 665 - tmp = REG_SET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATED, buffer_alloc); 666 - WREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset, tmp); 667 - 668 - for (i = 0; i < adev->usec_timeout; i++) { 669 - tmp = RREG32(mmPIPE0_DMIF_BUFFER_CONTROL + pipe_offset); 670 - if (REG_GET_FIELD(tmp, PIPE0_DMIF_BUFFER_CONTROL, DMIF_BUFFERS_ALLOCATION_COMPLETED)) 671 - break; 672 - udelay(1); 673 - } 674 - 675 - if (amdgpu_crtc->base.enabled && mode) { 676 - switch (mem_cfg) { 677 - case 0: 678 - default: 679 - return 4096 * 2; 680 - case 1: 681 - return 1920 * 2; 682 - case 2: 683 - return 2560 * 2; 684 - } 685 - } 686 - 687 - /* controller not enabled, so no lb used */ 688 - return 0; 689 - } 690 - 691 - /** 692 - * cik_get_number_of_dram_channels - get the number of dram channels 693 - * 694 - * @adev: amdgpu_device pointer 695 - * 696 - * Look up the number of video ram channels (CIK). 697 - * Used for display watermark bandwidth calculations 698 - * Returns the number of dram channels 699 - */ 700 - static u32 cik_get_number_of_dram_channels(struct amdgpu_device *adev) 701 - { 702 - u32 tmp = RREG32(mmMC_SHARED_CHMAP); 703 - 704 - switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) { 705 - case 0: 706 - default: 707 - return 1; 708 - case 1: 709 - return 2; 710 - case 2: 711 - return 4; 712 - case 3: 713 - return 8; 714 - case 4: 715 - return 3; 716 - case 5: 717 - return 6; 718 - case 6: 719 - return 10; 720 - case 7: 721 - return 12; 722 - case 8: 723 - return 16; 724 - } 725 - } 726 - 727 - struct dce10_wm_params { 728 - u32 dram_channels; /* number of dram channels */ 729 - u32 yclk; /* bandwidth per dram data pin in kHz */ 730 - u32 sclk; /* engine clock in kHz */ 731 - u32 disp_clk; /* display clock in kHz */ 732 - u32 src_width; /* viewport width */ 733 - u32 active_time; /* active display time in ns */ 734 - u32 blank_time; /* blank time in ns */ 735 - bool interlaced; /* mode is interlaced */ 736 - fixed20_12 vsc; /* vertical scale ratio */ 737 - u32 num_heads; /* number of active crtcs */ 738 - u32 bytes_per_pixel; /* bytes per pixel display + overlay */ 739 - u32 lb_size; /* line buffer allocated to pipe */ 740 - u32 vtaps; /* vertical scaler taps */ 741 - }; 742 - 743 - /** 744 - * dce_v11_0_dram_bandwidth - get the dram bandwidth 745 - * 746 - * @wm: watermark calculation data 747 - * 748 - * Calculate the raw dram bandwidth (CIK). 
749 - * Used for display watermark bandwidth calculations 750 - * Returns the dram bandwidth in MBytes/s 751 - */ 752 - static u32 dce_v11_0_dram_bandwidth(struct dce10_wm_params *wm) 753 - { 754 - /* Calculate raw DRAM Bandwidth */ 755 - fixed20_12 dram_efficiency; /* 0.7 */ 756 - fixed20_12 yclk, dram_channels, bandwidth; 757 - fixed20_12 a; 758 - 759 - a.full = dfixed_const(1000); 760 - yclk.full = dfixed_const(wm->yclk); 761 - yclk.full = dfixed_div(yclk, a); 762 - dram_channels.full = dfixed_const(wm->dram_channels * 4); 763 - a.full = dfixed_const(10); 764 - dram_efficiency.full = dfixed_const(7); 765 - dram_efficiency.full = dfixed_div(dram_efficiency, a); 766 - bandwidth.full = dfixed_mul(dram_channels, yclk); 767 - bandwidth.full = dfixed_mul(bandwidth, dram_efficiency); 768 - 769 - return dfixed_trunc(bandwidth); 770 - } 771 - 772 - /** 773 - * dce_v11_0_dram_bandwidth_for_display - get the dram bandwidth for display 774 - * 775 - * @wm: watermark calculation data 776 - * 777 - * Calculate the dram bandwidth used for display (CIK). 778 - * Used for display watermark bandwidth calculations 779 - * Returns the dram bandwidth for display in MBytes/s 780 - */ 781 - static u32 dce_v11_0_dram_bandwidth_for_display(struct dce10_wm_params *wm) 782 - { 783 - /* Calculate DRAM Bandwidth and the part allocated to display. */ 784 - fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */ 785 - fixed20_12 yclk, dram_channels, bandwidth; 786 - fixed20_12 a; 787 - 788 - a.full = dfixed_const(1000); 789 - yclk.full = dfixed_const(wm->yclk); 790 - yclk.full = dfixed_div(yclk, a); 791 - dram_channels.full = dfixed_const(wm->dram_channels * 4); 792 - a.full = dfixed_const(10); 793 - disp_dram_allocation.full = dfixed_const(3); /* XXX worst case value 0.3 */ 794 - disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a); 795 - bandwidth.full = dfixed_mul(dram_channels, yclk); 796 - bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation); 797 - 798 - return dfixed_trunc(bandwidth); 799 - } 800 - 801 - /** 802 - * dce_v11_0_data_return_bandwidth - get the data return bandwidth 803 - * 804 - * @wm: watermark calculation data 805 - * 806 - * Calculate the data return bandwidth used for display (CIK). 807 - * Used for display watermark bandwidth calculations 808 - * Returns the data return bandwidth in MBytes/s 809 - */ 810 - static u32 dce_v11_0_data_return_bandwidth(struct dce10_wm_params *wm) 811 - { 812 - /* Calculate the display Data return Bandwidth */ 813 - fixed20_12 return_efficiency; /* 0.8 */ 814 - fixed20_12 sclk, bandwidth; 815 - fixed20_12 a; 816 - 817 - a.full = dfixed_const(1000); 818 - sclk.full = dfixed_const(wm->sclk); 819 - sclk.full = dfixed_div(sclk, a); 820 - a.full = dfixed_const(10); 821 - return_efficiency.full = dfixed_const(8); 822 - return_efficiency.full = dfixed_div(return_efficiency, a); 823 - a.full = dfixed_const(32); 824 - bandwidth.full = dfixed_mul(a, sclk); 825 - bandwidth.full = dfixed_mul(bandwidth, return_efficiency); 826 - 827 - return dfixed_trunc(bandwidth); 828 - } 829 - 830 - /** 831 - * dce_v11_0_dmif_request_bandwidth - get the dmif bandwidth 832 - * 833 - * @wm: watermark calculation data 834 - * 835 - * Calculate the dmif bandwidth used for display (CIK).
836 - * Used for display watermark bandwidth calculations 837 - * Returns the dmif bandwidth in MBytes/s 838 - */ 839 - static u32 dce_v11_0_dmif_request_bandwidth(struct dce10_wm_params *wm) 840 - { 841 - /* Calculate the DMIF Request Bandwidth */ 842 - fixed20_12 disp_clk_request_efficiency; /* 0.8 */ 843 - fixed20_12 disp_clk, bandwidth; 844 - fixed20_12 a, b; 845 - 846 - a.full = dfixed_const(1000); 847 - disp_clk.full = dfixed_const(wm->disp_clk); 848 - disp_clk.full = dfixed_div(disp_clk, a); 849 - a.full = dfixed_const(32); 850 - b.full = dfixed_mul(a, disp_clk); 851 - 852 - a.full = dfixed_const(10); 853 - disp_clk_request_efficiency.full = dfixed_const(8); 854 - disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a); 855 - 856 - bandwidth.full = dfixed_mul(b, disp_clk_request_efficiency); 857 - 858 - return dfixed_trunc(bandwidth); 859 - } 860 - 861 - /** 862 - * dce_v11_0_available_bandwidth - get the min available bandwidth 863 - * 864 - * @wm: watermark calculation data 865 - * 866 - * Calculate the min available bandwidth used for display (CIK). 867 - * Used for display watermark bandwidth calculations 868 - * Returns the min available bandwidth in MBytes/s 869 - */ 870 - static u32 dce_v11_0_available_bandwidth(struct dce10_wm_params *wm) 871 - { 872 - /* Calculate the Available bandwidth. Display can use this temporarily but not on average. */ 873 - u32 dram_bandwidth = dce_v11_0_dram_bandwidth(wm); 874 - u32 data_return_bandwidth = dce_v11_0_data_return_bandwidth(wm); 875 - u32 dmif_req_bandwidth = dce_v11_0_dmif_request_bandwidth(wm); 876 - 877 - return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth)); 878 - } 879 - 880 - /** 881 - * dce_v11_0_average_bandwidth - get the average available bandwidth 882 - * 883 - * @wm: watermark calculation data 884 - * 885 - * Calculate the average available bandwidth used for display (CIK). 886 - * Used for display watermark bandwidth calculations 887 - * Returns the average available bandwidth in MBytes/s 888 - */ 889 - static u32 dce_v11_0_average_bandwidth(struct dce10_wm_params *wm) 890 - { 891 - /* Calculate the display mode Average Bandwidth 892 - * DisplayMode should contain the source and destination dimensions, 893 - * timing, etc. 894 - */ 895 - fixed20_12 bpp; 896 - fixed20_12 line_time; 897 - fixed20_12 src_width; 898 - fixed20_12 bandwidth; 899 - fixed20_12 a; 900 - 901 - a.full = dfixed_const(1000); 902 - line_time.full = dfixed_const(wm->active_time + wm->blank_time); 903 - line_time.full = dfixed_div(line_time, a); 904 - bpp.full = dfixed_const(wm->bytes_per_pixel); 905 - src_width.full = dfixed_const(wm->src_width); 906 - bandwidth.full = dfixed_mul(src_width, bpp); 907 - bandwidth.full = dfixed_mul(bandwidth, wm->vsc); 908 - bandwidth.full = dfixed_div(bandwidth, line_time); 909 - 910 - return dfixed_trunc(bandwidth); 911 - } 912 - 913 - /** 914 - * dce_v11_0_latency_watermark - get the latency watermark 915 - * 916 - * @wm: watermark calculation data 917 - * 918 - * Calculate the latency watermark (CIK). 919 - * Used for display watermark bandwidth calculations 920 - * Returns the latency watermark in ns 921 - */ 922 - static u32 dce_v11_0_latency_watermark(struct dce10_wm_params *wm) 923 - { 924 - /* First calculate the latency in ns */ 925 - u32 mc_latency = 2000; /* 2000 ns.
*/ 926 - u32 available_bandwidth = dce_v11_0_available_bandwidth(wm); 927 - u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth; 928 - u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth; 929 - u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */ 930 - u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) + 931 - (wm->num_heads * cursor_line_pair_return_time); 932 - u32 latency = mc_latency + other_heads_data_return_time + dc_latency; 933 - u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time; 934 - u32 tmp, dmif_size = 12288; 935 - fixed20_12 a, b, c; 936 - 937 - if (wm->num_heads == 0) 938 - return 0; 939 - 940 - a.full = dfixed_const(2); 941 - b.full = dfixed_const(1); 942 - if ((wm->vsc.full > a.full) || 943 - ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) || 944 - (wm->vtaps >= 5) || 945 - ((wm->vsc.full >= a.full) && wm->interlaced)) 946 - max_src_lines_per_dst_line = 4; 947 - else 948 - max_src_lines_per_dst_line = 2; 949 - 950 - a.full = dfixed_const(available_bandwidth); 951 - b.full = dfixed_const(wm->num_heads); 952 - a.full = dfixed_div(a, b); 953 - tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512); 954 - tmp = min(dfixed_trunc(a), tmp); 955 - 956 - lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000); 957 - 958 - a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel); 959 - b.full = dfixed_const(1000); 960 - c.full = dfixed_const(lb_fill_bw); 961 - b.full = dfixed_div(c, b); 962 - a.full = dfixed_div(a, b); 963 - line_fill_time = dfixed_trunc(a); 964 - 965 - if (line_fill_time < wm->active_time) 966 - return latency; 967 - else 968 - return latency + (line_fill_time - wm->active_time); 969 - 970 - } 971 - 972 - /** 973 - * dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display - check 974 - * average and available dram bandwidth 975 - * 976 - * @wm: watermark calculation data 977 - * 978 - * Check if the display average bandwidth fits in the display 979 - * dram bandwidth (CIK). 980 - * Used for display watermark bandwidth calculations 981 - * Returns true if the display fits, false if not. 982 - */ 983 - static bool dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(struct dce10_wm_params *wm) 984 - { 985 - if (dce_v11_0_average_bandwidth(wm) <= 986 - (dce_v11_0_dram_bandwidth_for_display(wm) / wm->num_heads)) 987 - return true; 988 - else 989 - return false; 990 - } 991 - 992 - /** 993 - * dce_v11_0_average_bandwidth_vs_available_bandwidth - check 994 - * average and available bandwidth 995 - * 996 - * @wm: watermark calculation data 997 - * 998 - * Check if the display average bandwidth fits in the display 999 - * available bandwidth (CIK). 1000 - * Used for display watermark bandwidth calculations 1001 - * Returns true if the display fits, false if not. 1002 - */ 1003 - static bool dce_v11_0_average_bandwidth_vs_available_bandwidth(struct dce10_wm_params *wm) 1004 - { 1005 - if (dce_v11_0_average_bandwidth(wm) <= 1006 - (dce_v11_0_available_bandwidth(wm) / wm->num_heads)) 1007 - return true; 1008 - else 1009 - return false; 1010 - } 1011 - 1012 - /** 1013 - * dce_v11_0_check_latency_hiding - check latency hiding 1014 - * 1015 - * @wm: watermark calculation data 1016 - * 1017 - * Check latency hiding (CIK). 1018 - * Used for display watermark bandwidth calculations 1019 - * Returns true if the display fits, false if not. 
1020 - */ 1021 - static bool dce_v11_0_check_latency_hiding(struct dce10_wm_params *wm) 1022 - { 1023 - u32 lb_partitions = wm->lb_size / wm->src_width; 1024 - u32 line_time = wm->active_time + wm->blank_time; 1025 - u32 latency_tolerant_lines; 1026 - u32 latency_hiding; 1027 - fixed20_12 a; 1028 - 1029 - a.full = dfixed_const(1); 1030 - if (wm->vsc.full > a.full) 1031 - latency_tolerant_lines = 1; 1032 - else { 1033 - if (lb_partitions <= (wm->vtaps + 1)) 1034 - latency_tolerant_lines = 1; 1035 - else 1036 - latency_tolerant_lines = 2; 1037 - } 1038 - 1039 - latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time); 1040 - 1041 - if (dce_v11_0_latency_watermark(wm) <= latency_hiding) 1042 - return true; 1043 - else 1044 - return false; 1045 - } 1046 - 1047 - /** 1048 - * dce_v11_0_program_watermarks - program display watermarks 1049 - * 1050 - * @adev: amdgpu_device pointer 1051 - * @amdgpu_crtc: the selected display controller 1052 - * @lb_size: line buffer size 1053 - * @num_heads: number of display controllers in use 1054 - * 1055 - * Calculate and program the display watermarks for the 1056 - * selected display controller (CIK). 1057 - */ 1058 - static void dce_v11_0_program_watermarks(struct amdgpu_device *adev, 1059 - struct amdgpu_crtc *amdgpu_crtc, 1060 - u32 lb_size, u32 num_heads) 1061 - { 1062 - struct drm_display_mode *mode = &amdgpu_crtc->base.mode; 1063 - struct dce10_wm_params wm_low, wm_high; 1064 - u32 active_time; 1065 - u32 line_time = 0; 1066 - u32 latency_watermark_a = 0, latency_watermark_b = 0; 1067 - u32 tmp, wm_mask, lb_vblank_lead_lines = 0; 1068 - 1069 - if (amdgpu_crtc->base.enabled && num_heads && mode) { 1070 - active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000, 1071 - (u32)mode->clock); 1072 - line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000, 1073 - (u32)mode->clock); 1074 - line_time = min_t(u32, line_time, 65535); 1075 - 1076 - /* watermark for high clocks */ 1077 - if (adev->pm.dpm_enabled) { 1078 - wm_high.yclk = 1079 - amdgpu_dpm_get_mclk(adev, false) * 10; 1080 - wm_high.sclk = 1081 - amdgpu_dpm_get_sclk(adev, false) * 10; 1082 - } else { 1083 - wm_high.yclk = adev->pm.current_mclk * 10; 1084 - wm_high.sclk = adev->pm.current_sclk * 10; 1085 - } 1086 - 1087 - wm_high.disp_clk = mode->clock; 1088 - wm_high.src_width = mode->crtc_hdisplay; 1089 - wm_high.active_time = active_time; 1090 - wm_high.blank_time = line_time - wm_high.active_time; 1091 - wm_high.interlaced = false; 1092 - if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1093 - wm_high.interlaced = true; 1094 - wm_high.vsc = amdgpu_crtc->vsc; 1095 - wm_high.vtaps = 1; 1096 - if (amdgpu_crtc->rmx_type != RMX_OFF) 1097 - wm_high.vtaps = 2; 1098 - wm_high.bytes_per_pixel = 4; /* XXX: get this from fb config */ 1099 - wm_high.lb_size = lb_size; 1100 - wm_high.dram_channels = cik_get_number_of_dram_channels(adev); 1101 - wm_high.num_heads = num_heads; 1102 - 1103 - /* set for high clocks */ 1104 - latency_watermark_a = min_t(u32, dce_v11_0_latency_watermark(&wm_high), 65535); 1105 - 1106 - /* possibly force display priority to high */ 1107 - /* should really do this at mode validation time... 
*/ 1108 - if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_high) || 1109 - !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_high) || 1110 - !dce_v11_0_check_latency_hiding(&wm_high) || 1111 - (adev->mode_info.disp_priority == 2)) { 1112 - DRM_DEBUG_KMS("force priority to high\n"); 1113 - } 1114 - 1115 - /* watermark for low clocks */ 1116 - if (adev->pm.dpm_enabled) { 1117 - wm_low.yclk = 1118 - amdgpu_dpm_get_mclk(adev, true) * 10; 1119 - wm_low.sclk = 1120 - amdgpu_dpm_get_sclk(adev, true) * 10; 1121 - } else { 1122 - wm_low.yclk = adev->pm.current_mclk * 10; 1123 - wm_low.sclk = adev->pm.current_sclk * 10; 1124 - } 1125 - 1126 - wm_low.disp_clk = mode->clock; 1127 - wm_low.src_width = mode->crtc_hdisplay; 1128 - wm_low.active_time = active_time; 1129 - wm_low.blank_time = line_time - wm_low.active_time; 1130 - wm_low.interlaced = false; 1131 - if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1132 - wm_low.interlaced = true; 1133 - wm_low.vsc = amdgpu_crtc->vsc; 1134 - wm_low.vtaps = 1; 1135 - if (amdgpu_crtc->rmx_type != RMX_OFF) 1136 - wm_low.vtaps = 2; 1137 - wm_low.bytes_per_pixel = 4; /* XXX: get this from fb config */ 1138 - wm_low.lb_size = lb_size; 1139 - wm_low.dram_channels = cik_get_number_of_dram_channels(adev); 1140 - wm_low.num_heads = num_heads; 1141 - 1142 - /* set for low clocks */ 1143 - latency_watermark_b = min_t(u32, dce_v11_0_latency_watermark(&wm_low), 65535); 1144 - 1145 - /* possibly force display priority to high */ 1146 - /* should really do this at mode validation time... */ 1147 - if (!dce_v11_0_average_bandwidth_vs_dram_bandwidth_for_display(&wm_low) || 1148 - !dce_v11_0_average_bandwidth_vs_available_bandwidth(&wm_low) || 1149 - !dce_v11_0_check_latency_hiding(&wm_low) || 1150 - (adev->mode_info.disp_priority == 2)) { 1151 - DRM_DEBUG_KMS("force priority to high\n"); 1152 - } 1153 - lb_vblank_lead_lines = DIV_ROUND_UP(lb_size, mode->crtc_hdisplay); 1154 - } 1155 - 1156 - /* select wm A */ 1157 - wm_mask = RREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset); 1158 - tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 1); 1159 - WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1160 - tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); 1161 - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_a); 1162 - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); 1163 - WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1164 - /* select wm B */ 1165 - tmp = REG_SET_FIELD(wm_mask, DPG_WATERMARK_MASK_CONTROL, URGENCY_WATERMARK_MASK, 2); 1166 - WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1167 - tmp = RREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset); 1168 - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_LOW_WATERMARK, latency_watermark_b); 1169 - tmp = REG_SET_FIELD(tmp, DPG_PIPE_URGENCY_CONTROL, URGENCY_HIGH_WATERMARK, line_time); 1170 - WREG32(mmDPG_PIPE_URGENCY_CONTROL + amdgpu_crtc->crtc_offset, tmp); 1171 - /* restore original selection */ 1172 - WREG32(mmDPG_WATERMARK_MASK_CONTROL + amdgpu_crtc->crtc_offset, wm_mask); 1173 - 1174 - /* save values for DPM */ 1175 - amdgpu_crtc->line_time = line_time; 1176 - 1177 - /* Save number of lines the linebuffer leads before the scanout */ 1178 - amdgpu_crtc->lb_vblank_lead_lines = lb_vblank_lead_lines; 1179 - } 1180 - 1181 - /** 1182 - * dce_v11_0_bandwidth_update - program display 
watermarks 1183 - * 1184 - * @adev: amdgpu_device pointer 1185 - * 1186 - * Calculate and program the display watermarks and line 1187 - * buffer allocation (CIK). 1188 - */ 1189 - static void dce_v11_0_bandwidth_update(struct amdgpu_device *adev) 1190 - { 1191 - struct drm_display_mode *mode = NULL; 1192 - u32 num_heads = 0, lb_size; 1193 - int i; 1194 - 1195 - amdgpu_display_update_priority(adev); 1196 - 1197 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 1198 - if (adev->mode_info.crtcs[i]->base.enabled) 1199 - num_heads++; 1200 - } 1201 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 1202 - mode = &adev->mode_info.crtcs[i]->base.mode; 1203 - lb_size = dce_v11_0_line_buffer_adjust(adev, adev->mode_info.crtcs[i], mode); 1204 - dce_v11_0_program_watermarks(adev, adev->mode_info.crtcs[i], 1205 - lb_size, num_heads); 1206 - } 1207 - } 1208 - 1209 - static void dce_v11_0_audio_get_connected_pins(struct amdgpu_device *adev) 1210 - { 1211 - int i; 1212 - u32 offset, tmp; 1213 - 1214 - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1215 - offset = adev->mode_info.audio.pin[i].offset; 1216 - tmp = RREG32_AUDIO_ENDPT(offset, 1217 - ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT); 1218 - if (((tmp & 1219 - AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK) >> 1220 - AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT) == 1) 1221 - adev->mode_info.audio.pin[i].connected = false; 1222 - else 1223 - adev->mode_info.audio.pin[i].connected = true; 1224 - } 1225 - } 1226 - 1227 - static struct amdgpu_audio_pin *dce_v11_0_audio_get_pin(struct amdgpu_device *adev) 1228 - { 1229 - int i; 1230 - 1231 - dce_v11_0_audio_get_connected_pins(adev); 1232 - 1233 - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1234 - if (adev->mode_info.audio.pin[i].connected) 1235 - return &adev->mode_info.audio.pin[i]; 1236 - } 1237 - DRM_ERROR("No connected audio pins found!\n"); 1238 - return NULL; 1239 - } 1240 - 1241 - static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) 1242 - { 1243 - struct amdgpu_device *adev = drm_to_adev(encoder->dev); 1244 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1245 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1246 - u32 tmp; 1247 - 1248 - if (!dig || !dig->afmt || !dig->afmt->pin) 1249 - return; 1250 - 1251 - tmp = RREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset); 1252 - tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, dig->afmt->pin->id); 1253 - WREG32(mmAFMT_AUDIO_SRC_CONTROL + dig->afmt->offset, tmp); 1254 - } 1255 - 1256 - static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, 1257 - struct drm_display_mode *mode) 1258 - { 1259 - struct drm_device *dev = encoder->dev; 1260 - struct amdgpu_device *adev = drm_to_adev(dev); 1261 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1262 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1263 - struct drm_connector *connector; 1264 - struct drm_connector_list_iter iter; 1265 - struct amdgpu_connector *amdgpu_connector = NULL; 1266 - u32 tmp; 1267 - int interlace = 0; 1268 - 1269 - if (!dig || !dig->afmt || !dig->afmt->pin) 1270 - return; 1271 - 1272 - drm_connector_list_iter_begin(dev, &iter); 1273 - drm_for_each_connector_iter(connector, &iter) { 1274 - if (connector->encoder == encoder) { 1275 - amdgpu_connector = to_amdgpu_connector(connector); 1276 - break; 1277 - } 1278 - } 1279 - 
drm_connector_list_iter_end(&iter); 1280 - 1281 - if (!amdgpu_connector) { 1282 - DRM_ERROR("Couldn't find encoder's connector\n"); 1283 - return; 1284 - } 1285 - 1286 - if (mode->flags & DRM_MODE_FLAG_INTERLACE) 1287 - interlace = 1; 1288 - if (connector->latency_present[interlace]) { 1289 - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1290 - VIDEO_LIPSYNC, connector->video_latency[interlace]); 1291 - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1292 - AUDIO_LIPSYNC, connector->audio_latency[interlace]); 1293 - } else { 1294 - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1295 - VIDEO_LIPSYNC, 0); 1296 - tmp = REG_SET_FIELD(0, AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, 1297 - AUDIO_LIPSYNC, 0); 1298 - } 1299 - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, 1300 - ixAZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); 1301 - } 1302 - 1303 - static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) 1304 - { 1305 - struct drm_device *dev = encoder->dev; 1306 - struct amdgpu_device *adev = drm_to_adev(dev); 1307 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1308 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1309 - struct drm_connector *connector; 1310 - struct drm_connector_list_iter iter; 1311 - struct amdgpu_connector *amdgpu_connector = NULL; 1312 - u32 tmp; 1313 - u8 *sadb = NULL; 1314 - int sad_count; 1315 - 1316 - if (!dig || !dig->afmt || !dig->afmt->pin) 1317 - return; 1318 - 1319 - drm_connector_list_iter_begin(dev, &iter); 1320 - drm_for_each_connector_iter(connector, &iter) { 1321 - if (connector->encoder == encoder) { 1322 - amdgpu_connector = to_amdgpu_connector(connector); 1323 - break; 1324 - } 1325 - } 1326 - drm_connector_list_iter_end(&iter); 1327 - 1328 - if (!amdgpu_connector) { 1329 - DRM_ERROR("Couldn't find encoder's connector\n"); 1330 - return; 1331 - } 1332 - 1333 - sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb); 1334 - if (sad_count < 0) { 1335 - DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 1336 - sad_count = 0; 1337 - } 1338 - 1339 - /* program the speaker allocation */ 1340 - tmp = RREG32_AUDIO_ENDPT(dig->afmt->pin->offset, 1341 - ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 1342 - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1343 - DP_CONNECTION, 0); 1344 - /* set HDMI mode */ 1345 - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1346 - HDMI_CONNECTION, 1); 1347 - if (sad_count) 1348 - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1349 - SPEAKER_ALLOCATION, sadb[0]); 1350 - else 1351 - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, 1352 - SPEAKER_ALLOCATION, 5); /* stereo */ 1353 - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, 1354 - ixAZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 1355 - 1356 - kfree(sadb); 1357 - } 1358 - 1359 - static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) 1360 - { 1361 - struct drm_device *dev = encoder->dev; 1362 - struct amdgpu_device *adev = drm_to_adev(dev); 1363 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1364 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1365 - struct drm_connector *connector; 1366 - struct drm_connector_list_iter iter; 1367 - struct amdgpu_connector *amdgpu_connector = NULL; 1368 - struct cea_sad *sads; 1369 - int i, sad_count; 1370 - 1371 - static const u16 
eld_reg_to_type[][2] = { 1372 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, HDMI_AUDIO_CODING_TYPE_PCM }, 1373 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1, HDMI_AUDIO_CODING_TYPE_AC3 }, 1374 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2, HDMI_AUDIO_CODING_TYPE_MPEG1 }, 1375 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3, HDMI_AUDIO_CODING_TYPE_MP3 }, 1376 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4, HDMI_AUDIO_CODING_TYPE_MPEG2 }, 1377 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5, HDMI_AUDIO_CODING_TYPE_AAC_LC }, 1378 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6, HDMI_AUDIO_CODING_TYPE_DTS }, 1379 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7, HDMI_AUDIO_CODING_TYPE_ATRAC }, 1380 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9, HDMI_AUDIO_CODING_TYPE_EAC3 }, 1381 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10, HDMI_AUDIO_CODING_TYPE_DTS_HD }, 1382 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11, HDMI_AUDIO_CODING_TYPE_MLP }, 1383 - { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 1384 - }; 1385 - 1386 - if (!dig || !dig->afmt || !dig->afmt->pin) 1387 - return; 1388 - 1389 - drm_connector_list_iter_begin(dev, &iter); 1390 - drm_for_each_connector_iter(connector, &iter) { 1391 - if (connector->encoder == encoder) { 1392 - amdgpu_connector = to_amdgpu_connector(connector); 1393 - break; 1394 - } 1395 - } 1396 - drm_connector_list_iter_end(&iter); 1397 - 1398 - if (!amdgpu_connector) { 1399 - DRM_ERROR("Couldn't find encoder's connector\n"); 1400 - return; 1401 - } 1402 - 1403 - sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads); 1404 - if (sad_count < 0) 1405 - DRM_ERROR("Couldn't read SADs: %d\n", sad_count); 1406 - if (sad_count <= 0) 1407 - return; 1408 - BUG_ON(!sads); 1409 - 1410 - for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 1411 - u32 tmp = 0; 1412 - u8 stereo_freqs = 0; 1413 - int max_channels = -1; 1414 - int j; 1415 - 1416 - for (j = 0; j < sad_count; j++) { 1417 - struct cea_sad *sad = &sads[j]; 1418 - 1419 - if (sad->format == eld_reg_to_type[i][1]) { 1420 - if (sad->channels > max_channels) { 1421 - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1422 - MAX_CHANNELS, sad->channels); 1423 - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1424 - DESCRIPTOR_BYTE_2, sad->byte2); 1425 - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1426 - SUPPORTED_FREQUENCIES, sad->freq); 1427 - max_channels = sad->channels; 1428 - } 1429 - 1430 - if (sad->format == HDMI_AUDIO_CODING_TYPE_PCM) 1431 - stereo_freqs |= sad->freq; 1432 - else 1433 - break; 1434 - } 1435 - } 1436 - 1437 - tmp = REG_SET_FIELD(tmp, AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0, 1438 - SUPPORTED_FREQUENCIES_STEREO, stereo_freqs); 1439 - WREG32_AUDIO_ENDPT(dig->afmt->pin->offset, eld_reg_to_type[i][0], tmp); 1440 - } 1441 - 1442 - kfree(sads); 1443 - } 1444 - 1445 - static void dce_v11_0_audio_enable(struct amdgpu_device *adev, 1446 - struct amdgpu_audio_pin *pin, 1447 - bool enable) 1448 - { 1449 - if (!pin) 1450 - return; 1451 - 1452 - WREG32_AUDIO_ENDPT(pin->offset, ixAZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, 1453 - enable ? 
AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK : 0); 1454 - } 1455 - 1456 - static const u32 pin_offsets[] = 1457 - { 1458 - AUD0_REGISTER_OFFSET, 1459 - AUD1_REGISTER_OFFSET, 1460 - AUD2_REGISTER_OFFSET, 1461 - AUD3_REGISTER_OFFSET, 1462 - AUD4_REGISTER_OFFSET, 1463 - AUD5_REGISTER_OFFSET, 1464 - AUD6_REGISTER_OFFSET, 1465 - AUD7_REGISTER_OFFSET, 1466 - }; 1467 - 1468 - static int dce_v11_0_audio_init(struct amdgpu_device *adev) 1469 - { 1470 - int i; 1471 - 1472 - if (!amdgpu_audio) 1473 - return 0; 1474 - 1475 - adev->mode_info.audio.enabled = true; 1476 - 1477 - switch (adev->asic_type) { 1478 - case CHIP_CARRIZO: 1479 - case CHIP_STONEY: 1480 - adev->mode_info.audio.num_pins = 7; 1481 - break; 1482 - case CHIP_POLARIS10: 1483 - case CHIP_VEGAM: 1484 - adev->mode_info.audio.num_pins = 8; 1485 - break; 1486 - case CHIP_POLARIS11: 1487 - case CHIP_POLARIS12: 1488 - adev->mode_info.audio.num_pins = 6; 1489 - break; 1490 - default: 1491 - return -EINVAL; 1492 - } 1493 - 1494 - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 1495 - adev->mode_info.audio.pin[i].channels = -1; 1496 - adev->mode_info.audio.pin[i].rate = -1; 1497 - adev->mode_info.audio.pin[i].bits_per_sample = -1; 1498 - adev->mode_info.audio.pin[i].status_bits = 0; 1499 - adev->mode_info.audio.pin[i].category_code = 0; 1500 - adev->mode_info.audio.pin[i].connected = false; 1501 - adev->mode_info.audio.pin[i].offset = pin_offsets[i]; 1502 - adev->mode_info.audio.pin[i].id = i; 1503 - /* disable audio. it will be set up later */ 1504 - /* XXX remove once we switch to ip funcs */ 1505 - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 1506 - } 1507 - 1508 - return 0; 1509 - } 1510 - 1511 - static void dce_v11_0_audio_fini(struct amdgpu_device *adev) 1512 - { 1513 - if (!amdgpu_audio) 1514 - return; 1515 - 1516 - if (!adev->mode_info.audio.enabled) 1517 - return; 1518 - 1519 - adev->mode_info.audio.enabled = false; 1520 - } 1521 - 1522 - /* 1523 - * update the N and CTS parameters for a given pixel clock rate 1524 - */ 1525 - static void dce_v11_0_afmt_update_ACR(struct drm_encoder *encoder, uint32_t clock) 1526 - { 1527 - struct drm_device *dev = encoder->dev; 1528 - struct amdgpu_device *adev = drm_to_adev(dev); 1529 - struct amdgpu_afmt_acr acr = amdgpu_afmt_acr(clock); 1530 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1531 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1532 - u32 tmp; 1533 - 1534 - tmp = RREG32(mmHDMI_ACR_32_0 + dig->afmt->offset); 1535 - tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_0, HDMI_ACR_CTS_32, acr.cts_32khz); 1536 - WREG32(mmHDMI_ACR_32_0 + dig->afmt->offset, tmp); 1537 - tmp = RREG32(mmHDMI_ACR_32_1 + dig->afmt->offset); 1538 - tmp = REG_SET_FIELD(tmp, HDMI_ACR_32_1, HDMI_ACR_N_32, acr.n_32khz); 1539 - WREG32(mmHDMI_ACR_32_1 + dig->afmt->offset, tmp); 1540 - 1541 - tmp = RREG32(mmHDMI_ACR_44_0 + dig->afmt->offset); 1542 - tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_0, HDMI_ACR_CTS_44, acr.cts_44_1khz); 1543 - WREG32(mmHDMI_ACR_44_0 + dig->afmt->offset, tmp); 1544 - tmp = RREG32(mmHDMI_ACR_44_1 + dig->afmt->offset); 1545 - tmp = REG_SET_FIELD(tmp, HDMI_ACR_44_1, HDMI_ACR_N_44, acr.n_44_1khz); 1546 - WREG32(mmHDMI_ACR_44_1 + dig->afmt->offset, tmp); 1547 - 1548 - tmp = RREG32(mmHDMI_ACR_48_0 + dig->afmt->offset); 1549 - tmp = REG_SET_FIELD(tmp, HDMI_ACR_48_0, HDMI_ACR_CTS_48, acr.cts_48khz); 1550 - WREG32(mmHDMI_ACR_48_0 + dig->afmt->offset, tmp); 1551 - tmp = RREG32(mmHDMI_ACR_48_1 + dig->afmt->offset); 1552 - tmp = 
REG_SET_FIELD(tmp, HDMI_ACR_48_1, HDMI_ACR_N_48, acr.n_48khz); 1553 - WREG32(mmHDMI_ACR_48_1 + dig->afmt->offset, tmp); 1554 - 1555 - } 1556 - 1557 - /* 1558 - * build an HDMI Video Info Frame 1559 - */ 1560 - static void dce_v11_0_afmt_update_avi_infoframe(struct drm_encoder *encoder, 1561 - void *buffer, size_t size) 1562 - { 1563 - struct drm_device *dev = encoder->dev; 1564 - struct amdgpu_device *adev = drm_to_adev(dev); 1565 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1566 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1567 - uint8_t *frame = buffer + 3; 1568 - uint8_t *header = buffer; 1569 - 1570 - WREG32(mmAFMT_AVI_INFO0 + dig->afmt->offset, 1571 - frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24)); 1572 - WREG32(mmAFMT_AVI_INFO1 + dig->afmt->offset, 1573 - frame[0x4] | (frame[0x5] << 8) | (frame[0x6] << 16) | (frame[0x7] << 24)); 1574 - WREG32(mmAFMT_AVI_INFO2 + dig->afmt->offset, 1575 - frame[0x8] | (frame[0x9] << 8) | (frame[0xA] << 16) | (frame[0xB] << 24)); 1576 - WREG32(mmAFMT_AVI_INFO3 + dig->afmt->offset, 1577 - frame[0xC] | (frame[0xD] << 8) | (header[1] << 24)); 1578 - } 1579 - 1580 - static void dce_v11_0_audio_set_dto(struct drm_encoder *encoder, u32 clock) 1581 - { 1582 - struct drm_device *dev = encoder->dev; 1583 - struct amdgpu_device *adev = drm_to_adev(dev); 1584 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1585 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1586 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1587 - u32 dto_phase = 24 * 1000; 1588 - u32 dto_modulo = clock; 1589 - u32 tmp; 1590 - 1591 - if (!dig || !dig->afmt) 1592 - return; 1593 - 1594 - /* XXX two dtos; generally use dto0 for hdmi */ 1595 - /* Express [24MHz / target pixel clock] as an exact rational 1596 - * number (the quotient of two integers):
DCCG_AUDIO_DTOx_PHASE 1597 - * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 1598 - */ 1599 - tmp = RREG32(mmDCCG_AUDIO_DTO_SOURCE); 1600 - tmp = REG_SET_FIELD(tmp, DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, 1601 - amdgpu_crtc->crtc_id); 1602 - WREG32(mmDCCG_AUDIO_DTO_SOURCE, tmp); 1603 - WREG32(mmDCCG_AUDIO_DTO0_PHASE, dto_phase); 1604 - WREG32(mmDCCG_AUDIO_DTO0_MODULE, dto_modulo); 1605 - } 1606 - 1607 - /* 1608 - * update the info frames with the data from the current display mode 1609 - */ 1610 - static void dce_v11_0_afmt_setmode(struct drm_encoder *encoder, 1611 - struct drm_display_mode *mode) 1612 - { 1613 - struct drm_device *dev = encoder->dev; 1614 - struct amdgpu_device *adev = drm_to_adev(dev); 1615 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1616 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1617 - struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 1618 - u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 1619 - struct hdmi_avi_infoframe frame; 1620 - ssize_t err; 1621 - u32 tmp; 1622 - int bpc = 8; 1623 - 1624 - if (!dig || !dig->afmt) 1625 - return; 1626 - 1627 - /* Silent, r600_hdmi_enable will raise WARN for us */ 1628 - if (!dig->afmt->enabled) 1629 - return; 1630 - 1631 - /* hdmi deep color mode general control packets setup, if bpc > 8 */ 1632 - if (encoder->crtc) { 1633 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(encoder->crtc); 1634 - bpc = amdgpu_crtc->bpc; 1635 - } 1636 - 1637 - /* disable audio prior to setting up hw */ 1638 - dig->afmt->pin = dce_v11_0_audio_get_pin(adev); 1639 - dce_v11_0_audio_enable(adev, dig->afmt->pin, false); 1640 - 1641 - dce_v11_0_audio_set_dto(encoder, mode->clock); 1642 - 1643 - tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); 1644 - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); 1645 - WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); /* send null packets when required */ 1646 - 1647 - WREG32(mmAFMT_AUDIO_CRC_CONTROL + dig->afmt->offset, 0x1000); 1648 - 1649 - tmp = RREG32(mmHDMI_CONTROL + dig->afmt->offset); 1650 - switch (bpc) { 1651 - case 0: 1652 - case 6: 1653 - case 8: 1654 - case 16: 1655 - default: 1656 - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 0); 1657 - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 0); 1658 - DRM_DEBUG("%s: Disabling hdmi deep color for %d bpc.\n", 1659 - connector->name, bpc); 1660 - break; 1661 - case 10: 1662 - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); 1663 - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 1); 1664 - DRM_DEBUG("%s: Enabling hdmi deep color 30 for 10 bpc.\n", 1665 - connector->name); 1666 - break; 1667 - case 12: 1668 - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_ENABLE, 1); 1669 - tmp = REG_SET_FIELD(tmp, HDMI_CONTROL, HDMI_DEEP_COLOR_DEPTH, 2); 1670 - DRM_DEBUG("%s: Enabling hdmi deep color 36 for 12 bpc.\n", 1671 - connector->name); 1672 - break; 1673 - } 1674 - WREG32(mmHDMI_CONTROL + dig->afmt->offset, tmp); 1675 - 1676 - tmp = RREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset); 1677 - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_NULL_SEND, 1); /* send null packets when required */ 1678 - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_SEND, 1); /* send general control packets */ 1679 - tmp = REG_SET_FIELD(tmp, HDMI_VBI_PACKET_CONTROL, HDMI_GC_CONT, 1); /* send general control packets every frame */ 1680 - 
WREG32(mmHDMI_VBI_PACKET_CONTROL + dig->afmt->offset, tmp); 1681 - 1682 - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); 1683 - /* enable audio info frames (frames won't be set until audio is enabled) */ 1684 - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_SEND, 1); 1685 - /* required for audio info values to be updated */ 1686 - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AUDIO_INFO_CONT, 1); 1687 - WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1688 - 1689 - tmp = RREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset); 1690 - /* required for audio info values to be updated */ 1691 - tmp = REG_SET_FIELD(tmp, AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1); 1692 - WREG32(mmAFMT_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1693 - 1694 - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); 1695 - /* anything other than 0 */ 1696 - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AUDIO_INFO_LINE, 2); 1697 - WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); 1698 - 1699 - WREG32(mmHDMI_GC + dig->afmt->offset, 0); /* unset HDMI_GC_AVMUTE */ 1700 - 1701 - tmp = RREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1702 - /* set the default audio delay */ 1703 - tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_DELAY_EN, 1); 1704 - /* should be sufficient for all audio modes and small enough for all hblanks */ 1705 - tmp = REG_SET_FIELD(tmp, HDMI_AUDIO_PACKET_CONTROL, HDMI_AUDIO_PACKETS_PER_LINE, 3); 1706 - WREG32(mmHDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1707 - 1708 - tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1709 - /* allow 60958 channel status fields to be updated */ 1710 - tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1); 1711 - WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1712 - 1713 - tmp = RREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset); 1714 - if (bpc > 8) 1715 - /* clear SW CTS value */ 1716 - tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 0); 1717 - else 1718 - /* select SW CTS value */ 1719 - tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_SOURCE, 1); 1720 - /* allow hw to send ACR packets when required */ 1721 - tmp = REG_SET_FIELD(tmp, HDMI_ACR_PACKET_CONTROL, HDMI_ACR_AUTO_SEND, 1); 1722 - WREG32(mmHDMI_ACR_PACKET_CONTROL + dig->afmt->offset, tmp); 1723 - 1724 - dce_v11_0_afmt_update_ACR(encoder, mode->clock); 1725 - 1726 - tmp = RREG32(mmAFMT_60958_0 + dig->afmt->offset); 1727 - tmp = REG_SET_FIELD(tmp, AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, 1); 1728 - WREG32(mmAFMT_60958_0 + dig->afmt->offset, tmp); 1729 - 1730 - tmp = RREG32(mmAFMT_60958_1 + dig->afmt->offset); 1731 - tmp = REG_SET_FIELD(tmp, AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, 2); 1732 - WREG32(mmAFMT_60958_1 + dig->afmt->offset, tmp); 1733 - 1734 - tmp = RREG32(mmAFMT_60958_2 + dig->afmt->offset); 1735 - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, 3); 1736 - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, 4); 1737 - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, 5); 1738 - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, 6); 1739 - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, 7); 1740 - tmp = REG_SET_FIELD(tmp, AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, 8); 1741 - WREG32(mmAFMT_60958_2 + dig->afmt->offset, tmp); 1742 - 1743 - dce_v11_0_audio_write_speaker_allocation(encoder);
1744 - 1745 - WREG32(mmAFMT_AUDIO_PACKET_CONTROL2 + dig->afmt->offset, 1746 - (0xff << AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT)); 1747 - 1748 - dce_v11_0_afmt_audio_select_pin(encoder); 1749 - dce_v11_0_audio_write_sad_regs(encoder); 1750 - dce_v11_0_audio_write_latency_fields(encoder, mode); 1751 - 1752 - err = drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode); 1753 - if (err < 0) { 1754 - DRM_ERROR("failed to setup AVI infoframe: %zd\n", err); 1755 - return; 1756 - } 1757 - 1758 - err = hdmi_avi_infoframe_pack(&frame, buffer, sizeof(buffer)); 1759 - if (err < 0) { 1760 - DRM_ERROR("failed to pack AVI infoframe: %zd\n", err); 1761 - return; 1762 - } 1763 - 1764 - dce_v11_0_afmt_update_avi_infoframe(encoder, buffer, sizeof(buffer)); 1765 - 1766 - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset); 1767 - /* enable AVI info frames */ 1768 - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_SEND, 1); 1769 - /* required for audio info values to be updated */ 1770 - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL0, HDMI_AVI_INFO_CONT, 1); 1771 - WREG32(mmHDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, tmp); 1772 - 1773 - tmp = RREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset); 1774 - tmp = REG_SET_FIELD(tmp, HDMI_INFOFRAME_CONTROL1, HDMI_AVI_INFO_LINE, 2); 1775 - WREG32(mmHDMI_INFOFRAME_CONTROL1 + dig->afmt->offset, tmp); 1776 - 1777 - tmp = RREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset); 1778 - /* send audio packets */ 1779 - tmp = REG_SET_FIELD(tmp, AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, 1); 1780 - WREG32(mmAFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset, tmp); 1781 - 1782 - WREG32(mmAFMT_RAMP_CONTROL0 + dig->afmt->offset, 0x00FFFFFF); 1783 - WREG32(mmAFMT_RAMP_CONTROL1 + dig->afmt->offset, 0x007FFFFF); 1784 - WREG32(mmAFMT_RAMP_CONTROL2 + dig->afmt->offset, 0x00000001); 1785 - WREG32(mmAFMT_RAMP_CONTROL3 + dig->afmt->offset, 0x00000001); 1786 - 1787 - /* enable audio after setting up hw */ 1788 - dce_v11_0_audio_enable(adev, dig->afmt->pin, true); 1789 - } 1790 - 1791 - static void dce_v11_0_afmt_enable(struct drm_encoder *encoder, bool enable) 1792 - { 1793 - struct drm_device *dev = encoder->dev; 1794 - struct amdgpu_device *adev = drm_to_adev(dev); 1795 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 1796 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 1797 - 1798 - if (!dig || !dig->afmt) 1799 - return; 1800 - 1801 - /* Silent, r600_hdmi_enable will raise WARN for us */ 1802 - if (enable && dig->afmt->enabled) 1803 - return; 1804 - if (!enable && !dig->afmt->enabled) 1805 - return; 1806 - 1807 - if (!enable && dig->afmt->pin) { 1808 - dce_v11_0_audio_enable(adev, dig->afmt->pin, false); 1809 - dig->afmt->pin = NULL; 1810 - } 1811 - 1812 - dig->afmt->enabled = enable; 1813 - 1814 - DRM_DEBUG("%sabling AFMT interface @ 0x%04X for encoder 0x%x\n", 1815 - enable ?
"En" : "Dis", dig->afmt->offset, amdgpu_encoder->encoder_id); 1816 - } 1817 - 1818 - static int dce_v11_0_afmt_init(struct amdgpu_device *adev) 1819 - { 1820 - int i; 1821 - 1822 - for (i = 0; i < adev->mode_info.num_dig; i++) 1823 - adev->mode_info.afmt[i] = NULL; 1824 - 1825 - /* DCE11 has audio blocks tied to DIG encoders */ 1826 - for (i = 0; i < adev->mode_info.num_dig; i++) { 1827 - adev->mode_info.afmt[i] = kzalloc(sizeof(struct amdgpu_afmt), GFP_KERNEL); 1828 - if (adev->mode_info.afmt[i]) { 1829 - adev->mode_info.afmt[i]->offset = dig_offsets[i]; 1830 - adev->mode_info.afmt[i]->id = i; 1831 - } else { 1832 - int j; 1833 - for (j = 0; j < i; j++) { 1834 - kfree(adev->mode_info.afmt[j]); 1835 - adev->mode_info.afmt[j] = NULL; 1836 - } 1837 - return -ENOMEM; 1838 - } 1839 - } 1840 - return 0; 1841 - } 1842 - 1843 - static void dce_v11_0_afmt_fini(struct amdgpu_device *adev) 1844 - { 1845 - int i; 1846 - 1847 - for (i = 0; i < adev->mode_info.num_dig; i++) { 1848 - kfree(adev->mode_info.afmt[i]); 1849 - adev->mode_info.afmt[i] = NULL; 1850 - } 1851 - } 1852 - 1853 - static const u32 vga_control_regs[6] = 1854 - { 1855 - mmD1VGA_CONTROL, 1856 - mmD2VGA_CONTROL, 1857 - mmD3VGA_CONTROL, 1858 - mmD4VGA_CONTROL, 1859 - mmD5VGA_CONTROL, 1860 - mmD6VGA_CONTROL, 1861 - }; 1862 - 1863 - static void dce_v11_0_vga_enable(struct drm_crtc *crtc, bool enable) 1864 - { 1865 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1866 - struct drm_device *dev = crtc->dev; 1867 - struct amdgpu_device *adev = drm_to_adev(dev); 1868 - u32 vga_control; 1869 - 1870 - vga_control = RREG32(vga_control_regs[amdgpu_crtc->crtc_id]) & ~1; 1871 - if (enable) 1872 - WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control | 1); 1873 - else 1874 - WREG32(vga_control_regs[amdgpu_crtc->crtc_id], vga_control); 1875 - } 1876 - 1877 - static void dce_v11_0_grph_enable(struct drm_crtc *crtc, bool enable) 1878 - { 1879 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1880 - struct drm_device *dev = crtc->dev; 1881 - struct amdgpu_device *adev = drm_to_adev(dev); 1882 - 1883 - if (enable) 1884 - WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 1); 1885 - else 1886 - WREG32(mmGRPH_ENABLE + amdgpu_crtc->crtc_offset, 0); 1887 - } 1888 - 1889 - static int dce_v11_0_crtc_do_set_base(struct drm_crtc *crtc, 1890 - struct drm_framebuffer *fb, 1891 - int x, int y, int atomic) 1892 - { 1893 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 1894 - struct drm_device *dev = crtc->dev; 1895 - struct amdgpu_device *adev = drm_to_adev(dev); 1896 - struct drm_framebuffer *target_fb; 1897 - struct drm_gem_object *obj; 1898 - struct amdgpu_bo *abo; 1899 - uint64_t fb_location, tiling_flags; 1900 - uint32_t fb_format, fb_pitch_pixels; 1901 - u32 fb_swap = REG_SET_FIELD(0, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, ENDIAN_NONE); 1902 - u32 pipe_config; 1903 - u32 tmp, viewport_w, viewport_h; 1904 - int r; 1905 - bool bypass_lut = false; 1906 - 1907 - /* no fb bound */ 1908 - if (!atomic && !crtc->primary->fb) { 1909 - DRM_DEBUG_KMS("No FB bound\n"); 1910 - return 0; 1911 - } 1912 - 1913 - if (atomic) 1914 - target_fb = fb; 1915 - else 1916 - target_fb = crtc->primary->fb; 1917 - 1918 - /* If atomic, assume fb object is pinned & idle & fenced and 1919 - * just update base pointers 1920 - */ 1921 - obj = target_fb->obj[0]; 1922 - abo = gem_to_amdgpu_bo(obj); 1923 - r = amdgpu_bo_reserve(abo, false); 1924 - if (unlikely(r != 0)) 1925 - return r; 1926 - 1927 - if (!atomic) { 1928 - abo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 1929 
- r = amdgpu_bo_pin(abo, AMDGPU_GEM_DOMAIN_VRAM); 1930 - if (unlikely(r != 0)) { 1931 - amdgpu_bo_unreserve(abo); 1932 - return -EINVAL; 1933 - } 1934 - } 1935 - fb_location = amdgpu_bo_gpu_offset(abo); 1936 - 1937 - amdgpu_bo_get_tiling_flags(abo, &tiling_flags); 1938 - amdgpu_bo_unreserve(abo); 1939 - 1940 - pipe_config = AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); 1941 - 1942 - switch (target_fb->format->format) { 1943 - case DRM_FORMAT_C8: 1944 - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 0); 1945 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 1946 - break; 1947 - case DRM_FORMAT_XRGB4444: 1948 - case DRM_FORMAT_ARGB4444: 1949 - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 1950 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 2); 1951 - #ifdef __BIG_ENDIAN 1952 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1953 - ENDIAN_8IN16); 1954 - #endif 1955 - break; 1956 - case DRM_FORMAT_XRGB1555: 1957 - case DRM_FORMAT_ARGB1555: 1958 - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 1959 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 1960 - #ifdef __BIG_ENDIAN 1961 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1962 - ENDIAN_8IN16); 1963 - #endif 1964 - break; 1965 - case DRM_FORMAT_BGRX5551: 1966 - case DRM_FORMAT_BGRA5551: 1967 - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 1968 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 5); 1969 - #ifdef __BIG_ENDIAN 1970 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1971 - ENDIAN_8IN16); 1972 - #endif 1973 - break; 1974 - case DRM_FORMAT_RGB565: 1975 - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 1); 1976 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); 1977 - #ifdef __BIG_ENDIAN 1978 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1979 - ENDIAN_8IN16); 1980 - #endif 1981 - break; 1982 - case DRM_FORMAT_XRGB8888: 1983 - case DRM_FORMAT_ARGB8888: 1984 - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 1985 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 1986 - #ifdef __BIG_ENDIAN 1987 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1988 - ENDIAN_8IN32); 1989 - #endif 1990 - break; 1991 - case DRM_FORMAT_XRGB2101010: 1992 - case DRM_FORMAT_ARGB2101010: 1993 - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 1994 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 1); 1995 - #ifdef __BIG_ENDIAN 1996 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 1997 - ENDIAN_8IN32); 1998 - #endif 1999 - /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */ 2000 - bypass_lut = true; 2001 - break; 2002 - case DRM_FORMAT_BGRX1010102: 2003 - case DRM_FORMAT_BGRA1010102: 2004 - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 2005 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 4); 2006 - #ifdef __BIG_ENDIAN 2007 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2008 - ENDIAN_8IN32); 2009 - #endif 2010 - /* Greater than 8 bpc fb needs to bypass hw-lut to retain precision */ 2011 - bypass_lut = true; 2012 - break; 2013 - case DRM_FORMAT_XBGR8888: 2014 - case DRM_FORMAT_ABGR8888: 2015 - fb_format = REG_SET_FIELD(0, GRPH_CONTROL, GRPH_DEPTH, 2); 2016 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_FORMAT, 0); 2017 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL,
GRPH_RED_CROSSBAR, 2); 2018 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_BLUE_CROSSBAR, 2); 2019 - #ifdef __BIG_ENDIAN 2020 - fb_swap = REG_SET_FIELD(fb_swap, GRPH_SWAP_CNTL, GRPH_ENDIAN_SWAP, 2021 - ENDIAN_8IN32); 2022 - #endif 2023 - break; 2024 - default: 2025 - DRM_ERROR("Unsupported screen format %p4cc\n", 2026 - &target_fb->format->format); 2027 - return -EINVAL; 2028 - } 2029 - 2030 - if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_2D_TILED_THIN1) { 2031 - unsigned bankw, bankh, mtaspect, tile_split, num_banks; 2032 - 2033 - bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); 2034 - bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); 2035 - mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); 2036 - tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); 2037 - num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); 2038 - 2039 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_NUM_BANKS, num_banks); 2040 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, 2041 - ARRAY_2D_TILED_THIN1); 2042 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_TILE_SPLIT, 2043 - tile_split); 2044 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_WIDTH, bankw); 2045 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_BANK_HEIGHT, bankh); 2046 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MACRO_TILE_ASPECT, 2047 - mtaspect); 2048 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_MICRO_TILE_MODE, 2049 - ADDR_SURF_MICRO_TILING_DISPLAY); 2050 - } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == ARRAY_1D_TILED_THIN1) { 2051 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_ARRAY_MODE, 2052 - ARRAY_1D_TILED_THIN1); 2053 - } 2054 - 2055 - fb_format = REG_SET_FIELD(fb_format, GRPH_CONTROL, GRPH_PIPE_CONFIG, 2056 - pipe_config); 2057 - 2058 - dce_v11_0_vga_enable(crtc, false); 2059 - 2060 - /* Make sure surface address is updated at vertical blank rather than 2061 - * horizontal blank 2062 - */ 2063 - tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); 2064 - tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, 2065 - GRPH_SURFACE_UPDATE_H_RETRACE_EN, 0); 2066 - WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2067 - 2068 - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2069 - upper_32_bits(fb_location)); 2070 - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2071 - upper_32_bits(fb_location)); 2072 - WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2073 - (u32)fb_location & GRPH_PRIMARY_SURFACE_ADDRESS__GRPH_PRIMARY_SURFACE_ADDRESS_MASK); 2074 - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2075 - (u32) fb_location & GRPH_SECONDARY_SURFACE_ADDRESS__GRPH_SECONDARY_SURFACE_ADDRESS_MASK); 2076 - WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 2077 - WREG32(mmGRPH_SWAP_CNTL + amdgpu_crtc->crtc_offset, fb_swap); 2078 - 2079 - /* 2080 - * The LUT only has 256 slots for indexing by an 8 bpc fb. Bypass the LUT 2081 - * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to 2082 - * retain the full precision throughout the pipeline.
2083 - */ 2084 - tmp = RREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset); 2085 - if (bypass_lut) 2086 - tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 1); 2087 - else 2088 - tmp = REG_SET_FIELD(tmp, GRPH_LUT_10BIT_BYPASS, GRPH_LUT_10BIT_BYPASS_EN, 0); 2089 - WREG32(mmGRPH_LUT_10BIT_BYPASS + amdgpu_crtc->crtc_offset, tmp); 2090 - 2091 - if (bypass_lut) 2092 - DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n"); 2093 - 2094 - WREG32(mmGRPH_SURFACE_OFFSET_X + amdgpu_crtc->crtc_offset, 0); 2095 - WREG32(mmGRPH_SURFACE_OFFSET_Y + amdgpu_crtc->crtc_offset, 0); 2096 - WREG32(mmGRPH_X_START + amdgpu_crtc->crtc_offset, 0); 2097 - WREG32(mmGRPH_Y_START + amdgpu_crtc->crtc_offset, 0); 2098 - WREG32(mmGRPH_X_END + amdgpu_crtc->crtc_offset, target_fb->width); 2099 - WREG32(mmGRPH_Y_END + amdgpu_crtc->crtc_offset, target_fb->height); 2100 - 2101 - fb_pitch_pixels = target_fb->pitches[0] / target_fb->format->cpp[0]; 2102 - WREG32(mmGRPH_PITCH + amdgpu_crtc->crtc_offset, fb_pitch_pixels); 2103 - 2104 - dce_v11_0_grph_enable(crtc, true); 2105 - 2106 - WREG32(mmLB_DESKTOP_HEIGHT + amdgpu_crtc->crtc_offset, 2107 - target_fb->height); 2108 - 2109 - x &= ~3; 2110 - y &= ~1; 2111 - WREG32(mmVIEWPORT_START + amdgpu_crtc->crtc_offset, 2112 - (x << 16) | y); 2113 - viewport_w = crtc->mode.hdisplay; 2114 - viewport_h = (crtc->mode.vdisplay + 1) & ~1; 2115 - WREG32(mmVIEWPORT_SIZE + amdgpu_crtc->crtc_offset, 2116 - (viewport_w << 16) | viewport_h); 2117 - 2118 - /* set pageflip to happen anywhere in vblank interval */ 2119 - WREG32(mmCRTC_MASTER_UPDATE_MODE + amdgpu_crtc->crtc_offset, 0); 2120 - 2121 - if (!atomic && fb && fb != crtc->primary->fb) { 2122 - abo = gem_to_amdgpu_bo(fb->obj[0]); 2123 - r = amdgpu_bo_reserve(abo, true); 2124 - if (unlikely(r != 0)) 2125 - return r; 2126 - amdgpu_bo_unpin(abo); 2127 - amdgpu_bo_unreserve(abo); 2128 - } 2129 - 2130 - /* Bytes per pixel may have changed */ 2131 - dce_v11_0_bandwidth_update(adev); 2132 - 2133 - return 0; 2134 - } 2135 - 2136 - static void dce_v11_0_set_interleave(struct drm_crtc *crtc, 2137 - struct drm_display_mode *mode) 2138 - { 2139 - struct drm_device *dev = crtc->dev; 2140 - struct amdgpu_device *adev = drm_to_adev(dev); 2141 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2142 - u32 tmp; 2143 - 2144 - tmp = RREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset); 2145 - if (mode->flags & DRM_MODE_FLAG_INTERLACE) 2146 - tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 1); 2147 - else 2148 - tmp = REG_SET_FIELD(tmp, LB_DATA_FORMAT, INTERLEAVE_EN, 0); 2149 - WREG32(mmLB_DATA_FORMAT + amdgpu_crtc->crtc_offset, tmp); 2150 - } 2151 - 2152 - static void dce_v11_0_crtc_load_lut(struct drm_crtc *crtc) 2153 - { 2154 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2155 - struct drm_device *dev = crtc->dev; 2156 - struct amdgpu_device *adev = drm_to_adev(dev); 2157 - u16 *r, *g, *b; 2158 - int i; 2159 - u32 tmp; 2160 - 2161 - DRM_DEBUG_KMS("%d\n", amdgpu_crtc->crtc_id); 2162 - 2163 - tmp = RREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); 2164 - tmp = REG_SET_FIELD(tmp, INPUT_CSC_CONTROL, INPUT_CSC_GRPH_MODE, 0); 2165 - WREG32(mmINPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2166 - 2167 - tmp = RREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset); 2168 - tmp = REG_SET_FIELD(tmp, PRESCALE_GRPH_CONTROL, GRPH_PRESCALE_BYPASS, 1); 2169 - WREG32(mmPRESCALE_GRPH_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2170 - 2171 - tmp = RREG32(mmINPUT_GAMMA_CONTROL + 
amdgpu_crtc->crtc_offset); 2172 - tmp = REG_SET_FIELD(tmp, INPUT_GAMMA_CONTROL, GRPH_INPUT_GAMMA_MODE, 0); 2173 - WREG32(mmINPUT_GAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2174 - 2175 - WREG32(mmDC_LUT_CONTROL + amdgpu_crtc->crtc_offset, 0); 2176 - 2177 - WREG32(mmDC_LUT_BLACK_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0); 2178 - WREG32(mmDC_LUT_BLACK_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0); 2179 - WREG32(mmDC_LUT_BLACK_OFFSET_RED + amdgpu_crtc->crtc_offset, 0); 2180 - 2181 - WREG32(mmDC_LUT_WHITE_OFFSET_BLUE + amdgpu_crtc->crtc_offset, 0xffff); 2182 - WREG32(mmDC_LUT_WHITE_OFFSET_GREEN + amdgpu_crtc->crtc_offset, 0xffff); 2183 - WREG32(mmDC_LUT_WHITE_OFFSET_RED + amdgpu_crtc->crtc_offset, 0xffff); 2184 - 2185 - WREG32(mmDC_LUT_RW_MODE + amdgpu_crtc->crtc_offset, 0); 2186 - WREG32(mmDC_LUT_WRITE_EN_MASK + amdgpu_crtc->crtc_offset, 0x00000007); 2187 - 2188 - WREG32(mmDC_LUT_RW_INDEX + amdgpu_crtc->crtc_offset, 0); 2189 - r = crtc->gamma_store; 2190 - g = r + crtc->gamma_size; 2191 - b = g + crtc->gamma_size; 2192 - for (i = 0; i < 256; i++) { 2193 - WREG32(mmDC_LUT_30_COLOR + amdgpu_crtc->crtc_offset, 2194 - ((*r++ & 0xffc0) << 14) | 2195 - ((*g++ & 0xffc0) << 4) | 2196 - (*b++ >> 6)); 2197 - } 2198 - 2199 - tmp = RREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2200 - tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, GRPH_DEGAMMA_MODE, 0); 2201 - tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR_DEGAMMA_MODE, 0); 2202 - tmp = REG_SET_FIELD(tmp, DEGAMMA_CONTROL, CURSOR2_DEGAMMA_MODE, 0); 2203 - WREG32(mmDEGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2204 - 2205 - tmp = RREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset); 2206 - tmp = REG_SET_FIELD(tmp, GAMUT_REMAP_CONTROL, GRPH_GAMUT_REMAP_MODE, 0); 2207 - WREG32(mmGAMUT_REMAP_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2208 - 2209 - tmp = RREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset); 2210 - tmp = REG_SET_FIELD(tmp, REGAMMA_CONTROL, GRPH_REGAMMA_MODE, 0); 2211 - WREG32(mmREGAMMA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2212 - 2213 - tmp = RREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset); 2214 - tmp = REG_SET_FIELD(tmp, OUTPUT_CSC_CONTROL, OUTPUT_CSC_GRPH_MODE, 0); 2215 - WREG32(mmOUTPUT_CSC_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2216 - 2217 - /* XXX match this to the depth of the crtc fmt block, move to modeset? 
*/ 2218 - WREG32(mmDENORM_CONTROL + amdgpu_crtc->crtc_offset, 0); 2219 - /* XXX this only needs to be programmed once per crtc at startup, 2220 - * not sure where the best place for it is 2221 - */ 2222 - tmp = RREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset); 2223 - tmp = REG_SET_FIELD(tmp, ALPHA_CONTROL, CURSOR_ALPHA_BLND_ENA, 1); 2224 - WREG32(mmALPHA_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2225 - } 2226 - 2227 - static int dce_v11_0_pick_dig_encoder(struct drm_encoder *encoder) 2228 - { 2229 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 2230 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2231 - 2232 - switch (amdgpu_encoder->encoder_id) { 2233 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2234 - if (dig->linkb) 2235 - return 1; 2236 - else 2237 - return 0; 2238 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2239 - if (dig->linkb) 2240 - return 3; 2241 - else 2242 - return 2; 2243 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2244 - if (dig->linkb) 2245 - return 5; 2246 - else 2247 - return 4; 2248 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 2249 - return 6; 2250 - default: 2251 - DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2252 - return 0; 2253 - } 2254 - } 2255 - 2256 - /** 2257 - * dce_v11_0_pick_pll - Allocate a PPLL for use by the crtc. 2258 - * 2259 - * @crtc: drm crtc 2260 - * 2261 - * Returns the PPLL (Pixel PLL) to be used by the crtc. For DP monitors 2262 - * a single PPLL can be used for all DP crtcs/encoders. For non-DP 2263 - * monitors a dedicated PPLL must be used. If a particular board has 2264 - * an external DP PLL, return ATOM_PPLL_INVALID to skip PLL programming 2265 - * as there is no need to program the PLL itself. If we are not able to 2266 - * allocate a PLL, return ATOM_PPLL_INVALID to skip PLL programming to 2267 - * avoid messing up an existing monitor. 
2268 - * 2269 - * Asic specific PLL information 2270 - * 2271 - * DCE 10.x 2272 - * Tonga 2273 - * - PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) 2274 - * CI 2275 - * - PPLL0, PPLL1, PPLL2 are available for all UNIPHY (both DP and non-DP) and DAC 2276 - * 2277 - */ 2278 - static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc) 2279 - { 2280 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2281 - struct drm_device *dev = crtc->dev; 2282 - struct amdgpu_device *adev = drm_to_adev(dev); 2283 - u32 pll_in_use; 2284 - int pll; 2285 - 2286 - if ((adev->asic_type == CHIP_POLARIS10) || 2287 - (adev->asic_type == CHIP_POLARIS11) || 2288 - (adev->asic_type == CHIP_POLARIS12) || 2289 - (adev->asic_type == CHIP_VEGAM)) { 2290 - struct amdgpu_encoder *amdgpu_encoder = 2291 - to_amdgpu_encoder(amdgpu_crtc->encoder); 2292 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 2293 - 2294 - if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) 2295 - return ATOM_DP_DTO; 2296 - 2297 - switch (amdgpu_encoder->encoder_id) { 2298 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 2299 - if (dig->linkb) 2300 - return ATOM_COMBOPHY_PLL1; 2301 - else 2302 - return ATOM_COMBOPHY_PLL0; 2303 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 2304 - if (dig->linkb) 2305 - return ATOM_COMBOPHY_PLL3; 2306 - else 2307 - return ATOM_COMBOPHY_PLL2; 2308 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 2309 - if (dig->linkb) 2310 - return ATOM_COMBOPHY_PLL5; 2311 - else 2312 - return ATOM_COMBOPHY_PLL4; 2313 - default: 2314 - DRM_ERROR("invalid encoder_id: 0x%x\n", amdgpu_encoder->encoder_id); 2315 - return ATOM_PPLL_INVALID; 2316 - } 2317 - } 2318 - 2319 - if (ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) { 2320 - if (adev->clock.dp_extclk) 2321 - /* skip PPLL programming if using ext clock */ 2322 - return ATOM_PPLL_INVALID; 2323 - else { 2324 - /* use the same PPLL for all DP monitors */ 2325 - pll = amdgpu_pll_get_shared_dp_ppll(crtc); 2326 - if (pll != ATOM_PPLL_INVALID) 2327 - return pll; 2328 - } 2329 - } else { 2330 - /* use the same PPLL for all monitors with the same clock */ 2331 - pll = amdgpu_pll_get_shared_nondp_ppll(crtc); 2332 - if (pll != ATOM_PPLL_INVALID) 2333 - return pll; 2334 - } 2335 - 2336 - /* XXX need to determine what plls are available on each DCE11 part */ 2337 - pll_in_use = amdgpu_pll_get_use_mask(crtc); 2338 - if (adev->flags & AMD_IS_APU) { 2339 - if (!(pll_in_use & (1 << ATOM_PPLL1))) 2340 - return ATOM_PPLL1; 2341 - if (!(pll_in_use & (1 << ATOM_PPLL0))) 2342 - return ATOM_PPLL0; 2343 - DRM_ERROR("unable to allocate a PPLL\n"); 2344 - return ATOM_PPLL_INVALID; 2345 - } else { 2346 - if (!(pll_in_use & (1 << ATOM_PPLL2))) 2347 - return ATOM_PPLL2; 2348 - if (!(pll_in_use & (1 << ATOM_PPLL1))) 2349 - return ATOM_PPLL1; 2350 - if (!(pll_in_use & (1 << ATOM_PPLL0))) 2351 - return ATOM_PPLL0; 2352 - DRM_ERROR("unable to allocate a PPLL\n"); 2353 - return ATOM_PPLL_INVALID; 2354 - } 2355 - return ATOM_PPLL_INVALID; 2356 - } 2357 - 2358 - static void dce_v11_0_lock_cursor(struct drm_crtc *crtc, bool lock) 2359 - { 2360 - struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2361 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2362 - uint32_t cur_lock; 2363 - 2364 - cur_lock = RREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset); 2365 - if (lock) 2366 - cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 1); 2367 - else 2368 - cur_lock = REG_SET_FIELD(cur_lock, CUR_UPDATE, CURSOR_UPDATE_LOCK, 
0); 2369 - WREG32(mmCUR_UPDATE + amdgpu_crtc->crtc_offset, cur_lock); 2370 - } 2371 - 2372 - static void dce_v11_0_hide_cursor(struct drm_crtc *crtc) 2373 - { 2374 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2375 - struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2376 - u32 tmp; 2377 - 2378 - tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2379 - tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 0); 2380 - WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2381 - } 2382 - 2383 - static void dce_v11_0_show_cursor(struct drm_crtc *crtc) 2384 - { 2385 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2386 - struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2387 - u32 tmp; 2388 - 2389 - WREG32(mmCUR_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, 2390 - upper_32_bits(amdgpu_crtc->cursor_addr)); 2391 - WREG32(mmCUR_SURFACE_ADDRESS + amdgpu_crtc->crtc_offset, 2392 - lower_32_bits(amdgpu_crtc->cursor_addr)); 2393 - 2394 - tmp = RREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset); 2395 - tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1); 2396 - tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2); 2397 - WREG32(mmCUR_CONTROL + amdgpu_crtc->crtc_offset, tmp); 2398 - } 2399 - 2400 - static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc, 2401 - int x, int y) 2402 - { 2403 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2404 - struct amdgpu_device *adev = drm_to_adev(crtc->dev); 2405 - int xorigin = 0, yorigin = 0; 2406 - 2407 - amdgpu_crtc->cursor_x = x; 2408 - amdgpu_crtc->cursor_y = y; 2409 - 2410 - /* avivo cursor are offset into the total surface */ 2411 - x += crtc->x; 2412 - y += crtc->y; 2413 - DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); 2414 - 2415 - if (x < 0) { 2416 - xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); 2417 - x = 0; 2418 - } 2419 - if (y < 0) { 2420 - yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); 2421 - y = 0; 2422 - } 2423 - 2424 - WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y); 2425 - WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin); 2426 - WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset, 2427 - ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1)); 2428 - 2429 - return 0; 2430 - } 2431 - 2432 - static int dce_v11_0_crtc_cursor_move(struct drm_crtc *crtc, 2433 - int x, int y) 2434 - { 2435 - int ret; 2436 - 2437 - dce_v11_0_lock_cursor(crtc, true); 2438 - ret = dce_v11_0_cursor_move_locked(crtc, x, y); 2439 - dce_v11_0_lock_cursor(crtc, false); 2440 - 2441 - return ret; 2442 - } 2443 - 2444 - static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc, 2445 - struct drm_file *file_priv, 2446 - uint32_t handle, 2447 - uint32_t width, 2448 - uint32_t height, 2449 - int32_t hot_x, 2450 - int32_t hot_y) 2451 - { 2452 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2453 - struct drm_gem_object *obj; 2454 - struct amdgpu_bo *aobj; 2455 - int ret; 2456 - 2457 - if (!handle) { 2458 - /* turn off cursor */ 2459 - dce_v11_0_hide_cursor(crtc); 2460 - obj = NULL; 2461 - goto unpin; 2462 - } 2463 - 2464 - if ((width > amdgpu_crtc->max_cursor_width) || 2465 - (height > amdgpu_crtc->max_cursor_height)) { 2466 - DRM_ERROR("bad cursor width or height %d x %d\n", width, height); 2467 - return -EINVAL; 2468 - } 2469 - 2470 - obj = drm_gem_object_lookup(file_priv, handle); 2471 - if (!obj) { 2472 - DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, amdgpu_crtc->crtc_id); 2473 - return -ENOENT; 2474 - } 2475 - 2476 
- aobj = gem_to_amdgpu_bo(obj); 2477 - ret = amdgpu_bo_reserve(aobj, false); 2478 - if (ret != 0) { 2479 - drm_gem_object_put(obj); 2480 - return ret; 2481 - } 2482 - 2483 - aobj->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; 2484 - ret = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM); 2485 - amdgpu_bo_unreserve(aobj); 2486 - if (ret) { 2487 - DRM_ERROR("Failed to pin new cursor BO (%d)\n", ret); 2488 - drm_gem_object_put(obj); 2489 - return ret; 2490 - } 2491 - amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj); 2492 - 2493 - dce_v11_0_lock_cursor(crtc, true); 2494 - 2495 - if (width != amdgpu_crtc->cursor_width || 2496 - height != amdgpu_crtc->cursor_height || 2497 - hot_x != amdgpu_crtc->cursor_hot_x || 2498 - hot_y != amdgpu_crtc->cursor_hot_y) { 2499 - int x, y; 2500 - 2501 - x = amdgpu_crtc->cursor_x + amdgpu_crtc->cursor_hot_x - hot_x; 2502 - y = amdgpu_crtc->cursor_y + amdgpu_crtc->cursor_hot_y - hot_y; 2503 - 2504 - dce_v11_0_cursor_move_locked(crtc, x, y); 2505 - 2506 - amdgpu_crtc->cursor_width = width; 2507 - amdgpu_crtc->cursor_height = height; 2508 - amdgpu_crtc->cursor_hot_x = hot_x; 2509 - amdgpu_crtc->cursor_hot_y = hot_y; 2510 - } 2511 - 2512 - dce_v11_0_show_cursor(crtc); 2513 - dce_v11_0_lock_cursor(crtc, false); 2514 - 2515 - unpin: 2516 - if (amdgpu_crtc->cursor_bo) { 2517 - struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo); 2518 - ret = amdgpu_bo_reserve(aobj, true); 2519 - if (likely(ret == 0)) { 2520 - amdgpu_bo_unpin(aobj); 2521 - amdgpu_bo_unreserve(aobj); 2522 - } 2523 - drm_gem_object_put(amdgpu_crtc->cursor_bo); 2524 - } 2525 - 2526 - amdgpu_crtc->cursor_bo = obj; 2527 - return 0; 2528 - } 2529 - 2530 - static void dce_v11_0_cursor_reset(struct drm_crtc *crtc) 2531 - { 2532 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2533 - 2534 - if (amdgpu_crtc->cursor_bo) { 2535 - dce_v11_0_lock_cursor(crtc, true); 2536 - 2537 - dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x, 2538 - amdgpu_crtc->cursor_y); 2539 - 2540 - dce_v11_0_show_cursor(crtc); 2541 - 2542 - dce_v11_0_lock_cursor(crtc, false); 2543 - } 2544 - } 2545 - 2546 - static int dce_v11_0_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, 2547 - u16 *blue, uint32_t size, 2548 - struct drm_modeset_acquire_ctx *ctx) 2549 - { 2550 - dce_v11_0_crtc_load_lut(crtc); 2551 - 2552 - return 0; 2553 - } 2554 - 2555 - static void dce_v11_0_crtc_destroy(struct drm_crtc *crtc) 2556 - { 2557 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2558 - 2559 - drm_crtc_cleanup(crtc); 2560 - kfree(amdgpu_crtc); 2561 - } 2562 - 2563 - static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = { 2564 - .cursor_set2 = dce_v11_0_crtc_cursor_set2, 2565 - .cursor_move = dce_v11_0_crtc_cursor_move, 2566 - .gamma_set = dce_v11_0_crtc_gamma_set, 2567 - .set_config = amdgpu_display_crtc_set_config, 2568 - .destroy = dce_v11_0_crtc_destroy, 2569 - .page_flip_target = amdgpu_display_crtc_page_flip_target, 2570 - .get_vblank_counter = amdgpu_get_vblank_counter_kms, 2571 - .enable_vblank = amdgpu_enable_vblank_kms, 2572 - .disable_vblank = amdgpu_disable_vblank_kms, 2573 - .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 2574 - }; 2575 - 2576 - static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) 2577 - { 2578 - struct drm_device *dev = crtc->dev; 2579 - struct amdgpu_device *adev = drm_to_adev(dev); 2580 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2581 - unsigned type; 2582 - 2583 - switch (mode) { 2584 - case DRM_MODE_DPMS_ON: 2585 - 
amdgpu_crtc->enabled = true; 2586 - amdgpu_atombios_crtc_enable(crtc, ATOM_ENABLE); 2587 - dce_v11_0_vga_enable(crtc, true); 2588 - amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2589 - dce_v11_0_vga_enable(crtc, false); 2590 - /* Make sure VBLANK and PFLIP interrupts are still enabled */ 2591 - type = amdgpu_display_crtc_idx_to_irq_type(adev, 2592 - amdgpu_crtc->crtc_id); 2593 - amdgpu_irq_update(adev, &adev->crtc_irq, type); 2594 - amdgpu_irq_update(adev, &adev->pageflip_irq, type); 2595 - drm_crtc_vblank_on(crtc); 2596 - dce_v11_0_crtc_load_lut(crtc); 2597 - break; 2598 - case DRM_MODE_DPMS_STANDBY: 2599 - case DRM_MODE_DPMS_SUSPEND: 2600 - case DRM_MODE_DPMS_OFF: 2601 - drm_crtc_vblank_off(crtc); 2602 - if (amdgpu_crtc->enabled) { 2603 - dce_v11_0_vga_enable(crtc, true); 2604 - amdgpu_atombios_crtc_blank(crtc, ATOM_ENABLE); 2605 - dce_v11_0_vga_enable(crtc, false); 2606 - } 2607 - amdgpu_atombios_crtc_enable(crtc, ATOM_DISABLE); 2608 - amdgpu_crtc->enabled = false; 2609 - break; 2610 - } 2611 - /* adjust pm to dpms */ 2612 - amdgpu_dpm_compute_clocks(adev); 2613 - } 2614 - 2615 - static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc) 2616 - { 2617 - /* disable crtc pair power gating before programming */ 2618 - amdgpu_atombios_crtc_powergate(crtc, ATOM_DISABLE); 2619 - amdgpu_atombios_crtc_lock(crtc, ATOM_ENABLE); 2620 - dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2621 - } 2622 - 2623 - static void dce_v11_0_crtc_commit(struct drm_crtc *crtc) 2624 - { 2625 - dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 2626 - amdgpu_atombios_crtc_lock(crtc, ATOM_DISABLE); 2627 - } 2628 - 2629 - static void dce_v11_0_crtc_disable(struct drm_crtc *crtc) 2630 - { 2631 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2632 - struct drm_device *dev = crtc->dev; 2633 - struct amdgpu_device *adev = drm_to_adev(dev); 2634 - struct amdgpu_atom_ss ss; 2635 - int i; 2636 - 2637 - dce_v11_0_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); 2638 - if (crtc->primary->fb) { 2639 - int r; 2640 - struct amdgpu_bo *abo; 2641 - 2642 - abo = gem_to_amdgpu_bo(crtc->primary->fb->obj[0]); 2643 - r = amdgpu_bo_reserve(abo, true); 2644 - if (unlikely(r)) 2645 - DRM_ERROR("failed to reserve abo before unpin\n"); 2646 - else { 2647 - amdgpu_bo_unpin(abo); 2648 - amdgpu_bo_unreserve(abo); 2649 - } 2650 - } 2651 - /* disable the GRPH */ 2652 - dce_v11_0_grph_enable(crtc, false); 2653 - 2654 - amdgpu_atombios_crtc_powergate(crtc, ATOM_ENABLE); 2655 - 2656 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 2657 - if (adev->mode_info.crtcs[i] && 2658 - adev->mode_info.crtcs[i]->enabled && 2659 - i != amdgpu_crtc->crtc_id && 2660 - amdgpu_crtc->pll_id == adev->mode_info.crtcs[i]->pll_id) { 2661 - /* one other crtc is using this pll don't turn 2662 - * off the pll 2663 - */ 2664 - goto done; 2665 - } 2666 - } 2667 - 2668 - switch (amdgpu_crtc->pll_id) { 2669 - case ATOM_PPLL0: 2670 - case ATOM_PPLL1: 2671 - case ATOM_PPLL2: 2672 - /* disable the ppll */ 2673 - amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, amdgpu_crtc->pll_id, 2674 - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2675 - break; 2676 - case ATOM_COMBOPHY_PLL0: 2677 - case ATOM_COMBOPHY_PLL1: 2678 - case ATOM_COMBOPHY_PLL2: 2679 - case ATOM_COMBOPHY_PLL3: 2680 - case ATOM_COMBOPHY_PLL4: 2681 - case ATOM_COMBOPHY_PLL5: 2682 - /* disable the ppll */ 2683 - amdgpu_atombios_crtc_program_pll(crtc, ATOM_CRTC_INVALID, amdgpu_crtc->pll_id, 2684 - 0, 0, ATOM_DISABLE, 0, 0, 0, 0, 0, false, &ss); 2685 - break; 2686 - default: 2687 - break; 2688 - } 2689 - done: 2690 - 
amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2691 - amdgpu_crtc->adjusted_clock = 0; 2692 - amdgpu_crtc->encoder = NULL; 2693 - amdgpu_crtc->connector = NULL; 2694 - } 2695 - 2696 - static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc, 2697 - struct drm_display_mode *mode, 2698 - struct drm_display_mode *adjusted_mode, 2699 - int x, int y, struct drm_framebuffer *old_fb) 2700 - { 2701 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2702 - struct drm_device *dev = crtc->dev; 2703 - struct amdgpu_device *adev = drm_to_adev(dev); 2704 - 2705 - if (!amdgpu_crtc->adjusted_clock) 2706 - return -EINVAL; 2707 - 2708 - if ((adev->asic_type == CHIP_POLARIS10) || 2709 - (adev->asic_type == CHIP_POLARIS11) || 2710 - (adev->asic_type == CHIP_POLARIS12) || 2711 - (adev->asic_type == CHIP_VEGAM)) { 2712 - struct amdgpu_encoder *amdgpu_encoder = 2713 - to_amdgpu_encoder(amdgpu_crtc->encoder); 2714 - int encoder_mode = 2715 - amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder); 2716 - 2717 - /* SetPixelClock calculates the plls and ss values now */ 2718 - amdgpu_atombios_crtc_program_pll(crtc, amdgpu_crtc->crtc_id, 2719 - amdgpu_crtc->pll_id, 2720 - encoder_mode, amdgpu_encoder->encoder_id, 2721 - adjusted_mode->clock, 0, 0, 0, 0, 2722 - amdgpu_crtc->bpc, amdgpu_crtc->ss_enabled, &amdgpu_crtc->ss); 2723 - } else { 2724 - amdgpu_atombios_crtc_set_pll(crtc, adjusted_mode); 2725 - } 2726 - amdgpu_atombios_crtc_set_dtd_timing(crtc, adjusted_mode); 2727 - dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2728 - amdgpu_atombios_crtc_overscan_setup(crtc, mode, adjusted_mode); 2729 - amdgpu_atombios_crtc_scaler_setup(crtc); 2730 - dce_v11_0_cursor_reset(crtc); 2731 - /* update the hw version for dpm */ 2732 - amdgpu_crtc->hw_mode = *adjusted_mode; 2733 - 2734 - return 0; 2735 - } 2736 - 2737 - static bool dce_v11_0_crtc_mode_fixup(struct drm_crtc *crtc, 2738 - const struct drm_display_mode *mode, 2739 - struct drm_display_mode *adjusted_mode) 2740 - { 2741 - struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2742 - struct drm_device *dev = crtc->dev; 2743 - struct drm_encoder *encoder; 2744 - 2745 - /* assign the encoder to the amdgpu crtc to avoid repeated lookups later */ 2746 - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 2747 - if (encoder->crtc == crtc) { 2748 - amdgpu_crtc->encoder = encoder; 2749 - amdgpu_crtc->connector = amdgpu_get_connector_for_encoder(encoder); 2750 - break; 2751 - } 2752 - } 2753 - if ((amdgpu_crtc->encoder == NULL) || (amdgpu_crtc->connector == NULL)) { 2754 - amdgpu_crtc->encoder = NULL; 2755 - amdgpu_crtc->connector = NULL; 2756 - return false; 2757 - } 2758 - if (!amdgpu_display_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 2759 - return false; 2760 - if (amdgpu_atombios_crtc_prepare_pll(crtc, adjusted_mode)) 2761 - return false; 2762 - /* pick pll */ 2763 - amdgpu_crtc->pll_id = dce_v11_0_pick_pll(crtc); 2764 - /* if we can't get a PPLL for a non-DP encoder, fail */ 2765 - if ((amdgpu_crtc->pll_id == ATOM_PPLL_INVALID) && 2766 - !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(amdgpu_crtc->encoder))) 2767 - return false; 2768 - 2769 - return true; 2770 - } 2771 - 2772 - static int dce_v11_0_crtc_set_base(struct drm_crtc *crtc, int x, int y, 2773 - struct drm_framebuffer *old_fb) 2774 - { 2775 - return dce_v11_0_crtc_do_set_base(crtc, old_fb, x, y, 0); 2776 - } 2777 - 2778 - static int dce_v11_0_crtc_set_base_atomic(struct drm_crtc *crtc, 2779 - struct drm_framebuffer *fb, 2780 - int x, int y, enum mode_set_atomic
state) 2781 - { 2782 - return dce_v11_0_crtc_do_set_base(crtc, fb, x, y, 1); 2783 - } 2784 - 2785 - static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = { 2786 - .dpms = dce_v11_0_crtc_dpms, 2787 - .mode_fixup = dce_v11_0_crtc_mode_fixup, 2788 - .mode_set = dce_v11_0_crtc_mode_set, 2789 - .mode_set_base = dce_v11_0_crtc_set_base, 2790 - .mode_set_base_atomic = dce_v11_0_crtc_set_base_atomic, 2791 - .prepare = dce_v11_0_crtc_prepare, 2792 - .commit = dce_v11_0_crtc_commit, 2793 - .disable = dce_v11_0_crtc_disable, 2794 - .get_scanout_position = amdgpu_crtc_get_scanout_position, 2795 - }; 2796 - 2797 - static void dce_v11_0_panic_flush(struct drm_plane *plane) 2798 - { 2799 - struct drm_framebuffer *fb; 2800 - struct amdgpu_crtc *amdgpu_crtc; 2801 - struct amdgpu_device *adev; 2802 - uint32_t fb_format; 2803 - 2804 - if (!plane->fb) 2805 - return; 2806 - 2807 - fb = plane->fb; 2808 - amdgpu_crtc = to_amdgpu_crtc(plane->crtc); 2809 - adev = drm_to_adev(fb->dev); 2810 - 2811 - /* Disable DC tiling */ 2812 - fb_format = RREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset); 2813 - fb_format &= ~GRPH_CONTROL__GRPH_ARRAY_MODE_MASK; 2814 - WREG32(mmGRPH_CONTROL + amdgpu_crtc->crtc_offset, fb_format); 2815 - 2816 - } 2817 - 2818 - static const struct drm_plane_helper_funcs dce_v11_0_drm_primary_plane_helper_funcs = { 2819 - .get_scanout_buffer = amdgpu_display_get_scanout_buffer, 2820 - .panic_flush = dce_v11_0_panic_flush, 2821 - }; 2822 - 2823 - static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index) 2824 - { 2825 - struct amdgpu_crtc *amdgpu_crtc; 2826 - 2827 - amdgpu_crtc = kzalloc(sizeof(struct amdgpu_crtc) + 2828 - (AMDGPUFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); 2829 - if (amdgpu_crtc == NULL) 2830 - return -ENOMEM; 2831 - 2832 - drm_crtc_init(adev_to_drm(adev), &amdgpu_crtc->base, &dce_v11_0_crtc_funcs); 2833 - 2834 - drm_mode_crtc_set_gamma_size(&amdgpu_crtc->base, 256); 2835 - amdgpu_crtc->crtc_id = index; 2836 - adev->mode_info.crtcs[index] = amdgpu_crtc; 2837 - 2838 - amdgpu_crtc->max_cursor_width = 128; 2839 - amdgpu_crtc->max_cursor_height = 128; 2840 - adev_to_drm(adev)->mode_config.cursor_width = amdgpu_crtc->max_cursor_width; 2841 - adev_to_drm(adev)->mode_config.cursor_height = amdgpu_crtc->max_cursor_height; 2842 - 2843 - switch (amdgpu_crtc->crtc_id) { 2844 - case 0: 2845 - default: 2846 - amdgpu_crtc->crtc_offset = CRTC0_REGISTER_OFFSET; 2847 - break; 2848 - case 1: 2849 - amdgpu_crtc->crtc_offset = CRTC1_REGISTER_OFFSET; 2850 - break; 2851 - case 2: 2852 - amdgpu_crtc->crtc_offset = CRTC2_REGISTER_OFFSET; 2853 - break; 2854 - case 3: 2855 - amdgpu_crtc->crtc_offset = CRTC3_REGISTER_OFFSET; 2856 - break; 2857 - case 4: 2858 - amdgpu_crtc->crtc_offset = CRTC4_REGISTER_OFFSET; 2859 - break; 2860 - case 5: 2861 - amdgpu_crtc->crtc_offset = CRTC5_REGISTER_OFFSET; 2862 - break; 2863 - } 2864 - 2865 - amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; 2866 - amdgpu_crtc->adjusted_clock = 0; 2867 - amdgpu_crtc->encoder = NULL; 2868 - amdgpu_crtc->connector = NULL; 2869 - drm_crtc_helper_add(&amdgpu_crtc->base, &dce_v11_0_crtc_helper_funcs); 2870 - drm_plane_helper_add(amdgpu_crtc->base.primary, &dce_v11_0_drm_primary_plane_helper_funcs); 2871 - 2872 - return 0; 2873 - } 2874 - 2875 - static int dce_v11_0_early_init(struct amdgpu_ip_block *ip_block) 2876 - { 2877 - struct amdgpu_device *adev = ip_block->adev; 2878 - 2879 - adev->audio_endpt_rreg = &dce_v11_0_audio_endpt_rreg; 2880 - adev->audio_endpt_wreg = &dce_v11_0_audio_endpt_wreg; 2881 - 2882 
- dce_v11_0_set_display_funcs(adev); 2883 - 2884 - adev->mode_info.num_crtc = dce_v11_0_get_num_crtc(adev); 2885 - 2886 - switch (adev->asic_type) { 2887 - case CHIP_CARRIZO: 2888 - adev->mode_info.num_hpd = 6; 2889 - adev->mode_info.num_dig = 9; 2890 - break; 2891 - case CHIP_STONEY: 2892 - adev->mode_info.num_hpd = 6; 2893 - adev->mode_info.num_dig = 9; 2894 - break; 2895 - case CHIP_POLARIS10: 2896 - case CHIP_VEGAM: 2897 - adev->mode_info.num_hpd = 6; 2898 - adev->mode_info.num_dig = 6; 2899 - break; 2900 - case CHIP_POLARIS11: 2901 - case CHIP_POLARIS12: 2902 - adev->mode_info.num_hpd = 5; 2903 - adev->mode_info.num_dig = 5; 2904 - break; 2905 - default: 2906 - /* FIXME: not supported yet */ 2907 - return -EINVAL; 2908 - } 2909 - 2910 - dce_v11_0_set_irq_funcs(adev); 2911 - 2912 - return 0; 2913 - } 2914 - 2915 - static int dce_v11_0_sw_init(struct amdgpu_ip_block *ip_block) 2916 - { 2917 - int r, i; 2918 - struct amdgpu_device *adev = ip_block->adev; 2919 - 2920 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 2921 - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i + 1, &adev->crtc_irq); 2922 - if (r) 2923 - return r; 2924 - } 2925 - 2926 - for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; i < 20; i += 2) { 2927 - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, i, &adev->pageflip_irq); 2928 - if (r) 2929 - return r; 2930 - } 2931 - 2932 - /* HPD hotplug */ 2933 - r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); 2934 - if (r) 2935 - return r; 2936 - 2937 - adev_to_drm(adev)->mode_config.funcs = &amdgpu_mode_funcs; 2938 - 2939 - adev_to_drm(adev)->mode_config.async_page_flip = true; 2940 - 2941 - adev_to_drm(adev)->mode_config.max_width = 16384; 2942 - adev_to_drm(adev)->mode_config.max_height = 16384; 2943 - 2944 - adev_to_drm(adev)->mode_config.preferred_depth = 24; 2945 - adev_to_drm(adev)->mode_config.prefer_shadow = 1; 2946 - 2947 - adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; 2948 - 2949 - r = amdgpu_display_modeset_create_props(adev); 2950 - if (r) 2951 - return r; 2952 - 2953 - adev_to_drm(adev)->mode_config.max_width = 16384; 2954 - adev_to_drm(adev)->mode_config.max_height = 16384; 2955 - 2956 - 2957 - /* allocate crtcs */ 2958 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 2959 - r = dce_v11_0_crtc_init(adev, i); 2960 - if (r) 2961 - return r; 2962 - } 2963 - 2964 - if (amdgpu_atombios_get_connector_info_from_object_table(adev)) 2965 - amdgpu_display_print_display_setup(adev_to_drm(adev)); 2966 - else 2967 - return -EINVAL; 2968 - 2969 - /* setup afmt */ 2970 - r = dce_v11_0_afmt_init(adev); 2971 - if (r) 2972 - return r; 2973 - 2974 - r = dce_v11_0_audio_init(adev); 2975 - if (r) 2976 - return r; 2977 - 2978 - /* Disable vblank IRQs aggressively for power-saving */ 2979 - /* XXX: can this be enabled for DC? 
*/ 2980 - adev_to_drm(adev)->vblank_disable_immediate = true; 2981 - 2982 - r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc); 2983 - if (r) 2984 - return r; 2985 - 2986 - INIT_DELAYED_WORK(&adev->hotplug_work, 2987 - amdgpu_display_hotplug_work_func); 2988 - 2989 - drm_kms_helper_poll_init(adev_to_drm(adev)); 2990 - 2991 - adev->mode_info.mode_config_initialized = true; 2992 - return 0; 2993 - } 2994 - 2995 - static int dce_v11_0_sw_fini(struct amdgpu_ip_block *ip_block) 2996 - { 2997 - struct amdgpu_device *adev = ip_block->adev; 2998 - 2999 - drm_edid_free(adev->mode_info.bios_hardcoded_edid); 3000 - 3001 - drm_kms_helper_poll_fini(adev_to_drm(adev)); 3002 - 3003 - dce_v11_0_audio_fini(adev); 3004 - 3005 - dce_v11_0_afmt_fini(adev); 3006 - 3007 - drm_mode_config_cleanup(adev_to_drm(adev)); 3008 - adev->mode_info.mode_config_initialized = false; 3009 - 3010 - return 0; 3011 - } 3012 - 3013 - static int dce_v11_0_hw_init(struct amdgpu_ip_block *ip_block) 3014 - { 3015 - int i; 3016 - struct amdgpu_device *adev = ip_block->adev; 3017 - 3018 - dce_v11_0_init_golden_registers(adev); 3019 - 3020 - /* disable vga render */ 3021 - dce_v11_0_set_vga_render_state(adev, false); 3022 - /* init dig PHYs, disp eng pll */ 3023 - amdgpu_atombios_crtc_powergate_init(adev); 3024 - amdgpu_atombios_encoder_init_dig(adev); 3025 - if ((adev->asic_type == CHIP_POLARIS10) || 3026 - (adev->asic_type == CHIP_POLARIS11) || 3027 - (adev->asic_type == CHIP_POLARIS12) || 3028 - (adev->asic_type == CHIP_VEGAM)) { 3029 - amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk, 3030 - DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS); 3031 - amdgpu_atombios_crtc_set_dce_clock(adev, 0, 3032 - DCE_CLOCK_TYPE_DPREFCLK, ATOM_GCK_DFS); 3033 - } else { 3034 - amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk); 3035 - } 3036 - 3037 - /* initialize hpd */ 3038 - dce_v11_0_hpd_init(adev); 3039 - 3040 - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 3041 - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3042 - } 3043 - 3044 - dce_v11_0_pageflip_interrupt_init(adev); 3045 - 3046 - return 0; 3047 - } 3048 - 3049 - static int dce_v11_0_hw_fini(struct amdgpu_ip_block *ip_block) 3050 - { 3051 - int i; 3052 - struct amdgpu_device *adev = ip_block->adev; 3053 - 3054 - dce_v11_0_hpd_fini(adev); 3055 - 3056 - for (i = 0; i < adev->mode_info.audio.num_pins; i++) { 3057 - dce_v11_0_audio_enable(adev, &adev->mode_info.audio.pin[i], false); 3058 - } 3059 - 3060 - dce_v11_0_pageflip_interrupt_fini(adev); 3061 - 3062 - flush_delayed_work(&adev->hotplug_work); 3063 - 3064 - return 0; 3065 - } 3066 - 3067 - static int dce_v11_0_suspend(struct amdgpu_ip_block *ip_block) 3068 - { 3069 - struct amdgpu_device *adev = ip_block->adev; 3070 - int r; 3071 - 3072 - r = amdgpu_display_suspend_helper(adev); 3073 - if (r) 3074 - return r; 3075 - 3076 - adev->mode_info.bl_level = 3077 - amdgpu_atombios_encoder_get_backlight_level_from_reg(adev); 3078 - 3079 - return dce_v11_0_hw_fini(ip_block); 3080 - } 3081 - 3082 - static int dce_v11_0_resume(struct amdgpu_ip_block *ip_block) 3083 - { 3084 - struct amdgpu_device *adev = ip_block->adev; 3085 - int ret; 3086 - 3087 - amdgpu_atombios_encoder_set_backlight_level_to_reg(adev, 3088 - adev->mode_info.bl_level); 3089 - 3090 - ret = dce_v11_0_hw_init(ip_block); 3091 - 3092 - /* turn on the BL */ 3093 - if (adev->mode_info.bl_encoder) { 3094 - u8 bl_level = amdgpu_display_backlight_get_level(adev, 3095 - adev->mode_info.bl_encoder); 3096 - 
amdgpu_display_backlight_set_level(adev, adev->mode_info.bl_encoder, 3097 - bl_level); 3098 - } 3099 - if (ret) 3100 - return ret; 3101 - 3102 - return amdgpu_display_resume_helper(adev); 3103 - } 3104 - 3105 - static bool dce_v11_0_is_idle(struct amdgpu_ip_block *ip_block) 3106 - { 3107 - return true; 3108 - } 3109 - 3110 - static int dce_v11_0_soft_reset(struct amdgpu_ip_block *ip_block) 3111 - { 3112 - u32 srbm_soft_reset = 0, tmp; 3113 - struct amdgpu_device *adev = ip_block->adev; 3114 - 3115 - if (dce_v11_0_is_display_hung(adev)) 3116 - srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_DC_MASK; 3117 - 3118 - if (srbm_soft_reset) { 3119 - tmp = RREG32(mmSRBM_SOFT_RESET); 3120 - tmp |= srbm_soft_reset; 3121 - dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); 3122 - WREG32(mmSRBM_SOFT_RESET, tmp); 3123 - tmp = RREG32(mmSRBM_SOFT_RESET); 3124 - 3125 - udelay(50); 3126 - 3127 - tmp &= ~srbm_soft_reset; 3128 - WREG32(mmSRBM_SOFT_RESET, tmp); 3129 - tmp = RREG32(mmSRBM_SOFT_RESET); 3130 - 3131 - /* Wait a little for things to settle down */ 3132 - udelay(50); 3133 - } 3134 - return 0; 3135 - } 3136 - 3137 - static void dce_v11_0_set_crtc_vblank_interrupt_state(struct amdgpu_device *adev, 3138 - int crtc, 3139 - enum amdgpu_interrupt_state state) 3140 - { 3141 - u32 lb_interrupt_mask; 3142 - 3143 - if (crtc >= adev->mode_info.num_crtc) { 3144 - DRM_DEBUG("invalid crtc %d\n", crtc); 3145 - return; 3146 - } 3147 - 3148 - switch (state) { 3149 - case AMDGPU_IRQ_STATE_DISABLE: 3150 - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3151 - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3152 - VBLANK_INTERRUPT_MASK, 0); 3153 - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3154 - break; 3155 - case AMDGPU_IRQ_STATE_ENABLE: 3156 - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3157 - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3158 - VBLANK_INTERRUPT_MASK, 1); 3159 - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3160 - break; 3161 - default: 3162 - break; 3163 - } 3164 - } 3165 - 3166 - static void dce_v11_0_set_crtc_vline_interrupt_state(struct amdgpu_device *adev, 3167 - int crtc, 3168 - enum amdgpu_interrupt_state state) 3169 - { 3170 - u32 lb_interrupt_mask; 3171 - 3172 - if (crtc >= adev->mode_info.num_crtc) { 3173 - DRM_DEBUG("invalid crtc %d\n", crtc); 3174 - return; 3175 - } 3176 - 3177 - switch (state) { 3178 - case AMDGPU_IRQ_STATE_DISABLE: 3179 - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3180 - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3181 - VLINE_INTERRUPT_MASK, 0); 3182 - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3183 - break; 3184 - case AMDGPU_IRQ_STATE_ENABLE: 3185 - lb_interrupt_mask = RREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc]); 3186 - lb_interrupt_mask = REG_SET_FIELD(lb_interrupt_mask, LB_INTERRUPT_MASK, 3187 - VLINE_INTERRUPT_MASK, 1); 3188 - WREG32(mmLB_INTERRUPT_MASK + crtc_offsets[crtc], lb_interrupt_mask); 3189 - break; 3190 - default: 3191 - break; 3192 - } 3193 - } 3194 - 3195 - static int dce_v11_0_set_hpd_irq_state(struct amdgpu_device *adev, 3196 - struct amdgpu_irq_src *source, 3197 - unsigned hpd, 3198 - enum amdgpu_interrupt_state state) 3199 - { 3200 - u32 tmp; 3201 - 3202 - if (hpd >= adev->mode_info.num_hpd) { 3203 - DRM_DEBUG("invalid hpd %d\n", hpd); 3204 - return 0; 3205 - } 3206 - 3207 - switch (state) { 3208 - case 
AMDGPU_IRQ_STATE_DISABLE: 3209 - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3210 - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 0); 3211 - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3212 - break; 3213 - case AMDGPU_IRQ_STATE_ENABLE: 3214 - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3215 - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_EN, 1); 3216 - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3217 - break; 3218 - default: 3219 - break; 3220 - } 3221 - 3222 - return 0; 3223 - } 3224 - 3225 - static int dce_v11_0_set_crtc_irq_state(struct amdgpu_device *adev, 3226 - struct amdgpu_irq_src *source, 3227 - unsigned type, 3228 - enum amdgpu_interrupt_state state) 3229 - { 3230 - switch (type) { 3231 - case AMDGPU_CRTC_IRQ_VBLANK1: 3232 - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 0, state); 3233 - break; 3234 - case AMDGPU_CRTC_IRQ_VBLANK2: 3235 - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 1, state); 3236 - break; 3237 - case AMDGPU_CRTC_IRQ_VBLANK3: 3238 - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 2, state); 3239 - break; 3240 - case AMDGPU_CRTC_IRQ_VBLANK4: 3241 - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 3, state); 3242 - break; 3243 - case AMDGPU_CRTC_IRQ_VBLANK5: 3244 - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 4, state); 3245 - break; 3246 - case AMDGPU_CRTC_IRQ_VBLANK6: 3247 - dce_v11_0_set_crtc_vblank_interrupt_state(adev, 5, state); 3248 - break; 3249 - case AMDGPU_CRTC_IRQ_VLINE1: 3250 - dce_v11_0_set_crtc_vline_interrupt_state(adev, 0, state); 3251 - break; 3252 - case AMDGPU_CRTC_IRQ_VLINE2: 3253 - dce_v11_0_set_crtc_vline_interrupt_state(adev, 1, state); 3254 - break; 3255 - case AMDGPU_CRTC_IRQ_VLINE3: 3256 - dce_v11_0_set_crtc_vline_interrupt_state(adev, 2, state); 3257 - break; 3258 - case AMDGPU_CRTC_IRQ_VLINE4: 3259 - dce_v11_0_set_crtc_vline_interrupt_state(adev, 3, state); 3260 - break; 3261 - case AMDGPU_CRTC_IRQ_VLINE5: 3262 - dce_v11_0_set_crtc_vline_interrupt_state(adev, 4, state); 3263 - break; 3264 - case AMDGPU_CRTC_IRQ_VLINE6: 3265 - dce_v11_0_set_crtc_vline_interrupt_state(adev, 5, state); 3266 - break; 3267 - default: 3268 - break; 3269 - } 3270 - return 0; 3271 - } 3272 - 3273 - static int dce_v11_0_set_pageflip_irq_state(struct amdgpu_device *adev, 3274 - struct amdgpu_irq_src *src, 3275 - unsigned type, 3276 - enum amdgpu_interrupt_state state) 3277 - { 3278 - u32 reg; 3279 - 3280 - if (type >= adev->mode_info.num_crtc) { 3281 - DRM_ERROR("invalid pageflip crtc %d\n", type); 3282 - return -EINVAL; 3283 - } 3284 - 3285 - reg = RREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type]); 3286 - if (state == AMDGPU_IRQ_STATE_DISABLE) 3287 - WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3288 - reg & ~GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3289 - else 3290 - WREG32(mmGRPH_INTERRUPT_CONTROL + crtc_offsets[type], 3291 - reg | GRPH_INTERRUPT_CONTROL__GRPH_PFLIP_INT_MASK_MASK); 3292 - 3293 - return 0; 3294 - } 3295 - 3296 - static int dce_v11_0_pageflip_irq(struct amdgpu_device *adev, 3297 - struct amdgpu_irq_src *source, 3298 - struct amdgpu_iv_entry *entry) 3299 - { 3300 - unsigned long flags; 3301 - unsigned crtc_id; 3302 - struct amdgpu_crtc *amdgpu_crtc; 3303 - struct amdgpu_flip_work *works; 3304 - 3305 - crtc_id = (entry->src_id - 8) >> 1; 3306 - amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; 3307 - 3308 - if (crtc_id >= adev->mode_info.num_crtc) { 3309 - DRM_ERROR("invalid pageflip crtc %d\n", crtc_id); 3310 - return -EINVAL; 3311 - } 3312 - 3313 - if 
(RREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id]) & 3314 - GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_OCCURRED_MASK) 3315 - WREG32(mmGRPH_INTERRUPT_STATUS + crtc_offsets[crtc_id], 3316 - GRPH_INTERRUPT_STATUS__GRPH_PFLIP_INT_CLEAR_MASK); 3317 - 3318 - /* IRQ could occur when in initial stage */ 3319 - if(amdgpu_crtc == NULL) 3320 - return 0; 3321 - 3322 - spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); 3323 - works = amdgpu_crtc->pflip_works; 3324 - if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){ 3325 - DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d != " 3326 - "AMDGPU_FLIP_SUBMITTED(%d)\n", 3327 - amdgpu_crtc->pflip_status, 3328 - AMDGPU_FLIP_SUBMITTED); 3329 - spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 3330 - return 0; 3331 - } 3332 - 3333 - /* page flip completed. clean up */ 3334 - amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; 3335 - amdgpu_crtc->pflip_works = NULL; 3336 - 3337 - /* wakeup userspace */ 3338 - if(works->event) 3339 - drm_crtc_send_vblank_event(&amdgpu_crtc->base, works->event); 3340 - 3341 - spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); 3342 - 3343 - drm_crtc_vblank_put(&amdgpu_crtc->base); 3344 - schedule_work(&works->unpin_work); 3345 - 3346 - return 0; 3347 - } 3348 - 3349 - static void dce_v11_0_hpd_int_ack(struct amdgpu_device *adev, 3350 - int hpd) 3351 - { 3352 - u32 tmp; 3353 - 3354 - if (hpd >= adev->mode_info.num_hpd) { 3355 - DRM_DEBUG("invalid hpd %d\n", hpd); 3356 - return; 3357 - } 3358 - 3359 - tmp = RREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd]); 3360 - tmp = REG_SET_FIELD(tmp, DC_HPD_INT_CONTROL, DC_HPD_INT_ACK, 1); 3361 - WREG32(mmDC_HPD_INT_CONTROL + hpd_offsets[hpd], tmp); 3362 - } 3363 - 3364 - static void dce_v11_0_crtc_vblank_int_ack(struct amdgpu_device *adev, 3365 - int crtc) 3366 - { 3367 - u32 tmp; 3368 - 3369 - if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { 3370 - DRM_DEBUG("invalid crtc %d\n", crtc); 3371 - return; 3372 - } 3373 - 3374 - tmp = RREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc]); 3375 - tmp = REG_SET_FIELD(tmp, LB_VBLANK_STATUS, VBLANK_ACK, 1); 3376 - WREG32(mmLB_VBLANK_STATUS + crtc_offsets[crtc], tmp); 3377 - } 3378 - 3379 - static void dce_v11_0_crtc_vline_int_ack(struct amdgpu_device *adev, 3380 - int crtc) 3381 - { 3382 - u32 tmp; 3383 - 3384 - if (crtc < 0 || crtc >= adev->mode_info.num_crtc) { 3385 - DRM_DEBUG("invalid crtc %d\n", crtc); 3386 - return; 3387 - } 3388 - 3389 - tmp = RREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc]); 3390 - tmp = REG_SET_FIELD(tmp, LB_VLINE_STATUS, VLINE_ACK, 1); 3391 - WREG32(mmLB_VLINE_STATUS + crtc_offsets[crtc], tmp); 3392 - } 3393 - 3394 - static int dce_v11_0_crtc_irq(struct amdgpu_device *adev, 3395 - struct amdgpu_irq_src *source, 3396 - struct amdgpu_iv_entry *entry) 3397 - { 3398 - unsigned crtc = entry->src_id - 1; 3399 - uint32_t disp_int = RREG32(interrupt_status_offsets[crtc].reg); 3400 - unsigned int irq_type = amdgpu_display_crtc_idx_to_irq_type(adev, 3401 - crtc); 3402 - 3403 - switch (entry->src_data[0]) { 3404 - case 0: /* vblank */ 3405 - if (disp_int & interrupt_status_offsets[crtc].vblank) 3406 - dce_v11_0_crtc_vblank_int_ack(adev, crtc); 3407 - else 3408 - DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3409 - 3410 - if (amdgpu_irq_enabled(adev, source, irq_type)) { 3411 - drm_handle_vblank(adev_to_drm(adev), crtc); 3412 - } 3413 - DRM_DEBUG("IH: D%d vblank\n", crtc + 1); 3414 - 3415 - break; 3416 - case 1: /* vline */ 3417 - if (disp_int & interrupt_status_offsets[crtc].vline) 3418 -
dce_v11_0_crtc_vline_int_ack(adev, crtc); 3419 - else 3420 - DRM_DEBUG("IH: IH event w/o asserted irq bit?\n"); 3421 - 3422 - DRM_DEBUG("IH: D%d vline\n", crtc + 1); 3423 - 3424 - break; 3425 - default: 3426 - DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 3427 - break; 3428 - } 3429 - 3430 - return 0; 3431 - } 3432 - 3433 - static int dce_v11_0_hpd_irq(struct amdgpu_device *adev, 3434 - struct amdgpu_irq_src *source, 3435 - struct amdgpu_iv_entry *entry) 3436 - { 3437 - uint32_t disp_int, mask; 3438 - unsigned hpd; 3439 - 3440 - if (entry->src_data[0] >= adev->mode_info.num_hpd) { 3441 - DRM_DEBUG("Unhandled interrupt: %d %d\n", entry->src_id, entry->src_data[0]); 3442 - return 0; 3443 - } 3444 - 3445 - hpd = entry->src_data[0]; 3446 - disp_int = RREG32(interrupt_status_offsets[hpd].reg); 3447 - mask = interrupt_status_offsets[hpd].hpd; 3448 - 3449 - if (disp_int & mask) { 3450 - dce_v11_0_hpd_int_ack(adev, hpd); 3451 - schedule_delayed_work(&adev->hotplug_work, 0); 3452 - DRM_DEBUG("IH: HPD%d\n", hpd + 1); 3453 - } 3454 - 3455 - return 0; 3456 - } 3457 - 3458 - static int dce_v11_0_set_clockgating_state(struct amdgpu_ip_block *ip_block, 3459 - enum amd_clockgating_state state) 3460 - { 3461 - return 0; 3462 - } 3463 - 3464 - static int dce_v11_0_set_powergating_state(struct amdgpu_ip_block *ip_block, 3465 - enum amd_powergating_state state) 3466 - { 3467 - return 0; 3468 - } 3469 - 3470 - static const struct amd_ip_funcs dce_v11_0_ip_funcs = { 3471 - .name = "dce_v11_0", 3472 - .early_init = dce_v11_0_early_init, 3473 - .sw_init = dce_v11_0_sw_init, 3474 - .sw_fini = dce_v11_0_sw_fini, 3475 - .hw_init = dce_v11_0_hw_init, 3476 - .hw_fini = dce_v11_0_hw_fini, 3477 - .suspend = dce_v11_0_suspend, 3478 - .resume = dce_v11_0_resume, 3479 - .is_idle = dce_v11_0_is_idle, 3480 - .soft_reset = dce_v11_0_soft_reset, 3481 - .set_clockgating_state = dce_v11_0_set_clockgating_state, 3482 - .set_powergating_state = dce_v11_0_set_powergating_state, 3483 - }; 3484 - 3485 - static void dce_v11_0_encoder_mode_set(struct drm_encoder *encoder, 3486 - struct drm_display_mode *mode, 3487 - struct drm_display_mode *adjusted_mode) 3488 - { 3489 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3490 - 3491 - amdgpu_encoder->pixel_clock = adjusted_mode->clock; 3492 - 3493 - /* need to call this here rather than in prepare() since we need some crtc info */ 3494 - amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3495 - 3496 - /* set scaler clears this on some chips */ 3497 - dce_v11_0_set_interleave(encoder->crtc, mode); 3498 - 3499 - if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 3500 - dce_v11_0_afmt_enable(encoder, true); 3501 - dce_v11_0_afmt_setmode(encoder, adjusted_mode); 3502 - } 3503 - } 3504 - 3505 - static void dce_v11_0_encoder_prepare(struct drm_encoder *encoder) 3506 - { 3507 - struct amdgpu_device *adev = drm_to_adev(encoder->dev); 3508 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3509 - struct drm_connector *connector = amdgpu_get_connector_for_encoder(encoder); 3510 - 3511 - if ((amdgpu_encoder->active_device & 3512 - (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) || 3513 - (amdgpu_encoder_get_dp_bridge_encoder_id(encoder) != 3514 - ENCODER_OBJECT_ID_NONE)) { 3515 - struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; 3516 - if (dig) { 3517 - dig->dig_encoder = dce_v11_0_pick_dig_encoder(encoder); 3518 - if (amdgpu_encoder->active_device & 
ATOM_DEVICE_DFP_SUPPORT) 3519 - dig->afmt = adev->mode_info.afmt[dig->dig_encoder]; 3520 - } 3521 - } 3522 - 3523 - amdgpu_atombios_scratch_regs_lock(adev, true); 3524 - 3525 - if (connector) { 3526 - struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); 3527 - 3528 - /* select the clock/data port if it uses a router */ 3529 - if (amdgpu_connector->router.cd_valid) 3530 - amdgpu_i2c_router_select_cd_port(amdgpu_connector); 3531 - 3532 - /* turn eDP panel on for mode set */ 3533 - if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) 3534 - amdgpu_atombios_encoder_set_edp_panel_power(connector, 3535 - ATOM_TRANSMITTER_ACTION_POWER_ON); 3536 - } 3537 - 3538 - /* this is needed for the pll/ss setup to work correctly in some cases */ 3539 - amdgpu_atombios_encoder_set_crtc_source(encoder); 3540 - /* set up the FMT blocks */ 3541 - dce_v11_0_program_fmt(encoder); 3542 - } 3543 - 3544 - static void dce_v11_0_encoder_commit(struct drm_encoder *encoder) 3545 - { 3546 - struct drm_device *dev = encoder->dev; 3547 - struct amdgpu_device *adev = drm_to_adev(dev); 3548 - 3549 - /* need to call this here as we need the crtc set up */ 3550 - amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_ON); 3551 - amdgpu_atombios_scratch_regs_lock(adev, false); 3552 - } 3553 - 3554 - static void dce_v11_0_encoder_disable(struct drm_encoder *encoder) 3555 - { 3556 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3557 - struct amdgpu_encoder_atom_dig *dig; 3558 - 3559 - amdgpu_atombios_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); 3560 - 3561 - if (amdgpu_atombios_encoder_is_digital(encoder)) { 3562 - if (amdgpu_atombios_encoder_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) 3563 - dce_v11_0_afmt_enable(encoder, false); 3564 - dig = amdgpu_encoder->enc_priv; 3565 - dig->dig_encoder = -1; 3566 - } 3567 - amdgpu_encoder->active_device = 0; 3568 - } 3569 - 3570 - /* these are handled by the primary encoders */ 3571 - static void dce_v11_0_ext_prepare(struct drm_encoder *encoder) 3572 - { 3573 - 3574 - } 3575 - 3576 - static void dce_v11_0_ext_commit(struct drm_encoder *encoder) 3577 - { 3578 - 3579 - } 3580 - 3581 - static void 3582 - dce_v11_0_ext_mode_set(struct drm_encoder *encoder, 3583 - struct drm_display_mode *mode, 3584 - struct drm_display_mode *adjusted_mode) 3585 - { 3586 - 3587 - } 3588 - 3589 - static void dce_v11_0_ext_disable(struct drm_encoder *encoder) 3590 - { 3591 - 3592 - } 3593 - 3594 - static void 3595 - dce_v11_0_ext_dpms(struct drm_encoder *encoder, int mode) 3596 - { 3597 - 3598 - } 3599 - 3600 - static const struct drm_encoder_helper_funcs dce_v11_0_ext_helper_funcs = { 3601 - .dpms = dce_v11_0_ext_dpms, 3602 - .prepare = dce_v11_0_ext_prepare, 3603 - .mode_set = dce_v11_0_ext_mode_set, 3604 - .commit = dce_v11_0_ext_commit, 3605 - .disable = dce_v11_0_ext_disable, 3606 - /* no detect for TMDS/LVDS yet */ 3607 - }; 3608 - 3609 - static const struct drm_encoder_helper_funcs dce_v11_0_dig_helper_funcs = { 3610 - .dpms = amdgpu_atombios_encoder_dpms, 3611 - .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3612 - .prepare = dce_v11_0_encoder_prepare, 3613 - .mode_set = dce_v11_0_encoder_mode_set, 3614 - .commit = dce_v11_0_encoder_commit, 3615 - .disable = dce_v11_0_encoder_disable, 3616 - .detect = amdgpu_atombios_encoder_dig_detect, 3617 - }; 3618 - 3619 - static const struct drm_encoder_helper_funcs dce_v11_0_dac_helper_funcs = { 3620 - .dpms = amdgpu_atombios_encoder_dpms, 3621 - .mode_fixup = amdgpu_atombios_encoder_mode_fixup, 3622 - .prepare 
= dce_v11_0_encoder_prepare, 3623 - .mode_set = dce_v11_0_encoder_mode_set, 3624 - .commit = dce_v11_0_encoder_commit, 3625 - .detect = amdgpu_atombios_encoder_dac_detect, 3626 - }; 3627 - 3628 - static void dce_v11_0_encoder_destroy(struct drm_encoder *encoder) 3629 - { 3630 - struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); 3631 - if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3632 - amdgpu_atombios_encoder_fini_backlight(amdgpu_encoder); 3633 - kfree(amdgpu_encoder->enc_priv); 3634 - drm_encoder_cleanup(encoder); 3635 - kfree(amdgpu_encoder); 3636 - } 3637 - 3638 - static const struct drm_encoder_funcs dce_v11_0_encoder_funcs = { 3639 - .destroy = dce_v11_0_encoder_destroy, 3640 - }; 3641 - 3642 - static void dce_v11_0_encoder_add(struct amdgpu_device *adev, 3643 - uint32_t encoder_enum, 3644 - uint32_t supported_device, 3645 - u16 caps) 3646 - { 3647 - struct drm_device *dev = adev_to_drm(adev); 3648 - struct drm_encoder *encoder; 3649 - struct amdgpu_encoder *amdgpu_encoder; 3650 - 3651 - /* see if we already added it */ 3652 - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 3653 - amdgpu_encoder = to_amdgpu_encoder(encoder); 3654 - if (amdgpu_encoder->encoder_enum == encoder_enum) { 3655 - amdgpu_encoder->devices |= supported_device; 3656 - return; 3657 - } 3658 - 3659 - } 3660 - 3661 - /* add a new one */ 3662 - amdgpu_encoder = kzalloc(sizeof(struct amdgpu_encoder), GFP_KERNEL); 3663 - if (!amdgpu_encoder) 3664 - return; 3665 - 3666 - encoder = &amdgpu_encoder->base; 3667 - switch (adev->mode_info.num_crtc) { 3668 - case 1: 3669 - encoder->possible_crtcs = 0x1; 3670 - break; 3671 - case 2: 3672 - default: 3673 - encoder->possible_crtcs = 0x3; 3674 - break; 3675 - case 3: 3676 - encoder->possible_crtcs = 0x7; 3677 - break; 3678 - case 4: 3679 - encoder->possible_crtcs = 0xf; 3680 - break; 3681 - case 5: 3682 - encoder->possible_crtcs = 0x1f; 3683 - break; 3684 - case 6: 3685 - encoder->possible_crtcs = 0x3f; 3686 - break; 3687 - } 3688 - 3689 - amdgpu_encoder->enc_priv = NULL; 3690 - 3691 - amdgpu_encoder->encoder_enum = encoder_enum; 3692 - amdgpu_encoder->encoder_id = (encoder_enum & OBJECT_ID_MASK) >> OBJECT_ID_SHIFT; 3693 - amdgpu_encoder->devices = supported_device; 3694 - amdgpu_encoder->rmx_type = RMX_OFF; 3695 - amdgpu_encoder->underscan_type = UNDERSCAN_OFF; 3696 - amdgpu_encoder->is_ext_encoder = false; 3697 - amdgpu_encoder->caps = caps; 3698 - 3699 - switch (amdgpu_encoder->encoder_id) { 3700 - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1: 3701 - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2: 3702 - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3703 - DRM_MODE_ENCODER_DAC, NULL); 3704 - drm_encoder_helper_add(encoder, &dce_v11_0_dac_helper_funcs); 3705 - break; 3706 - case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1: 3707 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 3708 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 3709 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 3710 - case ENCODER_OBJECT_ID_INTERNAL_UNIPHY3: 3711 - if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 3712 - amdgpu_encoder->rmx_type = RMX_FULL; 3713 - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3714 - DRM_MODE_ENCODER_LVDS, NULL); 3715 - amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_lcd_info(amdgpu_encoder); 3716 - } else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) { 3717 - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3718 - DRM_MODE_ENCODER_DAC, NULL); 3719 - amdgpu_encoder->enc_priv = 
amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3720 - } else { 3721 - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3722 - DRM_MODE_ENCODER_TMDS, NULL); 3723 - amdgpu_encoder->enc_priv = amdgpu_atombios_encoder_get_dig_info(amdgpu_encoder); 3724 - } 3725 - drm_encoder_helper_add(encoder, &dce_v11_0_dig_helper_funcs); 3726 - break; 3727 - case ENCODER_OBJECT_ID_SI170B: 3728 - case ENCODER_OBJECT_ID_CH7303: 3729 - case ENCODER_OBJECT_ID_EXTERNAL_SDVOA: 3730 - case ENCODER_OBJECT_ID_EXTERNAL_SDVOB: 3731 - case ENCODER_OBJECT_ID_TITFP513: 3732 - case ENCODER_OBJECT_ID_VT1623: 3733 - case ENCODER_OBJECT_ID_HDMI_SI1930: 3734 - case ENCODER_OBJECT_ID_TRAVIS: 3735 - case ENCODER_OBJECT_ID_NUTMEG: 3736 - /* these are handled by the primary encoders */ 3737 - amdgpu_encoder->is_ext_encoder = true; 3738 - if (amdgpu_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) 3739 - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3740 - DRM_MODE_ENCODER_LVDS, NULL); 3741 - else if (amdgpu_encoder->devices & (ATOM_DEVICE_CRT_SUPPORT)) 3742 - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3743 - DRM_MODE_ENCODER_DAC, NULL); 3744 - else 3745 - drm_encoder_init(dev, encoder, &dce_v11_0_encoder_funcs, 3746 - DRM_MODE_ENCODER_TMDS, NULL); 3747 - drm_encoder_helper_add(encoder, &dce_v11_0_ext_helper_funcs); 3748 - break; 3749 - } 3750 - } 3751 - 3752 - static const struct amdgpu_display_funcs dce_v11_0_display_funcs = { 3753 - .bandwidth_update = &dce_v11_0_bandwidth_update, 3754 - .vblank_get_counter = &dce_v11_0_vblank_get_counter, 3755 - .backlight_set_level = &amdgpu_atombios_encoder_set_backlight_level, 3756 - .backlight_get_level = &amdgpu_atombios_encoder_get_backlight_level, 3757 - .hpd_sense = &dce_v11_0_hpd_sense, 3758 - .hpd_set_polarity = &dce_v11_0_hpd_set_polarity, 3759 - .hpd_get_gpio_reg = &dce_v11_0_hpd_get_gpio_reg, 3760 - .page_flip = &dce_v11_0_page_flip, 3761 - .page_flip_get_scanoutpos = &dce_v11_0_crtc_get_scanoutpos, 3762 - .add_encoder = &dce_v11_0_encoder_add, 3763 - .add_connector = &amdgpu_connector_add, 3764 - }; 3765 - 3766 - static void dce_v11_0_set_display_funcs(struct amdgpu_device *adev) 3767 - { 3768 - adev->mode_info.funcs = &dce_v11_0_display_funcs; 3769 - } 3770 - 3771 - static const struct amdgpu_irq_src_funcs dce_v11_0_crtc_irq_funcs = { 3772 - .set = dce_v11_0_set_crtc_irq_state, 3773 - .process = dce_v11_0_crtc_irq, 3774 - }; 3775 - 3776 - static const struct amdgpu_irq_src_funcs dce_v11_0_pageflip_irq_funcs = { 3777 - .set = dce_v11_0_set_pageflip_irq_state, 3778 - .process = dce_v11_0_pageflip_irq, 3779 - }; 3780 - 3781 - static const struct amdgpu_irq_src_funcs dce_v11_0_hpd_irq_funcs = { 3782 - .set = dce_v11_0_set_hpd_irq_state, 3783 - .process = dce_v11_0_hpd_irq, 3784 - }; 3785 - 3786 - static void dce_v11_0_set_irq_funcs(struct amdgpu_device *adev) 3787 - { 3788 - if (adev->mode_info.num_crtc > 0) 3789 - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VLINE1 + adev->mode_info.num_crtc; 3790 - else 3791 - adev->crtc_irq.num_types = 0; 3792 - adev->crtc_irq.funcs = &dce_v11_0_crtc_irq_funcs; 3793 - 3794 - adev->pageflip_irq.num_types = adev->mode_info.num_crtc; 3795 - adev->pageflip_irq.funcs = &dce_v11_0_pageflip_irq_funcs; 3796 - 3797 - adev->hpd_irq.num_types = adev->mode_info.num_hpd; 3798 - adev->hpd_irq.funcs = &dce_v11_0_hpd_irq_funcs; 3799 - } 3800 - 3801 - const struct amdgpu_ip_block_version dce_v11_0_ip_block = 3802 - { 3803 - .type = AMD_IP_BLOCK_TYPE_DCE, 3804 - .major = 11, 3805 - .minor = 0, 3806 - .rev = 0, 3807 - .funcs = 
&dce_v11_0_ip_funcs, 3808 - }; 3809 - 3810 - const struct amdgpu_ip_block_version dce_v11_2_ip_block = 3811 - { 3812 - .type = AMD_IP_BLOCK_TYPE_DCE, 3813 - .major = 11, 3814 - .minor = 2, 3815 - .rev = 0, 3816 - .funcs = &dce_v11_0_ip_funcs, 3817 - };
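The register programming removed above follows a single idiom throughout: read a register, rewrite named bitfields, write it back. A minimal sketch of that pattern, lifted from the deleted cursor code — RREG32/WREG32/REG_SET_FIELD and mmCUR_CONTROL are the real amdgpu macros (RREG32/WREG32 expand against the adev in scope), while the wrapper function itself is illustrative only:

    /* Read-modify-write of a display register, the idiom used across the
     * deleted dce_v11_0.c. REG_SET_FIELD() applies the generated
     * <REG>__<FIELD>_MASK and __SHIFT definitions from the register headers. */
    static void sketch_show_cursor(struct amdgpu_device *adev, u32 crtc_offset)
    {
            u32 tmp;

            tmp = RREG32(mmCUR_CONTROL + crtc_offset);             /* read */
            tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_EN, 1);   /* modify */
            tmp = REG_SET_FIELD(tmp, CUR_CONTROL, CURSOR_MODE, 2);
            WREG32(mmCUR_CONTROL + crtc_offset, tmp);              /* write */
    }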
-32
drivers/gpu/drm/amd/amdgpu/dce_v11_0.h
··· 1 - /* 2 - * Copyright 2014 Advanced Micro Devices, Inc. 3 - * 4 - * Permission is hereby granted, free of charge, to any person obtaining a 5 - * copy of this software and associated documentation files (the "Software"), 6 - * to deal in the Software without restriction, including without limitation 7 - * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 - * and/or sell copies of the Software, and to permit persons to whom the 9 - * Software is furnished to do so, subject to the following conditions: 10 - * 11 - * The above copyright notice and this permission notice shall be included in 12 - * all copies or substantial portions of the Software. 13 - * 14 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 - * OTHER DEALINGS IN THE SOFTWARE. 21 - * 22 - */ 23 - 24 - #ifndef __DCE_V11_0_H__ 25 - #define __DCE_V11_0_H__ 26 - 27 - extern const struct amdgpu_ip_block_version dce_v11_0_ip_block; 28 - extern const struct amdgpu_ip_block_version dce_v11_2_ip_block; 29 - 30 - void dce_v11_0_disable_dce(struct amdgpu_device *adev); 31 - 32 - #endif
+2 -3
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 4075 4075 struct dma_fence *f = NULL; 4076 4076 unsigned int index; 4077 4077 uint64_t gpu_addr; 4078 - volatile uint32_t *cpu_ptr; 4078 + uint32_t *cpu_ptr; 4079 4079 long r; 4080 4080 4081 4081 memset(&ib, 0, sizeof(ib)); ··· 4322 4322 return count; 4323 4323 } 4324 4324 4325 - static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, 4326 - volatile u32 *buffer) 4325 + static void gfx_v10_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 4327 4326 { 4328 4327 u32 count = 0; 4329 4328 int ctx_reg_offset;
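This hunk, like the matching ones in gfx_v6/7/8/9/11/12 below, drops the volatile qualifier from the CSB pointers: the clear-state buffer is filled by the CPU as ordinary memory and handed to the GPU only after an explicit flush, so per-store volatile semantics add nothing. A self-contained sketch of the rationale — plain C, not amdgpu API:

    #include <stdint.h>

    /* Fill a staging buffer through a plain pointer, then publish it with a
     * single explicit barrier (the kernel uses mb()/HDP flushes for this).
     * Volatile stores would only inhibit compiler optimization of the loop
     * without adding any ordering the final barrier does not already give. */
    static void fill_and_publish(uint32_t *buffer, uint32_t count)
    {
            for (uint32_t i = 0; i < count; i++)
                    buffer[i] = 0;          /* plain stores; may be combined */

            __sync_synchronize();           /* GCC builtin: full memory barrier */
    }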
+17 -3
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
··· 603 603 struct dma_fence *f = NULL; 604 604 unsigned index; 605 605 uint64_t gpu_addr; 606 - volatile uint32_t *cpu_ptr; 606 + uint32_t *cpu_ptr; 607 607 long r; 608 608 609 609 /* MES KIQ fw hasn't indirect buffer support for now */ ··· 850 850 return count; 851 851 } 852 852 853 - static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, 854 - volatile u32 *buffer) 853 + static void gfx_v11_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 855 854 { 856 855 u32 count = 0; 857 856 int ctx_reg_offset; ··· 1645 1646 adev->gfx.pfp_fw_version >= 2370 && 1646 1647 adev->gfx.mec_fw_version >= 2450 && 1647 1648 adev->mes.fw_version[0] >= 99) { 1649 + adev->gfx.enable_cleaner_shader = true; 1650 + r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1651 + if (r) { 1652 + adev->gfx.enable_cleaner_shader = false; 1653 + dev_err(adev->dev, "Failed to initialize cleaner shader\n"); 1654 + } 1655 + } 1656 + break; 1657 + case IP_VERSION(11, 0, 1): 1658 + case IP_VERSION(11, 0, 4): 1659 + adev->gfx.cleaner_shader_ptr = gfx_11_0_3_cleaner_shader_hex; 1660 + adev->gfx.cleaner_shader_size = sizeof(gfx_11_0_3_cleaner_shader_hex); 1661 + if (adev->gfx.pfp_fw_version >= 102 && 1662 + adev->gfx.mec_fw_version >= 66 && 1663 + adev->mes.fw_version[0] >= 128) { 1648 1664 adev->gfx.enable_cleaner_shader = true; 1649 1665 r = amdgpu_gfx_cleaner_shader_sw_init(adev, adev->gfx.cleaner_shader_size); 1650 1666 if (r) {
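The new IP_VERSION(11, 0, 1)/(11, 0, 4) branch mirrors the existing 11.0.3 one: the cleaner shader is enabled only once every microcode component is new enough to honor it. Restated as a single predicate — the helper name is illustrative, the fields and thresholds come straight from the hunk:

    /* Cleaner-shader gating for GC 11.0.1/11.0.4: PFP, MEC and MES firmware
     * must all meet the minimum versions added above. */
    static bool gc_11_0_1_cleaner_shader_ok(struct amdgpu_device *adev)
    {
            return adev->gfx.pfp_fw_version >= 102 &&
                   adev->gfx.mec_fw_version >= 66 &&
                   adev->mes.fw_version[0] >= 128;
    }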
+2 -3
drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
··· 497 497 struct dma_fence *f = NULL; 498 498 unsigned index; 499 499 uint64_t gpu_addr; 500 - volatile uint32_t *cpu_ptr; 500 + uint32_t *cpu_ptr; 501 501 long r; 502 502 503 503 /* MES KIQ fw hasn't indirect buffer support for now */ ··· 685 685 return count; 686 686 } 687 687 688 - static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, 689 - volatile u32 *buffer) 688 + static void gfx_v12_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 690 689 { 691 690 u32 count = 0, clustercount = 0, i; 692 691 const struct cs_section_def *sect = NULL;
+3 -4
drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c
··· 86 86 MODULE_FIRMWARE("amdgpu/hainan_rlc.bin"); 87 87 88 88 static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev); 89 - static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); 89 + static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer); 90 90 //static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev); 91 91 static void gfx_v6_0_init_pg(struct amdgpu_device *adev); 92 92 ··· 2354 2354 static int gfx_v6_0_rlc_init(struct amdgpu_device *adev) 2355 2355 { 2356 2356 const u32 *src_ptr; 2357 - volatile u32 *dst_ptr; 2357 + u32 *dst_ptr; 2358 2358 u32 dws; 2359 2359 u64 reg_list_mc_addr; 2360 2360 const struct cs_section_def *cs_data; ··· 2855 2855 return count; 2856 2856 } 2857 2857 2858 - static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, 2859 - volatile u32 *buffer) 2858 + static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 2860 2859 { 2861 2860 u32 count = 0; 2862 2861
+2 -3
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 883 883 }; 884 884 885 885 static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev); 886 - static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer); 886 + static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer); 887 887 static void gfx_v7_0_init_pg(struct amdgpu_device *adev); 888 888 static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev); 889 889 ··· 3882 3882 return count; 3883 3883 } 3884 3884 3885 - static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, 3886 - volatile u32 *buffer) 3885 + static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 3887 3886 { 3888 3887 u32 count = 0; 3889 3888
+1 -2
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
··· 1220 1220 return err; 1221 1221 } 1222 1222 1223 - static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, 1224 - volatile u32 *buffer) 1223 + static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 1225 1224 { 1226 1225 u32 count = 0; 1227 1226
+1 -2
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 1648 1648 return count; 1649 1649 } 1650 1650 1651 - static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, 1652 - volatile u32 *buffer) 1651 + static void gfx_v9_0_get_csb_buffer(struct amdgpu_device *adev, u32 *buffer) 1653 1652 { 1654 1653 u32 count = 0; 1655 1654
+2 -1
drivers/gpu/drm/amd/amdgpu/gmc_v12_0.c
··· 337 337 int vmid, i; 338 338 339 339 if (adev->enable_uni_mes && adev->mes.ring[AMDGPU_MES_SCHED_PIPE].sched.ready && 340 - (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x81) { 340 + (adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x83) { 341 341 struct mes_inv_tlbs_pasid_input input = {0}; 342 342 input.pasid = pasid; 343 343 input.flush_type = flush_type; ··· 521 521 *flags &= ~AMDGPU_PTE_NOALLOC; 522 522 523 523 if (vm_flags & AMDGPU_VM_PAGE_PRT) { 524 + *flags |= AMDGPU_PTE_PRT_GFX12; 524 525 *flags |= AMDGPU_PTE_SNOOPED; 525 526 *flags |= AMDGPU_PTE_SYSTEM; 526 527 *flags |= AMDGPU_PTE_IS_PTE;
+8
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
··· 1834 1834 1835 1835 static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev) 1836 1836 { 1837 + static const u32 regBIF_BIOS_SCRATCH_4 = 0x50; 1838 + u32 vram_info; 1839 + 1837 1840 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM; 1838 1841 adev->gmc.vram_width = 128 * 64; 1839 1842 1840 1843 if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 5, 0)) 1841 1844 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_HBM3E; 1845 + 1846 + if (!(adev->flags & AMD_IS_APU) && !amdgpu_sriov_vf(adev)) { 1847 + vram_info = RREG32(regBIF_BIOS_SCRATCH_4); 1848 + adev->gmc.vram_vendor = vram_info & 0xF; 1849 + } 1842 1850 } 1843 1851 1844 1852 static int gmc_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v1_0.c
··· 557 557 .nop = PACKET0(0x81ff, 0), 558 558 .support_64bit_ptrs = false, 559 559 .no_user_fence = true, 560 - .extra_dw = 64, 560 + .extra_bytes = 256, 561 561 .get_rptr = jpeg_v1_0_decode_ring_get_rptr, 562 562 .get_wptr = jpeg_v1_0_decode_ring_get_wptr, 563 563 .set_wptr = jpeg_v1_0_decode_ring_set_wptr,
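The ring interface now accounts for extra ring space in bytes rather than dwords; the two values reserve the same amount, as the arithmetic below shows (macro names are illustrative):

#include <linux/types.h>

/* 64 dwords at 4 bytes per dword is 256 bytes, so .extra_dw = 64 and
 * .extra_bytes = 256 describe identical reservations. */
#define JPEG_V1_EXTRA_DW	64
#define JPEG_V1_EXTRA_BYTES	(JPEG_V1_EXTRA_DW * sizeof(u32))	/* == 256 */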
+1 -57
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
··· 23 23 24 24 #include "amdgpu.h" 25 25 #include "amdgpu_jpeg.h" 26 - #include "amdgpu_cs.h" 27 26 #include "amdgpu_pm.h" 28 27 #include "soc15.h" 29 28 #include "soc15d.h" ··· 805 806 .get_rptr = jpeg_v2_0_dec_ring_get_rptr, 806 807 .get_wptr = jpeg_v2_0_dec_ring_get_wptr, 807 808 .set_wptr = jpeg_v2_0_dec_ring_set_wptr, 808 - .parse_cs = jpeg_v2_dec_ring_parse_cs, 809 + .parse_cs = amdgpu_jpeg_dec_parse_cs, 809 810 .emit_frame_size = 810 811 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 811 812 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + ··· 853 854 .rev = 0, 854 855 .funcs = &jpeg_v2_0_ip_funcs, 855 856 }; 856 - 857 - /** 858 - * jpeg_v2_dec_ring_parse_cs - command submission parser 859 - * 860 - * @parser: Command submission parser context 861 - * @job: the job to parse 862 - * @ib: the IB to parse 863 - * 864 - * Parse the command stream, return -EINVAL for invalid packet, 865 - * 0 otherwise 866 - */ 867 - int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, 868 - struct amdgpu_job *job, 869 - struct amdgpu_ib *ib) 870 - { 871 - u32 i, reg, res, cond, type; 872 - struct amdgpu_device *adev = parser->adev; 873 - 874 - for (i = 0; i < ib->length_dw ; i += 2) { 875 - reg = CP_PACKETJ_GET_REG(ib->ptr[i]); 876 - res = CP_PACKETJ_GET_RES(ib->ptr[i]); 877 - cond = CP_PACKETJ_GET_COND(ib->ptr[i]); 878 - type = CP_PACKETJ_GET_TYPE(ib->ptr[i]); 879 - 880 - if (res) /* only support 0 at the moment */ 881 - return -EINVAL; 882 - 883 - switch (type) { 884 - case PACKETJ_TYPE0: 885 - if (cond != PACKETJ_CONDITION_CHECK0 || reg < JPEG_REG_RANGE_START || 886 - reg > JPEG_REG_RANGE_END) { 887 - dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); 888 - return -EINVAL; 889 - } 890 - break; 891 - case PACKETJ_TYPE3: 892 - if (cond != PACKETJ_CONDITION_CHECK3 || reg < JPEG_REG_RANGE_START || 893 - reg > JPEG_REG_RANGE_END) { 894 - dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); 895 - return -EINVAL; 896 - } 897 - break; 898 - case PACKETJ_TYPE6: 899 - if (ib->ptr[i] == CP_PACKETJ_NOP) 900 - continue; 901 - dev_err(adev->dev, "Invalid packet [0x%08x]!\n", ib->ptr[i]); 902 - return -EINVAL; 903 - default: 904 - dev_err(adev->dev, "Unknown packet type %d !\n", type); 905 - return -EINVAL; 906 - } 907 - } 908 - 909 - return 0; 910 - }
-6
drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
··· 45 45 46 46 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 47 47 48 - #define JPEG_REG_RANGE_START 0x4000 49 - #define JPEG_REG_RANGE_END 0x41c2 50 - 51 48 void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring); 52 49 void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring); 53 50 void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq, ··· 57 60 unsigned vmid, uint64_t pd_addr); 58 61 void jpeg_v2_0_dec_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, uint32_t val); 59 62 void jpeg_v2_0_dec_ring_nop(struct amdgpu_ring *ring, uint32_t count); 60 - int jpeg_v2_dec_ring_parse_cs(struct amdgpu_cs_parser *parser, 61 - struct amdgpu_job *job, 62 - struct amdgpu_ib *ib); 63 63 64 64 extern const struct amdgpu_ip_block_version jpeg_v2_0_ip_block; 65 65
+2 -2
drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
··· 696 696 .get_rptr = jpeg_v2_5_dec_ring_get_rptr, 697 697 .get_wptr = jpeg_v2_5_dec_ring_get_wptr, 698 698 .set_wptr = jpeg_v2_5_dec_ring_set_wptr, 699 - .parse_cs = jpeg_v2_dec_ring_parse_cs, 699 + .parse_cs = amdgpu_jpeg_dec_parse_cs, 700 700 .emit_frame_size = 701 701 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 702 702 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 + ··· 727 727 .get_rptr = jpeg_v2_5_dec_ring_get_rptr, 728 728 .get_wptr = jpeg_v2_5_dec_ring_get_wptr, 729 729 .set_wptr = jpeg_v2_5_dec_ring_set_wptr, 730 - .parse_cs = jpeg_v2_dec_ring_parse_cs, 730 + .parse_cs = amdgpu_jpeg_dec_parse_cs, 731 731 .emit_frame_size = 732 732 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 733 733 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c
··· 597 597 .get_rptr = jpeg_v3_0_dec_ring_get_rptr, 598 598 .get_wptr = jpeg_v3_0_dec_ring_get_wptr, 599 599 .set_wptr = jpeg_v3_0_dec_ring_set_wptr, 600 - .parse_cs = jpeg_v2_dec_ring_parse_cs, 600 + .parse_cs = amdgpu_jpeg_dec_parse_cs, 601 601 .emit_frame_size = 602 602 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 603 603 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0.c
··· 762 762 .get_rptr = jpeg_v4_0_dec_ring_get_rptr, 763 763 .get_wptr = jpeg_v4_0_dec_ring_get_wptr, 764 764 .set_wptr = jpeg_v4_0_dec_ring_set_wptr, 765 - .parse_cs = jpeg_v2_dec_ring_parse_cs, 765 + .parse_cs = amdgpu_jpeg_dec_parse_cs, 766 766 .emit_frame_size = 767 767 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 768 768 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_3.c
··· 1177 1177 .get_rptr = jpeg_v4_0_3_dec_ring_get_rptr, 1178 1178 .get_wptr = jpeg_v4_0_3_dec_ring_get_wptr, 1179 1179 .set_wptr = jpeg_v4_0_3_dec_ring_set_wptr, 1180 - .parse_cs = jpeg_v2_dec_ring_parse_cs, 1180 + .parse_cs = amdgpu_jpeg_dec_parse_cs, 1181 1181 .emit_frame_size = 1182 1182 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 1183 1183 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v4_0_5.c
··· 807 807 .get_rptr = jpeg_v4_0_5_dec_ring_get_rptr, 808 808 .get_wptr = jpeg_v4_0_5_dec_ring_get_wptr, 809 809 .set_wptr = jpeg_v4_0_5_dec_ring_set_wptr, 810 - .parse_cs = jpeg_v2_dec_ring_parse_cs, 810 + .parse_cs = amdgpu_jpeg_dec_parse_cs, 811 811 .emit_frame_size = 812 812 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 813 813 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+1 -1
drivers/gpu/drm/amd/amdgpu/jpeg_v5_0_0.c
··· 683 683 .get_rptr = jpeg_v5_0_0_dec_ring_get_rptr, 684 684 .get_wptr = jpeg_v5_0_0_dec_ring_get_wptr, 685 685 .set_wptr = jpeg_v5_0_0_dec_ring_set_wptr, 686 - .parse_cs = jpeg_v2_dec_ring_parse_cs, 686 + .parse_cs = amdgpu_jpeg_dec_parse_cs, 687 687 .emit_frame_size = 688 688 SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 + 689 689 SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 8 +
+88 -7
drivers/gpu/drm/amd/amdgpu/mes_userqueue.c
··· 254 254 struct amdgpu_mqd *mqd_hw_default = &adev->mqds[queue->queue_type];
255 255 struct drm_amdgpu_userq_in *mqd_user = args_in;
256 256 struct amdgpu_mqd_prop *userq_props;
257 + struct amdgpu_gfx_shadow_info shadow_info;
257 258 int r;
258 259
259 260 /* Structure to initialize MQD for userqueue using generic MQD init function */
··· 262 261 if (!userq_props) {
263 262 DRM_ERROR("Failed to allocate memory for userq_props\n");
264 263 return -ENOMEM;
265 - }
266 -
267 - if (!mqd_user->wptr_va || !mqd_user->rptr_va ||
268 - !mqd_user->queue_va || mqd_user->queue_size == 0) {
269 - DRM_ERROR("Invalid MQD parameters for userqueue\n");
270 - r = -EINVAL;
271 - goto free_props;
272 264 }
273 265
274 266 r = amdgpu_userq_create_object(uq_mgr, &queue->mqd, mqd_hw_default->mqd_size);
··· 280 286 userq_props->doorbell_index = queue->doorbell_index;
281 287 userq_props->fence_address = queue->fence_drv->gpu_addr;
282 288
289 + if (adev->gfx.funcs->get_gfx_shadow_info)
290 + adev->gfx.funcs->get_gfx_shadow_info(adev, &shadow_info, true);
283 291 if (queue->queue_type == AMDGPU_HW_IP_COMPUTE) {
284 292 struct drm_amdgpu_userq_mqd_compute_gfx11 *compute_mqd;
··· 297 301 r = -ENOMEM;
298 302 goto free_mqd;
299 303 }
304 +
305 + if (amdgpu_userq_input_va_validate(queue->vm, compute_mqd->eop_va,
306 + max_t(u32, PAGE_SIZE, AMDGPU_GPU_PAGE_SIZE)))
307 + goto free_mqd;
300 308
301 309 userq_props->eop_gpu_addr = compute_mqd->eop_va;
302 310 userq_props->hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_NORMAL;
··· 329 329 userq_props->csa_addr = mqd_gfx_v11->csa_va;
330 330 userq_props->tmz_queue =
331 331 mqd_user->flags & AMDGPU_USERQ_CREATE_FLAGS_QUEUE_SECURE;
332 +
333 + if (amdgpu_userq_input_va_validate(queue->vm, mqd_gfx_v11->shadow_va,
334 + shadow_info.shadow_size))
335 + goto free_mqd;
336 +
332 337 kfree(mqd_gfx_v11);
333 338 } else if (queue->queue_type == AMDGPU_HW_IP_DMA) {
334 339 struct drm_amdgpu_userq_mqd_sdma_gfx11 *mqd_sdma_v11;
··· 350 345 r = -ENOMEM;
351 346 goto free_mqd;
352 347 }
348 +
349 + if (amdgpu_userq_input_va_validate(queue->vm, mqd_sdma_v11->csa_va,
350 + shadow_info.csa_size))
351 + goto free_mqd;
353 352
354 353 userq_props->csa_addr = mqd_sdma_v11->csa_va;
355 354 kfree(mqd_sdma_v11);
··· 404 395 amdgpu_userq_destroy_object(uq_mgr, &queue->mqd);
405 396 }
406 397
398 + static int mes_userq_preempt(struct amdgpu_userq_mgr *uq_mgr,
399 + struct amdgpu_usermode_queue *queue)
400 + {
401 + struct amdgpu_device *adev = uq_mgr->adev;
402 + struct mes_suspend_gang_input queue_input;
403 + struct amdgpu_userq_obj *ctx = &queue->fw_obj;
404 + signed long timeout = 2100000; /* 2100 ms */
405 + u64 fence_gpu_addr;
406 + u32 fence_offset;
407 + u64 *fence_ptr;
408 + int i, r;
409 +
410 + if (queue->state != AMDGPU_USERQ_STATE_MAPPED)
411 + return 0;
412 + r = amdgpu_device_wb_get(adev, &fence_offset);
413 + if (r)
414 + return r;
415 +
416 + fence_gpu_addr = adev->wb.gpu_addr + (fence_offset * 4);
417 + fence_ptr = (u64 *)&adev->wb.wb[fence_offset];
418 + *fence_ptr = 0;
419 +
420 + memset(&queue_input, 0x0, sizeof(struct mes_suspend_gang_input));
421 + queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
422 + queue_input.suspend_fence_addr = fence_gpu_addr;
423 + queue_input.suspend_fence_value = 1;
424 + amdgpu_mes_lock(&adev->mes);
425 + r = adev->mes.funcs->suspend_gang(&adev->mes, &queue_input);
426 + amdgpu_mes_unlock(&adev->mes);
427 + if (r) {
428 + DRM_ERROR("Failed to suspend gang: %d\n", r);
429 + goto out;
430 + }
431 +
432 + for (i = 0; i < timeout; i++) {
433 + if (*fence_ptr == 1)
434 + goto out;
435 + udelay(1);
436 + }
437 + r = -ETIMEDOUT;
438 +
439 + out:
440 + amdgpu_device_wb_free(adev, fence_offset);
441 + return r;
442 + }
443 +
444 + static int mes_userq_restore(struct amdgpu_userq_mgr *uq_mgr,
445 + struct amdgpu_usermode_queue *queue)
446 + {
447 + struct amdgpu_device *adev = uq_mgr->adev;
448 + struct mes_resume_gang_input queue_input;
449 + struct amdgpu_userq_obj *ctx = &queue->fw_obj;
450 + int r;
451 +
452 + if (queue->state == AMDGPU_USERQ_STATE_HUNG)
453 + return -EINVAL;
454 + if (queue->state != AMDGPU_USERQ_STATE_PREEMPTED)
455 + return 0;
456 +
457 + memset(&queue_input, 0x0, sizeof(struct mes_resume_gang_input));
458 + queue_input.gang_context_addr = ctx->gpu_addr + AMDGPU_USERQ_PROC_CTX_SZ;
459 +
460 + amdgpu_mes_lock(&adev->mes);
461 + r = adev->mes.funcs->resume_gang(&adev->mes, &queue_input);
462 + amdgpu_mes_unlock(&adev->mes);
463 + if (r)
464 + dev_err(adev->dev, "Failed to resume queue, err (%d)\n", r);
465 + return r;
466 + }
467 +
407 468 const struct amdgpu_userq_funcs userq_mes_funcs = {
408 469 .mqd_create = mes_userq_mqd_create,
409 470 .mqd_destroy = mes_userq_mqd_destroy,
410 471 .unmap = mes_userq_unmap,
411 472 .map = mes_userq_map,
412 473 .detect_and_reset = mes_userq_detect_and_reset,
474 + .preempt = mes_userq_preempt,
475 + .restore = mes_userq_restore,
413 476 };
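The preempt path above polls a CPU-visible writeback slot that the MES firmware signals on completion; since each iteration delays one microsecond, the loop bound of 2100000 doubles as the 2100 ms timeout. A self-contained sketch of that polling idiom (helper name is illustrative, not the driver's API):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll a fence value written by firmware, waiting 1 us per iteration,
 * so timeout_us iterations bound the wait at timeout_us microseconds. */
static int poll_fence_value(const volatile u64 *fence_ptr, u64 expected,
			    signed long timeout_us)
{
	signed long i;

	for (i = 0; i < timeout_us; i++) {
		if (*fence_ptr == expected)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}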
+14
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
··· 202 202 case IDH_REQ_RAS_CPER_DUMP: 203 203 event = IDH_RAS_CPER_DUMP_READY; 204 204 break; 205 + case IDH_REQ_RAS_CHK_CRITI: 206 + event = IDH_REQ_RAS_CHK_CRITI_READY; 207 + break; 205 208 default: 206 209 break; 207 210 } ··· 559 556 return xgpu_nv_send_access_requests(adev, IDH_REQ_RAS_BAD_PAGES); 560 557 } 561 558 559 + static int xgpu_nv_check_vf_critical_region(struct amdgpu_device *adev, u64 addr) 560 + { 561 + uint32_t addr_hi, addr_lo; 562 + 563 + addr_hi = (uint32_t)(addr >> 32); 564 + addr_lo = (uint32_t)(addr & 0xFFFFFFFF); 565 + return xgpu_nv_send_access_requests_with_param( 566 + adev, IDH_REQ_RAS_CHK_CRITI, addr_hi, addr_lo, 0); 567 + } 568 + 562 569 const struct amdgpu_virt_ops xgpu_nv_virt_ops = { 563 570 .req_full_gpu = xgpu_nv_request_full_gpu_access, 564 571 .rel_full_gpu = xgpu_nv_release_full_gpu_access, ··· 582 569 .req_ras_err_count = xgpu_nv_req_ras_err_count, 583 570 .req_ras_cper_dump = xgpu_nv_req_ras_cper_dump, 584 571 .req_bad_pages = xgpu_nv_req_ras_bad_pages, 572 + .req_ras_chk_criti = xgpu_nv_check_vf_critical_region 585 573 };
+2
drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h
··· 43 43 IDH_REQ_RAS_ERROR_COUNT = 203, 44 44 IDH_REQ_RAS_CPER_DUMP = 204, 45 45 IDH_REQ_RAS_BAD_PAGES = 205, 46 + IDH_REQ_RAS_CHK_CRITI = 206 46 47 }; 47 48 48 49 enum idh_event { ··· 63 62 IDH_RAS_BAD_PAGES_READY = 15, 64 63 IDH_RAS_BAD_PAGES_NOTIFICATION = 16, 65 64 IDH_UNRECOV_ERR_NOTIFICATION = 17, 65 + IDH_REQ_RAS_CHK_CRITI_READY = 18, 66 66 67 67 IDH_TEXT_MESSAGE = 255, 68 68 };
+1 -4
drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
··· 743 743 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 744 744 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 745 745 746 - res = i2c_add_adapter(control); 746 + res = devm_i2c_add_adapter(adev->dev, control); 747 747 if (res) 748 748 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 749 749 ··· 752 752 753 753 void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev) 754 754 { 755 - struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus; 756 - 757 - i2c_del_adapter(control); 758 755 adev->pm.ras_eeprom_i2c_bus = NULL; 759 756 adev->pm.fru_eeprom_i2c_bus = NULL; 760 757 }
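This conversion ties the adapter's lifetime to the owning struct device, which is why the explicit i2c_del_adapter() disappears from the fini path. A minimal sketch of the devm pattern (the wrapper name is illustrative):

#include <linux/device.h>
#include <linux/i2c.h>

/* With devm_i2c_add_adapter() the adapter is deleted automatically when
 * the device is unbound, so no matching i2c_del_adapter() is required. */
static int register_smu_i2c(struct device *dev, struct i2c_adapter *adapter)
{
	int res = devm_i2c_add_adapter(dev, adapter);

	if (res)
		dev_err(dev, "Failed to register hw i2c, err: %d\n", res);
	return res;
}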
+3 -3
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
··· 193 193 adev->vcn.inst[0].pause_dpg_mode = vcn_v1_0_pause_dpg_mode; 194 194 195 195 if (amdgpu_vcnfw_log) { 196 - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 196 + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 197 197 198 198 fw_shared->present_flag_0 = 0; 199 199 amdgpu_vcn_fwlog_init(adev->vcn.inst); ··· 230 230 231 231 jpeg_v1_0_sw_fini(ip_block); 232 232 233 - r = amdgpu_vcn_sw_fini(adev, 0); 233 + amdgpu_vcn_sw_fini(adev, 0); 234 234 235 235 kfree(adev->vcn.ip_dump); 236 236 237 - return r; 237 + return 0; 238 238 } 239 239 240 240 /**
+7 -7
drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
··· 137 137 struct amdgpu_ring *ring; 138 138 int i, r; 139 139 struct amdgpu_device *adev = ip_block->adev; 140 - volatile struct amdgpu_fw_shared *fw_shared; 140 + struct amdgpu_fw_shared *fw_shared; 141 141 142 142 /* VCN DEC TRAP */ 143 143 r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, ··· 252 252 { 253 253 int r, idx; 254 254 struct amdgpu_device *adev = ip_block->adev; 255 - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 255 + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 256 256 257 257 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 258 258 fw_shared->present_flag_0 = 0; ··· 267 267 268 268 amdgpu_vcn_sysfs_reset_mask_fini(adev); 269 269 270 - r = amdgpu_vcn_sw_fini(adev, 0); 270 + amdgpu_vcn_sw_fini(adev, 0); 271 271 272 - return r; 272 + return 0; 273 273 } 274 274 275 275 /** ··· 853 853 static int vcn_v2_0_start_dpg_mode(struct amdgpu_vcn_inst *vinst, bool indirect) 854 854 { 855 855 struct amdgpu_device *adev = vinst->adev; 856 - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 856 + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 857 857 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; 858 858 uint32_t rb_bufsz, tmp; 859 859 int ret; ··· 1001 1001 static int vcn_v2_0_start(struct amdgpu_vcn_inst *vinst) 1002 1002 { 1003 1003 struct amdgpu_device *adev = vinst->adev; 1004 - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 1004 + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 1005 1005 struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; 1006 1006 uint32_t rb_bufsz, tmp; 1007 1007 uint32_t lmi_swap_cntl; ··· 1308 1308 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1309 1309 1310 1310 if (!ret_code) { 1311 - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 1311 + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared.cpu_addr; 1312 1312 /* pause DPG */ 1313 1313 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; 1314 1314 WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
+6 -8
drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
··· 277 277 struct amdgpu_device *adev = ip_block->adev; 278 278 279 279 for (j = 0; j < adev->vcn.num_vcn_inst; j++) { 280 - volatile struct amdgpu_fw_shared *fw_shared; 280 + struct amdgpu_fw_shared *fw_shared; 281 281 282 282 if (adev->vcn.harvest_config & (1 << j)) 283 283 continue; ··· 420 420 { 421 421 int i, r, idx; 422 422 struct amdgpu_device *adev = ip_block->adev; 423 - volatile struct amdgpu_fw_shared *fw_shared; 423 + struct amdgpu_fw_shared *fw_shared; 424 424 425 425 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 426 426 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { ··· 442 442 r = amdgpu_vcn_suspend(adev, i); 443 443 if (r) 444 444 return r; 445 - r = amdgpu_vcn_sw_fini(adev, i); 446 - if (r) 447 - return r; 445 + amdgpu_vcn_sw_fini(adev, i); 448 446 } 449 447 450 448 return 0; ··· 998 1000 { 999 1001 struct amdgpu_device *adev = vinst->adev; 1000 1002 int inst_idx = vinst->inst; 1001 - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 1003 + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 1002 1004 struct amdgpu_ring *ring; 1003 1005 uint32_t rb_bufsz, tmp; 1004 1006 int ret; ··· 1155 1157 { 1156 1158 struct amdgpu_device *adev = vinst->adev; 1157 1159 int i = vinst->inst; 1158 - volatile struct amdgpu_fw_shared *fw_shared = 1160 + struct amdgpu_fw_shared *fw_shared = 1159 1161 adev->vcn.inst[i].fw_shared.cpu_addr; 1160 1162 struct amdgpu_ring *ring; 1161 1163 uint32_t rb_bufsz, tmp; ··· 1667 1669 UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); 1668 1670 1669 1671 if (!ret_code) { 1670 - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 1672 + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 1671 1673 1672 1674 /* pause DPG */ 1673 1675 reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
+7 -9
drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
··· 191 191 } 192 192 193 193 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 194 - volatile struct amdgpu_fw_shared *fw_shared; 194 + struct amdgpu_fw_shared *fw_shared; 195 195 196 196 if (adev->vcn.harvest_config & (1 << i)) 197 197 continue; ··· 327 327 328 328 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 329 329 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 330 - volatile struct amdgpu_fw_shared *fw_shared; 330 + struct amdgpu_fw_shared *fw_shared; 331 331 332 332 if (adev->vcn.harvest_config & (1 << i)) 333 333 continue; ··· 349 349 if (r) 350 350 return r; 351 351 352 - r = amdgpu_vcn_sw_fini(adev, i); 353 - if (r) 354 - return r; 352 + amdgpu_vcn_sw_fini(adev, i); 355 353 } 356 354 357 355 return 0; ··· 1029 1031 { 1030 1032 struct amdgpu_device *adev = vinst->adev; 1031 1033 int inst_idx = vinst->inst; 1032 - volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 1034 + struct amdgpu_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 1033 1035 struct amdgpu_ring *ring; 1034 1036 uint32_t rb_bufsz, tmp; 1035 1037 int ret; ··· 1194 1196 { 1195 1197 struct amdgpu_device *adev = vinst->adev; 1196 1198 int i = vinst->inst; 1197 - volatile struct amdgpu_fw_shared *fw_shared; 1199 + struct amdgpu_fw_shared *fw_shared; 1198 1200 struct amdgpu_ring *ring; 1199 1201 uint32_t rb_bufsz, tmp; 1200 1202 int j, k, r; ··· 1715 1717 { 1716 1718 struct amdgpu_device *adev = vinst->adev; 1717 1719 int inst_idx = vinst->inst; 1718 - volatile struct amdgpu_fw_shared *fw_shared; 1720 + struct amdgpu_fw_shared *fw_shared; 1719 1721 struct amdgpu_ring *ring; 1720 1722 uint32_t reg_data = 0; 1721 1723 int ret_code; ··· 1834 1836 static void vcn_v3_0_dec_ring_set_wptr(struct amdgpu_ring *ring) 1835 1837 { 1836 1838 struct amdgpu_device *adev = ring->adev; 1837 - volatile struct amdgpu_fw_shared *fw_shared; 1839 + struct amdgpu_fw_shared *fw_shared; 1838 1840 1839 1841 if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { 1840 1842 /*whenever update RBC_RB_WPTR, we save the wptr in shared rb.wptr and scratch2 */
+10 -13
drivers/gpu/drm/amd/amdgpu/vcn_v4_0.c
··· 148 148 149 149 static int vcn_v4_0_fw_shared_init(struct amdgpu_device *adev, int inst_idx) 150 150 { 151 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 151 + struct amdgpu_vcn4_fw_shared *fw_shared; 152 152 153 153 fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 154 154 fw_shared->present_flag_0 = cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); ··· 278 278 279 279 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 280 280 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 281 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 281 + struct amdgpu_vcn4_fw_shared *fw_shared; 282 282 283 283 if (adev->vcn.harvest_config & (1 << i)) 284 284 continue; ··· 302 302 303 303 amdgpu_vcn_sysfs_reset_mask_fini(adev); 304 304 305 - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 306 - r = amdgpu_vcn_sw_fini(adev, i); 307 - if (r) 308 - return r; 309 - } 305 + for (i = 0; i < adev->vcn.num_vcn_inst; i++) 306 + amdgpu_vcn_sw_fini(adev, i); 310 307 311 308 return 0; 312 309 } ··· 997 1000 { 998 1001 struct amdgpu_device *adev = vinst->adev; 999 1002 int inst_idx = vinst->inst; 1000 - volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 1003 + struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 1001 1004 struct amdgpu_ring *ring; 1002 1005 uint32_t tmp; 1003 1006 int ret; ··· 1137 1140 { 1138 1141 struct amdgpu_device *adev = vinst->adev; 1139 1142 int i = vinst->inst; 1140 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 1143 + struct amdgpu_vcn4_fw_shared *fw_shared; 1141 1144 struct amdgpu_ring *ring; 1142 1145 uint32_t tmp; 1143 1146 int j, k, r; ··· 1354 1357 struct mmsch_v4_0_cmd_end end = { {0} }; 1355 1358 struct mmsch_v4_0_init_header header; 1356 1359 1357 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 1358 - volatile struct amdgpu_fw_shared_rb_setup *rb_setup; 1360 + struct amdgpu_vcn4_fw_shared *fw_shared; 1361 + struct amdgpu_fw_shared_rb_setup *rb_setup; 1359 1362 1360 1363 direct_wt.cmd_header.command_type = 1361 1364 MMSCH_COMMAND__DIRECT_REG_WRITE; ··· 1606 1609 { 1607 1610 struct amdgpu_device *adev = vinst->adev; 1608 1611 int i = vinst->inst; 1609 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 1612 + struct amdgpu_vcn4_fw_shared *fw_shared; 1610 1613 uint32_t tmp; 1611 1614 int r = 0; 1612 1615 ··· 1977 1980 .type = AMDGPU_RING_TYPE_VCN_ENC, 1978 1981 .align_mask = 0x3f, 1979 1982 .nop = VCN_ENC_CMD_NO_OP, 1980 - .extra_dw = sizeof(struct amdgpu_vcn_rb_metadata), 1983 + .extra_bytes = sizeof(struct amdgpu_vcn_rb_metadata), 1981 1984 .get_rptr = vcn_v4_0_unified_ring_get_rptr, 1982 1985 .get_wptr = vcn_v4_0_unified_ring_get_wptr, 1983 1986 .set_wptr = vcn_v4_0_unified_ring_set_wptr,
+13 -12
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_3.c
··· 212 212 213 213 ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); 214 214 sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id); 215 - r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0, 215 + 216 + /* There are no per-instance irq source IDs on 4.0.3, the IH 217 + * packets use a separate field to differentiate instances. 218 + */ 219 + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[0].irq, 0, 216 220 AMDGPU_RING_PRIO_DEFAULT, 217 221 &adev->vcn.inst[i].sched_score); 218 222 if (r) ··· 263 259 264 260 if (drm_dev_enter(&adev->ddev, &idx)) { 265 261 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 266 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 262 + struct amdgpu_vcn4_fw_shared *fw_shared; 267 263 268 264 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; 269 265 fw_shared->present_flag_0 = 0; ··· 283 279 284 280 amdgpu_vcn_sysfs_reset_mask_fini(adev); 285 281 286 - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 287 - r = amdgpu_vcn_sw_fini(adev, i); 288 - if (r) 289 - return r; 290 - } 282 + for (i = 0; i < adev->vcn.num_vcn_inst; i++) 283 + amdgpu_vcn_sw_fini(adev, i); 291 284 292 285 return 0; 293 286 } ··· 845 844 { 846 845 struct amdgpu_device *adev = vinst->adev; 847 846 int inst_idx = vinst->inst; 848 - volatile struct amdgpu_vcn4_fw_shared *fw_shared = 847 + struct amdgpu_vcn4_fw_shared *fw_shared = 849 848 adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 850 849 struct amdgpu_ring *ring; 851 850 int vcn_inst, ret; ··· 1012 1011 struct mmsch_v4_0_cmd_end end = { {0} }; 1013 1012 struct mmsch_v4_0_3_init_header header; 1014 1013 1015 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 1016 - volatile struct amdgpu_fw_shared_rb_setup *rb_setup; 1014 + struct amdgpu_vcn4_fw_shared *fw_shared; 1015 + struct amdgpu_fw_shared_rb_setup *rb_setup; 1017 1016 1018 1017 direct_wt.cmd_header.command_type = 1019 1018 MMSCH_COMMAND__DIRECT_REG_WRITE; ··· 1187 1186 { 1188 1187 struct amdgpu_device *adev = vinst->adev; 1189 1188 int i = vinst->inst; 1190 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 1189 + struct amdgpu_vcn4_fw_shared *fw_shared; 1191 1190 struct amdgpu_ring *ring; 1192 1191 int j, k, r, vcn_inst; 1193 1192 uint32_t tmp; ··· 1397 1396 { 1398 1397 struct amdgpu_device *adev = vinst->adev; 1399 1398 int i = vinst->inst; 1400 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 1399 + struct amdgpu_vcn4_fw_shared *fw_shared; 1401 1400 int r = 0, vcn_inst; 1402 1401 uint32_t tmp; 1403 1402
+6 -8
drivers/gpu/drm/amd/amdgpu/vcn_v4_0_5.c
··· 149 149 int i, r; 150 150 151 151 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 152 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 152 + struct amdgpu_vcn4_fw_shared *fw_shared; 153 153 154 154 if (adev->vcn.harvest_config & (1 << i)) 155 155 continue; ··· 249 249 250 250 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 251 251 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 252 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 252 + struct amdgpu_vcn4_fw_shared *fw_shared; 253 253 254 254 if (adev->vcn.harvest_config & (1 << i)) 255 255 continue; ··· 270 270 if (r) 271 271 return r; 272 272 273 - r = amdgpu_vcn_sw_fini(adev, i); 274 - if (r) 275 - return r; 273 + amdgpu_vcn_sw_fini(adev, i); 276 274 } 277 275 278 276 return 0; ··· 910 912 { 911 913 struct amdgpu_device *adev = vinst->adev; 912 914 int inst_idx = vinst->inst; 913 - volatile struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 915 + struct amdgpu_vcn4_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 914 916 struct amdgpu_ring *ring; 915 917 uint32_t tmp; 916 918 int ret; ··· 1047 1049 { 1048 1050 struct amdgpu_device *adev = vinst->adev; 1049 1051 int i = vinst->inst; 1050 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 1052 + struct amdgpu_vcn4_fw_shared *fw_shared; 1051 1053 struct amdgpu_ring *ring; 1052 1054 uint32_t tmp; 1053 1055 int j, k, r; ··· 1266 1268 { 1267 1269 struct amdgpu_device *adev = vinst->adev; 1268 1270 int i = vinst->inst; 1269 - volatile struct amdgpu_vcn4_fw_shared *fw_shared; 1271 + struct amdgpu_vcn4_fw_shared *fw_shared; 1270 1272 uint32_t tmp; 1271 1273 int r = 0; 1272 1274
+7 -10
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_0.c
··· 129 129 int i, r; 130 130 131 131 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 132 - volatile struct amdgpu_vcn5_fw_shared *fw_shared; 132 + struct amdgpu_vcn5_fw_shared *fw_shared; 133 133 134 134 if (adev->vcn.harvest_config & (1 << i)) 135 135 continue; ··· 211 211 212 212 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 213 213 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 214 - volatile struct amdgpu_vcn5_fw_shared *fw_shared; 214 + struct amdgpu_vcn5_fw_shared *fw_shared; 215 215 216 216 if (adev->vcn.harvest_config & (1 << i)) 217 217 continue; ··· 232 232 233 233 amdgpu_vcn_sysfs_reset_mask_fini(adev); 234 234 235 - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 236 - r = amdgpu_vcn_sw_fini(adev, i); 237 - if (r) 238 - return r; 239 - } 235 + for (i = 0; i < adev->vcn.num_vcn_inst; i++) 236 + amdgpu_vcn_sw_fini(adev, i); 240 237 241 238 return 0; 242 239 } ··· 692 695 { 693 696 struct amdgpu_device *adev = vinst->adev; 694 697 int inst_idx = vinst->inst; 695 - volatile struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 698 + struct amdgpu_vcn5_fw_shared *fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 696 699 struct amdgpu_ring *ring; 697 700 uint32_t tmp; 698 701 int ret; ··· 802 805 { 803 806 struct amdgpu_device *adev = vinst->adev; 804 807 int i = vinst->inst; 805 - volatile struct amdgpu_vcn5_fw_shared *fw_shared; 808 + struct amdgpu_vcn5_fw_shared *fw_shared; 806 809 struct amdgpu_ring *ring; 807 810 uint32_t tmp; 808 811 int j, k, r; ··· 995 998 { 996 999 struct amdgpu_device *adev = vinst->adev; 997 1000 int i = vinst->inst; 998 - volatile struct amdgpu_vcn5_fw_shared *fw_shared; 1001 + struct amdgpu_vcn5_fw_shared *fw_shared; 999 1002 uint32_t tmp; 1000 1003 int r = 0; 1001 1004
+9 -12
drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c
··· 226 226 227 227 if (drm_dev_enter(adev_to_drm(adev), &idx)) { 228 228 for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 229 - volatile struct amdgpu_vcn5_fw_shared *fw_shared; 229 + struct amdgpu_vcn5_fw_shared *fw_shared; 230 230 231 231 fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; 232 232 fw_shared->present_flag_0 = 0; ··· 245 245 return r; 246 246 } 247 247 248 - for (i = 0; i < adev->vcn.num_vcn_inst; i++) { 249 - r = amdgpu_vcn_sw_fini(adev, i); 250 - if (r) 251 - return r; 252 - } 253 - 254 248 amdgpu_vcn_sysfs_reset_mask_fini(adev); 249 + 250 + for (i = 0; i < adev->vcn.num_vcn_inst; i++) 251 + amdgpu_vcn_sw_fini(adev, i); 255 252 256 253 return 0; 257 254 } ··· 640 643 { 641 644 struct amdgpu_device *adev = vinst->adev; 642 645 int inst_idx = vinst->inst; 643 - volatile struct amdgpu_vcn5_fw_shared *fw_shared = 646 + struct amdgpu_vcn5_fw_shared *fw_shared = 644 647 adev->vcn.inst[inst_idx].fw_shared.cpu_addr; 645 648 struct amdgpu_ring *ring; 646 649 struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; ··· 776 779 struct mmsch_v5_0_cmd_end end = { {0} }; 777 780 struct mmsch_v5_0_init_header header; 778 781 779 - volatile struct amdgpu_vcn5_fw_shared *fw_shared; 780 - volatile struct amdgpu_fw_shared_rb_setup *rb_setup; 782 + struct amdgpu_vcn5_fw_shared *fw_shared; 783 + struct amdgpu_fw_shared_rb_setup *rb_setup; 781 784 782 785 direct_wt.cmd_header.command_type = 783 786 MMSCH_COMMAND__DIRECT_REG_WRITE; ··· 951 954 { 952 955 struct amdgpu_device *adev = vinst->adev; 953 956 int i = vinst->inst; 954 - volatile struct amdgpu_vcn5_fw_shared *fw_shared; 957 + struct amdgpu_vcn5_fw_shared *fw_shared; 955 958 struct amdgpu_ring *ring; 956 959 uint32_t tmp; 957 960 int j, k, r, vcn_inst; ··· 1143 1146 { 1144 1147 struct amdgpu_device *adev = vinst->adev; 1145 1148 int i = vinst->inst; 1146 - volatile struct amdgpu_vcn5_fw_shared *fw_shared; 1149 + struct amdgpu_vcn5_fw_shared *fw_shared; 1147 1150 uint32_t tmp; 1148 1151 int r = 0, vcn_inst; 1149 1152
-7
drivers/gpu/drm/amd/amdgpu/vi.c
··· 67 67 #include "sdma_v2_4.h" 68 68 #include "sdma_v3_0.h" 69 69 #include "dce_v10_0.h" 70 - #include "dce_v11_0.h" 71 70 #include "iceland_ih.h" 72 71 #include "tonga_ih.h" 73 72 #include "cz_ih.h" ··· 2123 2124 else if (amdgpu_device_has_dc_support(adev)) 2124 2125 amdgpu_device_ip_block_add(adev, &dm_ip_block); 2125 2126 #endif 2126 - else 2127 - amdgpu_device_ip_block_add(adev, &dce_v11_2_ip_block); 2128 2127 amdgpu_device_ip_block_add(adev, &uvd_v6_3_ip_block); 2129 2128 amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); 2130 2129 break; ··· 2139 2142 else if (amdgpu_device_has_dc_support(adev)) 2140 2143 amdgpu_device_ip_block_add(adev, &dm_ip_block); 2141 2144 #endif 2142 - else 2143 - amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); 2144 2145 amdgpu_device_ip_block_add(adev, &uvd_v6_0_ip_block); 2145 2146 amdgpu_device_ip_block_add(adev, &vce_v3_1_ip_block); 2146 2147 #if defined(CONFIG_DRM_AMD_ACP) ··· 2158 2163 else if (amdgpu_device_has_dc_support(adev)) 2159 2164 amdgpu_device_ip_block_add(adev, &dm_ip_block); 2160 2165 #endif 2161 - else 2162 - amdgpu_device_ip_block_add(adev, &dce_v11_0_ip_block); 2163 2166 amdgpu_device_ip_block_add(adev, &uvd_v6_2_ip_block); 2164 2167 amdgpu_device_ip_block_add(adev, &vce_v3_4_ip_block); 2165 2168 #if defined(CONFIG_DRM_AMD_ACP)
+3 -9
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
··· 521 521 cu_mask_size = sizeof(uint32_t) * (max_num_cus/32); 522 522 } 523 523 524 - minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL); 525 - if (!minfo.cu_mask.ptr) 526 - return -ENOMEM; 527 - 528 - retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size); 529 - if (retval) { 524 + minfo.cu_mask.ptr = memdup_user(cu_mask_ptr, cu_mask_size); 525 + if (IS_ERR(minfo.cu_mask.ptr)) { 530 526 pr_debug("Could not copy CU mask from userspace"); 531 - retval = -EFAULT; 532 - goto out; 527 + return PTR_ERR(minfo.cu_mask.ptr); 533 528 } 534 529 535 530 mutex_lock(&p->mutex); ··· 533 538 534 539 mutex_unlock(&p->mutex); 535 540 536 - out: 537 541 kfree(minfo.cu_mask.ptr); 538 542 return retval; 539 543 }
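memdup_user() collapses the former kzalloc() + copy_from_user() pair into a single call that returns an ERR_PTR() on failure, removing the separate -EFAULT path and the out: label. The idiom in isolation (helper name is illustrative):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Allocate and copy in one step; errors come back encoded as
 * ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT) instead of a NULL/-EFAULT pair. */
static u32 *dup_cu_mask(const void __user *cu_mask_ptr, size_t cu_mask_size)
{
	u32 *mask = memdup_user(cu_mask_ptr, cu_mask_size);

	if (IS_ERR(mask))
		return mask;	/* caller checks IS_ERR()/PTR_ERR() */
	/* ... use mask, then kfree(mask) when done ... */
	return mask;
}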
+36
drivers/gpu/drm/amd/amdkfd/kfd_device.c
··· 1550 1550 return ret; 1551 1551 } 1552 1552 1553 + int kgd2kfd_start_sched_all_nodes(struct kfd_dev *kfd) 1554 + { 1555 + struct kfd_node *node; 1556 + int i, r; 1557 + 1558 + if (!kfd->init_complete) 1559 + return 0; 1560 + 1561 + for (i = 0; i < kfd->num_nodes; i++) { 1562 + node = kfd->nodes[i]; 1563 + r = node->dqm->ops.unhalt(node->dqm); 1564 + if (r) { 1565 + dev_err(kfd_device, "Error in starting scheduler\n"); 1566 + return r; 1567 + } 1568 + } 1569 + return 0; 1570 + } 1571 + 1553 1572 int kgd2kfd_stop_sched(struct kfd_dev *kfd, uint32_t node_id) 1554 1573 { 1555 1574 struct kfd_node *node; ··· 1584 1565 1585 1566 node = kfd->nodes[node_id]; 1586 1567 return node->dqm->ops.halt(node->dqm); 1568 + } 1569 + 1570 + int kgd2kfd_stop_sched_all_nodes(struct kfd_dev *kfd) 1571 + { 1572 + struct kfd_node *node; 1573 + int i, r; 1574 + 1575 + if (!kfd->init_complete) 1576 + return 0; 1577 + 1578 + for (i = 0; i < kfd->num_nodes; i++) { 1579 + node = kfd->nodes[i]; 1580 + r = node->dqm->ops.halt(node->dqm); 1581 + if (r) 1582 + return r; 1583 + } 1584 + return 0; 1587 1585 } 1588 1586 1589 1587 bool kgd2kfd_compute_active(struct kfd_dev *kfd, uint32_t node_id)
+64 -63
drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
··· 39 39 #endif
40 40 #define dev_fmt(fmt) "kfd_migrate: " fmt
41 41
42 - static uint64_t
43 - svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
42 + static u64
43 + svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, u64 addr)
44 44 {
45 45 return addr + amdgpu_ttm_domain_start(adev, TTM_PL_VRAM);
46 46 }
47 47
48 48 static int
49 - svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
50 - dma_addr_t *addr, uint64_t *gart_addr, uint64_t flags)
49 + svm_migrate_gart_map(struct amdgpu_ring *ring, u64 npages,
50 + dma_addr_t *addr, u64 *gart_addr, u64 flags)
51 51 {
52 52 struct amdgpu_device *adev = ring->adev;
53 53 struct amdgpu_job *job;
54 54 unsigned int num_dw, num_bytes;
55 55 struct dma_fence *fence;
56 - uint64_t src_addr, dst_addr;
57 - uint64_t pte_flags;
56 + u64 src_addr, dst_addr;
57 + u64 pte_flags;
58 58 void *cpu_addr;
59 59 int r;
60 60
··· 123 123
124 124 static int
125 125 svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
126 - uint64_t *vram, uint64_t npages,
126 + u64 *vram, u64 npages,
127 127 enum MIGRATION_COPY_DIR direction,
128 128 struct dma_fence **mfence)
129 129 {
130 - const uint64_t GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
130 + const u64 GTT_MAX_PAGES = AMDGPU_GTT_MAX_TRANSFER_SIZE;
131 131 struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
132 - uint64_t gart_s, gart_d;
132 + u64 gart_s, gart_d;
133 133 struct dma_fence *next;
134 - uint64_t size;
134 + u64 size;
135 135 int r;
136 136
137 137 mutex_lock(&adev->mman.gtt_window_lock);
··· 261 261 put_page(page);
262 262 }
263 263
264 - static long
264 + static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
265 + {
266 + unsigned long mpages = 0;
267 + unsigned long i;
268 +
269 + for (i = 0; i < migrate->npages; i++) {
270 + if (migrate->dst[i] & MIGRATE_PFN_VALID &&
271 + migrate->src[i] & MIGRATE_PFN_MIGRATE)
272 + mpages++;
273 + }
274 + return mpages;
275 + }
276 +
277 + static int
265 278 svm_migrate_copy_to_vram(struct kfd_node *node, struct svm_range *prange,
266 279 struct migrate_vma *migrate, struct dma_fence **mfence,
267 - dma_addr_t *scratch, uint64_t ttm_res_offset)
280 + dma_addr_t *scratch, u64 ttm_res_offset)
268 281 {
269 - uint64_t npages = migrate->npages;
282 + u64 npages = migrate->npages;
270 283 struct amdgpu_device *adev = node->adev;
271 284 struct device *dev = adev->dev;
272 285 struct amdgpu_res_cursor cursor;
273 - long mpages;
286 + u64 mpages = 0;
274 287 dma_addr_t *src;
275 - uint64_t *dst;
276 - uint64_t i, j;
288 + u64 *dst;
289 + u64 i, j;
277 290 int r;
278 291
279 292 pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start,
280 293 prange->last, ttm_res_offset);
281 294
282 295 src = scratch;
283 - dst = (uint64_t *)(scratch + npages);
296 + dst = (u64 *)(scratch + npages);
284 297
285 298 amdgpu_res_first(prange->ttm_res, ttm_res_offset,
286 299 npages << PAGE_SHIFT, &cursor);
287 - mpages = 0;
288 300 for (i = j = 0; (i < npages) && (mpages < migrate->cpages); i++) {
289 301 struct page *spage;
290 302
··· 357 345 out_free_vram_pages:
358 346 if (r) {
359 347 pr_debug("failed %d to copy memory to vram\n", r);
360 - while (i-- && mpages) {
348 + for (i = 0; i < npages && mpages; i++) {
361 349 if (!dst[i])
362 350 continue;
363 351 svm_migrate_put_vram_page(adev, dst[i]);
364 352 migrate->dst[i] = 0;
365 353 mpages--;
366 354 }
367 - mpages = r;
368 355 }
369 356
370 357 #ifdef DEBUG_FORCE_MIXED_DOMAINS
··· 381 370 }
382 371 #endif
383 372
384 - return mpages;
373 + return r;
385 374 }
386 375
387 376 static long
388 377 svm_migrate_vma_to_vram(struct kfd_node *node, struct svm_range *prange,
389 - struct vm_area_struct *vma, uint64_t start,
390 - uint64_t end, uint32_t trigger, uint64_t ttm_res_offset)
378 + struct vm_area_struct *vma, u64 start,
379 + u64 end, uint32_t trigger, u64 ttm_res_offset)
391 380 {
392 381 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
393 - uint64_t npages = (end - start) >> PAGE_SHIFT;
382 + u64 npages = (end - start) >> PAGE_SHIFT;
394 383 struct amdgpu_device *adev = node->adev;
395 384 struct kfd_process_device *pdd;
396 385 struct dma_fence *mfence = NULL;
397 386 struct migrate_vma migrate = { 0 };
398 387 unsigned long cpages = 0;
399 - long mpages = 0;
388 + unsigned long mpages = 0;
400 389 dma_addr_t *scratch;
401 390 void *buf;
402 391 int r = -ENOMEM;
··· 409 398 migrate.pgmap_owner = SVM_ADEV_PGMAP_OWNER(adev);
410 399
411 400 buf = kvcalloc(npages,
412 - 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
401 + 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
413 402 GFP_KERNEL);
414 403 if (!buf)
415 404 goto out;
··· 442 431 else
443 432 pr_debug("0x%lx pages collected\n", cpages);
444 433
445 - mpages = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
434 + r = svm_migrate_copy_to_vram(node, prange, &migrate, &mfence, scratch, ttm_res_offset);
446 435 migrate_vma_pages(&migrate);
447 436
448 437 svm_migrate_copy_done(adev, mfence);
449 438 migrate_vma_finalize(&migrate);
450 439
451 - if (mpages >= 0)
452 - pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
453 - mpages, cpages, migrate.npages);
454 - else
455 - r = mpages;
440 + mpages = svm_migrate_successful_pages(&migrate);
441 + pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
442 + mpages, cpages, migrate.npages);
456 443
457 444 svm_range_dma_unmap_dev(adev->dev, scratch, 0, npages);
458 445
··· 460 451 start >> PAGE_SHIFT, end >> PAGE_SHIFT,
461 452 0, node->id, trigger, r);
462 453 out:
463 - if (!r && mpages > 0) {
454 + if (!r && mpages) {
464 455 pdd = svm_range_get_pdd_by_node(prange, node);
465 456 if (pdd)
466 457 WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);
467 - }
468 458
469 - return r ? r : mpages;
459 + return mpages;
460 + }
461 + return r;
470 462 }
471 463
472 464 /**
··· 491 481 {
492 482 unsigned long addr, start, end;
493 483 struct vm_area_struct *vma;
494 - uint64_t ttm_res_offset;
484 + u64 ttm_res_offset;
495 485 struct kfd_node *node;
496 486 unsigned long mpages = 0;
497 487 long r = 0;
··· 578 568 }
579 569 }
580 570
581 - static long
571 + static int
582 572 svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
583 573 struct migrate_vma *migrate, struct dma_fence **mfence,
584 - dma_addr_t *scratch, uint64_t npages)
574 + dma_addr_t *scratch, u64 npages)
585 575 {
586 576 struct device *dev = adev->dev;
587 - uint64_t *src;
577 + u64 *src;
588 578 dma_addr_t *dst;
589 579 struct page *dpage;
590 - long mpages;
591 - uint64_t i = 0, j;
592 - uint64_t addr;
580 + u64 i = 0, j;
581 + u64 addr;
593 582 int r = 0;
594 583
595 584 pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start,
··· 596 587
597 588 addr = migrate->start;
598 589
599 - src = (uint64_t *)(scratch + npages);
590 + src = (u64 *)(scratch + npages);
600 591 dst = scratch;
601 592
602 - mpages = 0;
603 593 for (i = 0, j = 0; i < npages; i++, addr += PAGE_SIZE) {
604 594 struct page *spage;
605 595
··· 647 639 dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
648 640
649 641 migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
650 - mpages++;
651 642 j++;
652 643 }
653 644
··· 656 649 out_oom:
657 650 if (r) {
658 651 pr_debug("failed %d copy to ram\n", r);
659 - while (i-- && mpages) {
660 - if (!migrate->dst[i])
661 - continue;
652 + while (i--) {
662 653 svm_migrate_put_sys_page(dst[i]);
663 654 migrate->dst[i] = 0;
664 - mpages--;
665 655 }
666 - mpages = r;
667 656 }
668 657
669 - return mpages;
658 + return r;
670 659 }
671 660
672 661 /**
··· 684 681 */
685 682 static long
686 683 svm_migrate_vma_to_ram(struct kfd_node *node, struct svm_range *prange,
687 - struct vm_area_struct *vma, uint64_t start, uint64_t end,
684 + struct vm_area_struct *vma, u64 start, u64 end,
688 685 uint32_t trigger, struct page *fault_page)
689 686 {
690 687 struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms);
691 - uint64_t npages = (end - start) >> PAGE_SHIFT;
688 + u64 npages = (end - start) >> PAGE_SHIFT;
692 689 unsigned long cpages = 0;
693 - long mpages = 0;
690 + unsigned long mpages = 0;
694 691 struct amdgpu_device *adev = node->adev;
695 692 struct kfd_process_device *pdd;
696 693 struct dma_fence *mfence = NULL;
··· 710 707 migrate.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
711 708
712 709 buf = kvcalloc(npages,
713 - 2 * sizeof(*migrate.src) + sizeof(uint64_t) + sizeof(dma_addr_t),
710 + 2 * sizeof(*migrate.src) + sizeof(u64) + sizeof(dma_addr_t),
714 711 GFP_KERNEL);
715 712 if (!buf)
716 713 goto out;
··· 744 741 else
745 742 pr_debug("0x%lx pages collected\n", cpages);
746 743
747 - mpages = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
744 + r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
748 745 scratch, npages);
749 746 migrate_vma_pages(&migrate);
750 747
751 - if (mpages >= 0)
752 - pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
748 + mpages = svm_migrate_successful_pages(&migrate);
749 + pr_debug("migrated/collected/requested 0x%lx/0x%lx/0x%lx\n",
753 750 mpages, cpages, migrate.npages);
754 - else
755 - r = mpages;
756 751
757 752 svm_migrate_copy_done(adev, mfence);
758 753 migrate_vma_finalize(&migrate);
··· 763 762 start >> PAGE_SHIFT, end >> PAGE_SHIFT,
764 763 node->id, 0, trigger, r);
765 764 out:
766 - if (!r && mpages > 0) {
765 + if (!r && mpages) {
767 766 pdd = svm_range_get_pdd_by_node(prange, node);
768 767 if (pdd)
769 768 WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);
··· 847 846
848 847 if (r >= 0) {
849 848 WARN_ONCE(prange->vram_pages < mpages,
850 - "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
851 - prange->vram_pages, mpages);
849 + "Recorded vram pages(0x%llx) should not be less than migration pages(0x%lx).",
850 + prange->vram_pages, mpages);
852 851 prange->vram_pages -= mpages;
853 852
854 853 /* prange does not have vram page set its actual_loc to system
+102 -40
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 2081 2081 2082 2082 dc_hardware_init(adev->dm.dc); 2083 2083 2084 + adev->dm.restore_backlight = true; 2085 + 2084 2086 adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev); 2085 2087 if (!adev->dm.hpd_rx_offload_wq) { 2086 2088 drm_err(adev_to_drm(adev), "failed to create hpd rx offload workqueue.\n"); ··· 2947 2945 return -ENOMEM; 2948 2946 } 2949 2947 2950 - r = i2c_add_adapter(&oem_i2c->base); 2948 + r = devm_i2c_add_adapter(adev->dev, &oem_i2c->base); 2951 2949 if (r) { 2952 2950 drm_info(adev_to_drm(adev), "Failed to register oem i2c\n"); 2953 2951 kfree(oem_i2c); ··· 2957 2955 } 2958 2956 2959 2957 return 0; 2960 - } 2961 - 2962 - static void dm_oem_i2c_hw_fini(struct amdgpu_device *adev) 2963 - { 2964 - struct amdgpu_display_manager *dm = &adev->dm; 2965 - 2966 - if (dm->oem_i2c) { 2967 - i2c_del_adapter(&dm->oem_i2c->base); 2968 - kfree(dm->oem_i2c); 2969 - dm->oem_i2c = NULL; 2970 - } 2971 2958 } 2972 2959 2973 2960 /** ··· 3009 3018 { 3010 3019 struct amdgpu_device *adev = ip_block->adev; 3011 3020 3012 - dm_oem_i2c_hw_fini(adev); 3013 - 3014 3021 amdgpu_dm_hpd_fini(adev); 3015 3022 3016 3023 amdgpu_dm_irq_fini(adev); ··· 3036 3047 drm_warn(adev_to_drm(adev), "Failed to %s pflip interrupts\n", 3037 3048 enable ? "enable" : "disable"); 3038 3049 3039 - if (enable) { 3040 - if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state))) 3041 - rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true); 3042 - } else 3043 - rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false); 3050 + if (dc_supports_vrr(adev->dm.dc->ctx->dce_version)) { 3051 + if (enable) { 3052 + if (amdgpu_dm_crtc_vrr_active( 3053 + to_dm_crtc_state(acrtc->base.state))) 3054 + rc = amdgpu_dm_crtc_set_vupdate_irq( 3055 + &acrtc->base, true); 3056 + } else 3057 + rc = amdgpu_dm_crtc_set_vupdate_irq( 3058 + &acrtc->base, false); 3044 3059 3045 - if (rc) 3046 - drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", enable ? "en" : "dis"); 3060 + if (rc) 3061 + drm_warn(adev_to_drm(adev), "Failed to %sable vupdate interrupt\n", 3062 + enable ? 
"en" : "dis"); 3063 + } 3047 3064 3048 3065 irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; 3049 3066 /* During gpu-reset we disable and then enable vblank irq, so ··· 3438 3443 dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); 3439 3444 3440 3445 dc_resume(dm->dc); 3446 + adev->dm.restore_backlight = true; 3441 3447 3442 3448 amdgpu_dm_irq_resume_early(adev); 3443 3449 ··· 6423 6427 && aconnector 6424 6428 && aconnector->force_yuv420_output) 6425 6429 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; 6430 + else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR422) 6431 + && aconnector 6432 + && aconnector->force_yuv422_output) 6433 + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR422; 6426 6434 else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) 6427 6435 && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) 6428 6436 timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; ··· 7384 7384 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); 7385 7385 drm_connector_unregister(connector); 7386 7386 drm_connector_cleanup(connector); 7387 - if (aconnector->i2c) { 7388 - i2c_del_adapter(&aconnector->i2c->base); 7389 - kfree(aconnector->i2c); 7390 - } 7391 7387 kfree(aconnector->dm_dp_aux.aux.name); 7392 7388 7393 7389 kfree(connector); ··· 7683 7687 bpc_limit = 8; 7684 7688 7685 7689 do { 7690 + drm_dbg_kms(connector->dev, "Trying with %d bpc\n", requested_bpc); 7686 7691 stream = create_stream_for_sink(connector, drm_mode, 7687 7692 dm_state, old_stream, 7688 7693 requested_bpc); ··· 7719 7722 7720 7723 } while (stream == NULL && requested_bpc >= bpc_limit); 7721 7724 7722 - if ((dc_result == DC_FAIL_ENC_VALIDATE || 7723 - dc_result == DC_EXCEED_DONGLE_CAP) && 7724 - !aconnector->force_yuv420_output) { 7725 - DRM_DEBUG_KMS("%s:%d Retry forcing yuv420 encoding\n", 7726 - __func__, __LINE__); 7727 - 7728 - aconnector->force_yuv420_output = true; 7725 + switch (dc_result) { 7726 + /* 7727 + * If we failed to validate DP bandwidth stream with the requested RGB color depth, 7728 + * we try to fallback and configure in order: 7729 + * YUV422 (8bpc, 6bpc) 7730 + * YUV420 (8bpc, 6bpc) 7731 + */ 7732 + case DC_FAIL_ENC_VALIDATE: 7733 + case DC_EXCEED_DONGLE_CAP: 7734 + case DC_NO_DP_LINK_BANDWIDTH: 7735 + /* recursively entered twice and already tried both YUV422 and YUV420 */ 7736 + if (aconnector->force_yuv422_output && aconnector->force_yuv420_output) 7737 + break; 7738 + /* first failure; try YUV422 */ 7739 + if (!aconnector->force_yuv422_output) { 7740 + drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV422\n", 7741 + __func__, __LINE__, dc_result); 7742 + aconnector->force_yuv422_output = true; 7743 + /* recursively entered and YUV422 failed, try YUV420 */ 7744 + } else if (!aconnector->force_yuv420_output) { 7745 + drm_dbg_kms(connector->dev, "%s:%d Validation failed with %d, retrying w/ YUV420\n", 7746 + __func__, __LINE__, dc_result); 7747 + aconnector->force_yuv420_output = true; 7748 + } 7729 7749 stream = create_validate_stream_for_sink(connector, drm_mode, 7730 - dm_state, old_stream); 7750 + dm_state, old_stream); 7751 + aconnector->force_yuv422_output = false; 7731 7752 aconnector->force_yuv420_output = false; 7753 + break; 7754 + case DC_OK: 7755 + break; 7756 + default: 7757 + drm_dbg_kms(connector->dev, "%s:%d Unhandled validation failure %d\n", 7758 + __func__, __LINE__, dc_result); 7759 + break; 7732 7760 } 7733 7761 7734 7762 return stream; ··· 8741 8719 } 8742 8720 8743 8721 aconnector->i2c = i2c; 8744 
- res = i2c_add_adapter(&i2c->base); 8722 + res = devm_i2c_add_adapter(dm->adev->dev, &i2c->base); 8745 8723 8746 8724 if (res) { 8747 8725 drm_err(adev_to_drm(dm->adev), "Failed to register hw i2c %d\n", link->link_index); ··· 8839 8817 static void manage_dm_interrupts(struct amdgpu_device *adev, 8840 8818 struct amdgpu_crtc *acrtc, 8841 8819 struct dm_crtc_state *acrtc_state) 8842 - { 8820 + { /* 8821 + * We cannot be sure that the frontend index maps to the same 8822 + * backend index - some even map to more than one. 8823 + * So we have to go through the CRTC to find the right IRQ. 8824 + */ 8825 + int irq_type = amdgpu_display_crtc_idx_to_irq_type( 8826 + adev, 8827 + acrtc->crtc_id); 8828 + struct drm_device *dev = adev_to_drm(adev); 8829 + 8843 8830 struct drm_vblank_crtc_config config = {0}; 8844 8831 struct dc_crtc_timing *timing; 8845 8832 int offdelay; ··· 8901 8870 8902 8871 drm_crtc_vblank_on_config(&acrtc->base, 8903 8872 &config); 8873 + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_get.*/ 8874 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 8875 + case IP_VERSION(3, 0, 0): 8876 + case IP_VERSION(3, 0, 2): 8877 + case IP_VERSION(3, 0, 3): 8878 + case IP_VERSION(3, 2, 0): 8879 + if (amdgpu_irq_get(adev, &adev->pageflip_irq, irq_type)) 8880 + drm_err(dev, "DM_IRQ: Cannot get pageflip irq!\n"); 8881 + #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8882 + if (amdgpu_irq_get(adev, &adev->vline0_irq, irq_type)) 8883 + drm_err(dev, "DM_IRQ: Cannot get vline0 irq!\n"); 8884 + #endif 8885 + } 8886 + 8904 8887 } else { 8888 + /* Allow RX6xxx, RX7700, RX7800 GPUs to call amdgpu_irq_put.*/ 8889 + switch (amdgpu_ip_version(adev, DCE_HWIP, 0)) { 8890 + case IP_VERSION(3, 0, 0): 8891 + case IP_VERSION(3, 0, 2): 8892 + case IP_VERSION(3, 0, 3): 8893 + case IP_VERSION(3, 2, 0): 8894 + #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) 8895 + if (amdgpu_irq_put(adev, &adev->vline0_irq, irq_type)) 8896 + drm_err(dev, "DM_IRQ: Cannot put vline0 irq!\n"); 8897 + #endif 8898 + if (amdgpu_irq_put(adev, &adev->pageflip_irq, irq_type)) 8899 + drm_err(dev, "DM_IRQ: Cannot put pageflip irq!\n"); 8900 + } 8901 + 8905 8902 drm_crtc_vblank_off(&acrtc->base); 8906 8903 } 8907 8904 } ··· 9951 9892 bool mode_set_reset_required = false; 9952 9893 u32 i; 9953 9894 struct dc_commit_streams_params params = {dc_state->streams, dc_state->stream_count}; 9954 - bool set_backlight_level = false; 9955 9895 9956 9896 /* Disable writeback */ 9957 9897 for_each_old_connector_in_state(state, connector, old_con_state, i) { ··· 10070 10012 acrtc->hw_mode = new_crtc_state->mode; 10071 10013 crtc->hwmode = new_crtc_state->mode; 10072 10014 mode_set_reset_required = true; 10073 - set_backlight_level = true; 10074 10015 } else if (modereset_required(new_crtc_state)) { 10075 10016 drm_dbg_atomic(dev, 10076 10017 "Atomic commit: RESET. crtc id %d:[%p]\n", ··· 10126 10069 * to fix a flicker issue. 10127 10070 * It will cause the dm->actual_brightness is not the current panel brightness 10128 10071 * level. (the dm->brightness is the correct panel level) 10129 - * So we set the backlight level with dm->brightness value after set mode 10072 + * So we set the backlight level with dm->brightness value after initial 10073 + * set mode. Use restore_backlight flag to avoid setting backlight level 10074 + * for every subsequent mode set. 
10130 10075 */ 10131 - if (set_backlight_level) { 10076 + if (dm->restore_backlight) { 10132 10077 for (i = 0; i < dm->num_of_edps; i++) { 10133 10078 if (dm->backlight_dev[i]) 10134 10079 amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); 10135 10080 } 10081 + dm->restore_backlight = false; 10136 10082 } 10137 10083 } 10138 10084 ··· 10848 10788 } else { 10849 10789 config.state = VRR_STATE_INACTIVE; 10850 10790 } 10791 + } else { 10792 + config.state = VRR_STATE_UNSUPPORTED; 10851 10793 } 10852 10794 out: 10853 10795 new_crtc_state->freesync_config = config; ··· 12751 12689 12752 12690 dm_con_state = to_dm_connector_state(connector->state); 12753 12691 12754 - if (!adev->dm.freesync_module) 12692 + if (!adev->dm.freesync_module || !dc_supports_vrr(sink->ctx->dce_version)) 12755 12693 goto update; 12756 12694 12757 12695 edid = drm_edid_raw(drm_edid); // FIXME: Get rid of drm_edid_raw()
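The fallback ladder above is easier to see outside the driver plumbing. Below is a minimal, self-contained C sketch of the same order of attempts — RGB first, then YUV422, then YUV420, walking the bpc ladder down for each — where validate() is a hypothetical stand-in for DC's stream validation and the link-budget outcome is invented for illustration (the real code drives the retries through the force_yuv422_output/force_yuv420_output flags instead):

#include <stdbool.h>
#include <stdio.h>

enum pixel_encoding { ENC_RGB, ENC_YUV422, ENC_YUV420 };

/* Hypothetical stand-in for DC's stream validation. */
static bool validate(enum pixel_encoding enc, int bpc)
{
    return enc == ENC_YUV420 && bpc == 8; /* pretend only this fits the link */
}

static bool try_encoding(enum pixel_encoding enc)
{
    for (int bpc = 8; bpc >= 6; bpc -= 2) /* 8 bpc, then 6 bpc */
        if (validate(enc, bpc))
            return true;
    return false;
}

int main(void)
{
    const enum pixel_encoding order[] = { ENC_RGB, ENC_YUV422, ENC_YUV420 };

    for (unsigned int i = 0; i < 3; i++) {
        if (try_encoding(order[i])) {
            printf("validated with encoding %u\n", i);
            return 0;
        }
    }
    printf("no encoding fits\n");
    return 1;
}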
+8
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 631 631 u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; 632 632 633 633 /** 634 + * @restore_backlight: 635 + * 636 + * Flag to indicate whether to restore backlight after modeset. 637 + */ 638 + bool restore_backlight; 639 + 640 + /** 634 641 * @aux_hpd_discon_quirk: 635 642 * 636 643 * quirk for hpd discon while aux is on-going. ··· 806 799 807 800 bool fake_enable; 808 801 bool force_yuv420_output; 802 + bool force_yuv422_output; 809 803 struct dsc_preferred_settings dsc_settings; 810 804 union dp_downstream_port_present mst_downstream_port_present; 811 805 /* Cached display modes */
+10 -6
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
··· 317 317 dc->config.disable_ips != DMUB_IPS_DISABLE_ALL && 318 318 sr_supported && vblank->config.disable_immediate) 319 319 drm_crtc_vblank_restore(crtc); 320 + } 320 321 321 - /* vblank irq on -> Only need vupdate irq in vrr mode */ 322 - if (amdgpu_dm_crtc_vrr_active(acrtc_state)) 323 - rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); 324 - } else { 325 - /* vblank irq off -> vupdate irq off */ 326 - rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false); 322 + if (dc_supports_vrr(dm->dc->ctx->dce_version)) { 323 + if (enable) { 324 + /* vblank irq on -> Only need vupdate irq in vrr mode */ 325 + if (amdgpu_dm_crtc_vrr_active(acrtc_state)) 326 + rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true); 327 + } else { 328 + /* vblank irq off -> vupdate irq off */ 329 + rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false); 330 + } 327 331 } 328 332 329 333 if (rc)
+2 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
··· 146 146 147 147 if (*cap - *size < 1) { 148 148 uint64_t new_cap = *cap * 2; 149 - uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); 149 + uint64_t *new_mods = kmalloc_array(new_cap, sizeof(uint64_t), GFP_KERNEL); 150 150 151 151 if (!new_mods) { 152 152 kfree(*mods); ··· 732 732 if (adev->family < AMDGPU_FAMILY_AI) 733 733 return 0; 734 734 735 - *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); 735 + *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL); 736 736 737 737 if (plane_type == DRM_PLANE_TYPE_CURSOR) { 738 738 amdgpu_dm_plane_add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
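The kmalloc_array() conversions above are not just style: with kmalloc(n * size, ...), the multiplication can wrap on overflow and silently return an undersized buffer, while kmalloc_array() fails the request instead. A kernel-context sketch of the pattern:

/* kmalloc_array() returns NULL if capacity * sizeof(uint64_t) would
 * overflow, instead of allocating a truncated buffer.
 */
uint64_t *mods = kmalloc_array(capacity, sizeof(uint64_t), GFP_KERNEL);

if (!mods)
    return -ENOMEM;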
+1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
··· 98 98 const struct dm_pp_single_disp_config *dc_cfg = 99 99 &pp_display_cfg->disp_configs[i]; 100 100 adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; 101 + adev->pm.pm_display_cfg.displays[i].pixel_clock = dc_cfg->pixel_clock; 101 102 } 102 103 103 104 amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg);
+1 -1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c
··· 31 31 #include "amdgpu_dm.h" 32 32 #include "modules/power/power_helpers.h" 33 33 #include "dmub/inc/dmub_cmd.h" 34 - #include "dc/inc/link.h" 34 + #include "dc/inc/link_service.h" 35 35 36 36 /* 37 37 * amdgpu_dm_link_supports_replay() - check if the link supports replay
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
··· 28 28 #include "dccg.h" 29 29 #include "clk_mgr_internal.h" 30 30 #include "dc_state_priv.h" 31 - #include "link.h" 31 + #include "link_service.h" 32 32 33 33 #include "dce100/dce_clk_mgr.h" 34 34 #include "dce110/dce110_clk_mgr.h"
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dce110/dce110_clk_mgr.c
··· 164 164 stream->link->cur_link_settings.link_rate; 165 165 cfg->link_settings.link_spread = 166 166 stream->link->cur_link_settings.link_spread; 167 - cfg->sym_clock = stream->phy_pix_clk; 167 + cfg->pixel_clock = stream->phy_pix_clk; 168 168 /* Round v_refresh*/ 169 169 cfg->v_refresh = stream->timing.pix_clk_100hz * 100; 170 170 cfg->v_refresh /= stream->timing.h_total;
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
··· 47 47 #include "dcn30/dcn30_clk_mgr.h" 48 48 49 49 #include "dc_dmub_srv.h" 50 - #include "link.h" 50 + #include "link_service.h" 51 51 52 52 #include "logger_types.h" 53 53
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c
··· 48 48 #include "dcn31/dcn31_clk_mgr.h" 49 49 50 50 #include "dc_dmub_srv.h" 51 - #include "link.h" 51 + #include "link_service.h" 52 52 #include "dcn314_smu.h" 53 53 54 54
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c
··· 46 46 #define DC_LOGGER \ 47 47 clk_mgr->base.base.ctx->logger 48 48 49 - #include "link.h" 49 + #include "link_service.h" 50 50 51 51 #define TO_CLK_MGR_DCN315(clk_mgr)\ 52 52 container_of(clk_mgr, struct clk_mgr_dcn315, base)
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
··· 39 39 #include "dcn316_smu.h" 40 40 #include "dm_helpers.h" 41 41 #include "dc_dmub_srv.h" 42 - #include "link.h" 42 + #include "link_service.h" 43 43 44 44 // DCN316 this is CLK1 instance 45 45 #define MAX_INSTANCE 7
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
··· 33 33 #include "reg_helper.h" 34 34 #include "core_types.h" 35 35 #include "dm_helpers.h" 36 - #include "link.h" 36 + #include "link_service.h" 37 37 #include "dc_state_priv.h" 38 38 #include "atomfirmware.h" 39 39 #include "dcn32_smu13_driver_if.h"
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
··· 44 44 #include "dcn31/dcn31_clk_mgr.h" 45 45 46 46 #include "dc_dmub_srv.h" 47 - #include "link.h" 47 + #include "link_service.h" 48 48 #include "logger_types.h" 49 49 50 50 #undef DC_LOGGER
+1 -1
drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
··· 13 13 #include "reg_helper.h" 14 14 #include "core_types.h" 15 15 #include "dm_helpers.h" 16 - #include "link.h" 16 + #include "link_service.h" 17 17 #include "dc_state_priv.h" 18 18 #include "atomfirmware.h" 19 19
+3 -3
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 60 60 #include "link_encoder.h" 61 61 #include "link_enc_cfg.h" 62 62 63 - #include "link.h" 63 + #include "link_service.h" 64 64 #include "dm_helpers.h" 65 65 #include "mem_input.h" 66 66 ··· 5622 5622 subvp_pipe_type[i] = dc_state_get_pipe_subvp_type(context, pipe); 5623 5623 } 5624 5624 } 5625 - 5626 - DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n", 5625 + if (!dc->caps.is_apu) 5626 + DC_LOG_DC("%s: allow_idle=%d\n HardMinUClk_Khz=%d HardMinDramclk_Khz=%d\n Pipe_0=%d Pipe_1=%d Pipe_2=%d Pipe_3=%d Pipe_4=%d Pipe_5=%d (caller=%s)\n", 5627 5627 __func__, allow, idle_fclk_khz, idle_dramclk_khz, subvp_pipe_type[0], subvp_pipe_type[1], subvp_pipe_type[2], 5628 5628 subvp_pipe_type[3], subvp_pipe_type[4], subvp_pipe_type[5], caller_name); 5629 5629
+15 -22
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 40 40 #include "virtual/virtual_stream_encoder.h" 41 41 #include "dpcd_defs.h" 42 42 #include "link_enc_cfg.h" 43 - #include "link.h" 43 + #include "link_service.h" 44 44 #include "clk_mgr.h" 45 45 #include "dc_state_priv.h" 46 46 #include "dc_stream_priv.h" ··· 95 95 #define DC_LOGGER \ 96 96 dc->ctx->logger 97 97 #define DC_LOGGER_INIT(logger) 98 - 99 98 #include "dml2/dml2_wrapper.h" 100 99 101 100 #define UNABLE_TO_SPLIT -1 ··· 2148 2149 h_active = timing->h_addressable + 2149 2150 timing->h_border_left + 2150 2151 timing->h_border_right + 2151 - otg_master->hblank_borrow; 2152 + otg_master->dsc_padding_params.dsc_hactive_padding; 2152 2153 width = h_active / count; 2153 2154 2154 2155 if (otg_master->stream_res.tg) ··· 4266 4267 return res; 4267 4268 } 4268 4269 4270 + #if defined(CONFIG_DRM_AMD_DC_FP) 4271 + #endif /* CONFIG_DRM_AMD_DC_FP */ 4272 + 4269 4273 /** 4270 - * decide_hblank_borrow - Decides the horizontal blanking borrow value for a given pipe context. 4274 + * calculate_timing_params_for_dsc_with_padding - Calculates timing parameters for DSC with padding. 4271 4275 * @pipe_ctx: Pointer to the pipe context structure. 4272 4276 * 4273 - * This function calculates the horizontal blanking borrow value for a given pipe context based on the 4277 + * This function calculates the timing parameters for a given pipe context based on the 4274 4278 * display stream compression (DSC) configuration. If the horizontal active pixels (hactive) are less 4275 - * than the total width of the DSC slices, it sets the hblank_borrow value to the difference. If the 4276 - * total horizontal timing minus the hblank_borrow value is less than 32, it resets the hblank_borrow 4279 + * than the total width of the DSC slices, it sets the dsc_hactive_padding value to the difference. If the 4280 + * total horizontal timing minus the dsc_hactive_padding value is less than 32, it resets the dsc_hactive_padding 4277 4281 * value to 0. 
4278 4282 */ 4279 - static void decide_hblank_borrow(struct pipe_ctx *pipe_ctx) 4283 + static void calculate_timing_params_for_dsc_with_padding(struct pipe_ctx *pipe_ctx) 4280 4284 { 4281 - uint32_t hactive; 4282 - uint32_t ceil_slice_width; 4283 4285 struct dc_stream_state *stream = NULL; 4284 4286 4285 4287 if (!pipe_ctx) 4286 4288 return; 4287 4289 4288 4290 stream = pipe_ctx->stream; 4291 + pipe_ctx->dsc_padding_params.dsc_hactive_padding = 0; 4292 + pipe_ctx->dsc_padding_params.dsc_htotal_padding = 0; 4289 4293 4290 - if (stream->timing.flags.DSC) { 4291 - hactive = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; 4294 + if (stream) 4295 + pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz = stream->timing.pix_clk_100hz; 4292 4296 4293 - /* Assume if determined slices does not divide Hactive evenly, Hborrow is needed for padding*/ 4294 - if (hactive % stream->timing.dsc_cfg.num_slices_h != 0) { 4295 - ceil_slice_width = (hactive / stream->timing.dsc_cfg.num_slices_h) + 1; 4296 - pipe_ctx->hblank_borrow = ceil_slice_width * stream->timing.dsc_cfg.num_slices_h - hactive; 4297 - 4298 - if (stream->timing.h_total - hactive - pipe_ctx->hblank_borrow < 32) 4299 - pipe_ctx->hblank_borrow = 0; 4300 - } 4301 - } 4302 4297 } 4303 4298 4304 4299 /** ··· 4335 4342 4336 4343 /* Decide whether hblank borrow is needed and save it in pipe_ctx */ 4337 4344 if (dc->debug.enable_hblank_borrow) 4338 - decide_hblank_borrow(pipe_ctx); 4345 + calculate_timing_params_for_dsc_with_padding(pipe_ctx); 4339 4346 4340 4347 if (dc->res_pool->funcs->patch_unknown_plane_state && 4341 4348 pipe_ctx->plane_state &&
+1 -1
drivers/gpu/drm/amd/display/dc/core/dc_state.c
··· 211 211 return NULL; 212 212 } 213 213 214 - if (!dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) { 214 + if (dc->caps.dcmode_power_limits_present && !dml2_create(dc, &dc->dml2_dc_power_options, &state->bw_ctx.dml2_dc_power_source)) { 215 215 dc_state_release(state); 216 216 return NULL; 217 217 }
+1 -1
drivers/gpu/drm/amd/display/dc/dc.h
··· 55 55 struct set_config_cmd_payload; 56 56 struct dmub_notification; 57 57 58 - #define DC_VER "3.2.349" 58 + #define DC_VER "3.2.350" 59 59 60 60 /** 61 61 * MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
+5
drivers/gpu/drm/amd/display/dc/dc_helper.c
··· 755 755 return "Unknown"; 756 756 } 757 757 } 758 + 759 + bool dc_supports_vrr(const enum dce_version v) 760 + { 761 + return v >= DCE_VERSION_8_0; 762 + }
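dc_supports_vrr() gives callers a single predicate in place of scattered DCE-version comparisons; per this helper, DCE 8.0 and newer is treated as VRR-capable. The gating pattern used by the amdgpu_dm hunks earlier in this series, sketched:

/* Skip all VRR bookkeeping (vupdate irq, freesync config) on
 * hardware the predicate rules out.
 */
if (!dc_supports_vrr(dm->dc->ctx->dce_version))
    return 0;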
+1 -1
drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
··· 128 128 spl_in->odm_slice_index = resource_get_odm_slice_index(pipe_ctx); 129 129 // Make spl input basic out info output_size width point to stream h active 130 130 spl_in->basic_out.output_size.width = 131 - stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->hblank_borrow; 131 + stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding; 132 132 // Make spl input basic out info output_size height point to v active 133 133 spl_in->basic_out.output_size.height = 134 134 stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top;
+1 -1
drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
··· 3 3 // Copyright 2024 Advanced Micro Devices, Inc. 4 4 5 5 #include "dc.h" 6 - #include "link.h" 6 + #include "link_service.h" 7 7 #include "dc_dmub_srv.h" 8 8 #include "dmub/dmub_srv.h" 9 9 #include "core_types.h"
+1 -1
drivers/gpu/drm/amd/display/dc/dio/dcn10/dcn10_stream_encoder.c
··· 28 28 #include "dcn10_stream_encoder.h" 29 29 #include "reg_helper.h" 30 30 #include "hw_shared.h" 31 - #include "link.h" 31 + #include "link_service.h" 32 32 #include "dpcd_defs.h" 33 33 #include "dcn30/dcn30_afmt.h" 34 34
+1 -1
drivers/gpu/drm/amd/display/dc/dio/dcn20/dcn20_stream_encoder.c
··· 29 29 #include "dcn20_stream_encoder.h" 30 30 #include "reg_helper.h" 31 31 #include "hw_shared.h" 32 - #include "link.h" 32 + #include "link_service.h" 33 33 #include "dpcd_defs.h" 34 34 35 35 #define DC_LOGGER \
+1 -1
drivers/gpu/drm/amd/display/dc/dio/dcn314/dcn314_dio_stream_encoder.c
··· 30 30 #include "dcn314_dio_stream_encoder.h" 31 31 #include "reg_helper.h" 32 32 #include "hw_shared.h" 33 - #include "link.h" 33 + #include "link_service.h" 34 34 #include "dpcd_defs.h" 35 35 36 36 #define DC_LOGGER \
+1 -1
drivers/gpu/drm/amd/display/dc/dio/dcn32/dcn32_dio_stream_encoder.c
··· 29 29 #include "dcn32_dio_stream_encoder.h" 30 30 #include "reg_helper.h" 31 31 #include "hw_shared.h" 32 - #include "link.h" 32 + #include "link_service.h" 33 33 #include "dpcd_defs.h" 34 34 35 35 #define DC_LOGGER \
+1 -1
drivers/gpu/drm/amd/display/dc/dio/dcn35/dcn35_dio_stream_encoder.c
··· 29 29 #include "dcn35_dio_stream_encoder.h" 30 30 #include "reg_helper.h" 31 31 #include "hw_shared.h" 32 - #include "link.h" 32 + #include "link_service.h" 33 33 #include "dpcd_defs.h" 34 34 35 35 #define DC_LOGGER \
+1 -1
drivers/gpu/drm/amd/display/dc/dio/dcn401/dcn401_dio_stream_encoder.c
··· 32 32 #include "dcn401_dio_stream_encoder.h" 33 33 #include "reg_helper.h" 34 34 #include "hw_shared.h" 35 - #include "link.h" 35 + #include "link_service.h" 36 36 #include "dpcd_defs.h" 37 37 38 38 #define DC_LOGGER \
+2
drivers/gpu/drm/amd/display/dc/dm_services.h
··· 311 311 312 312 char *dce_version_to_string(const int version); 313 313 314 + bool dc_supports_vrr(const enum dce_version v); 315 + 314 316 #endif /* __DM_SERVICES_H__ */
+1 -1
drivers/gpu/drm/amd/display/dc/dm_services_types.h
··· 127 127 uint32_t src_height; 128 128 uint32_t src_width; 129 129 uint32_t v_refresh; 130 - uint32_t sym_clock; /* HDMI only */ 130 + uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */ 131 131 struct dc_link_settings link_settings; /* DP only */ 132 132 }; 133 133
+1 -2
drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
··· 30 30 #include "dcn20/dcn20_resource.h" 31 31 #include "dcn21/dcn21_resource.h" 32 32 #include "clk_mgr/dcn21/rn_clk_mgr.h" 33 - 34 - #include "link.h" 33 + #include "link_service.h" 35 34 #include "dcn20_fpu.h" 36 35 #include "dc_state_priv.h" 37 36
+1 -1
drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
··· 31 31 // We need this includes for WATERMARKS_* defines 32 32 #include "clk_mgr/dcn32/dcn32_smu13_driver_if.h" 33 33 #include "dcn30/dcn30_resource.h" 34 - #include "link.h" 34 + #include "link_service.h" 35 35 #include "dc_state_priv.h" 36 36 37 37 #define DC_LOGGER_INIT(logger)
+1 -1
drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
··· 31 31 #include "dml/dcn31/dcn31_fpu.h" 32 32 #include "dml/dml_inline_defs.h" 33 33 34 - #include "link.h" 34 + #include "link_service.h" 35 35 36 36 #define DC_LOGGER_INIT(logger) 37 37
+1 -1
drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
··· 10 10 #include "dml/dcn35/dcn35_fpu.h" 11 11 #include "dml/dml_inline_defs.h" 12 12 13 - #include "link.h" 13 + #include "link_service.h" 14 14 15 15 #define DC_LOGGER_INIT(logger) 16 16
+1 -1
drivers/gpu/drm/amd/display/dc/dml2/display_mode_core.c
··· 6529 6529 mode_lib->ms.TotImmediateFlipBytes = 0; 6530 6530 for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) { 6531 6531 if (!(mode_lib->ms.policy.ImmediateFlipRequirement[k] == dml_immediate_flip_not_required)) { 6532 - mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]; 6532 + mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (mode_lib->ms.PDEAndMetaPTEBytesPerFrame[j][k] + mode_lib->ms.MetaRowBytes[j][k]); 6533 6533 if (mode_lib->ms.use_one_row_for_frame_flip[j][k]) { 6534 6534 mode_lib->ms.TotImmediateFlipBytes = mode_lib->ms.TotImmediateFlipBytes + mode_lib->ms.NoOfDPP[j][k] * (2 * mode_lib->ms.DPTEBytesPerRow[j][k]); 6535 6535 } else {
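The display_mode_core.c change is a missing-parentheses fix: without the grouping, only the PDE/meta-PTE bytes were scaled by the per-plane DPP count and MetaRowBytes was added once, understating TotImmediateFlipBytes. A self-contained illustration with made-up numbers:

#include <stdio.h>

int main(void)
{
    /* Illustrative values only, not real DML output. */
    unsigned int num_dpp = 2, pde_bytes = 1000, meta_row_bytes = 500;

    unsigned int before = num_dpp * pde_bytes + meta_row_bytes;   /* 2500 */
    unsigned int after  = num_dpp * (pde_bytes + meta_row_bytes); /* 3000 */

    printf("before=%u after=%u\n", before, after);
    return 0;
}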
+18 -23
drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
··· 84 84 85 85 static void populate_dml21_timing_config_from_stream_state(struct dml2_timing_cfg *timing, 86 86 struct dc_stream_state *stream, 87 + struct pipe_ctx *pipe_ctx, 87 88 struct dml2_context *dml_ctx) 88 89 { 89 90 unsigned int hblank_start, vblank_start, min_hardware_refresh_in_uhz; 91 + uint32_t pix_clk_100hz; 90 92 91 - timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right; 93 + timing->h_active = stream->timing.h_addressable + stream->timing.h_border_left + stream->timing.h_border_right + pipe_ctx->dsc_padding_params.dsc_hactive_padding; 92 94 timing->v_active = stream->timing.v_addressable + stream->timing.v_border_bottom + stream->timing.v_border_top; 93 95 timing->h_front_porch = stream->timing.h_front_porch; 94 96 timing->v_front_porch = stream->timing.v_front_porch; 95 97 timing->pixel_clock_khz = stream->timing.pix_clk_100hz / 10; 98 + if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) 99 + timing->pixel_clock_khz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz / 10; 96 100 if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) 97 101 timing->pixel_clock_khz *= 2; 98 - timing->h_total = stream->timing.h_total; 102 + timing->h_total = stream->timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding; 99 103 timing->v_total = stream->timing.v_total; 100 104 timing->h_sync_width = stream->timing.h_sync_width; 101 105 timing->interlaced = stream->timing.flags.INTERLACE; 102 106 103 107 hblank_start = stream->timing.h_total - stream->timing.h_front_porch; 104 108 105 - timing->h_blank_end = hblank_start - stream->timing.h_addressable 109 + timing->h_blank_end = hblank_start - stream->timing.h_addressable - pipe_ctx->dsc_padding_params.dsc_hactive_padding 106 110 - stream->timing.h_border_left - stream->timing.h_border_right; 107 111 108 112 if (hblank_start < stream->timing.h_addressable) ··· 125 121 /* limit min refresh rate to DC cap */ 126 122 min_hardware_refresh_in_uhz = stream->timing.min_refresh_in_uhz; 127 123 if (stream->ctx->dc->caps.max_v_total != 0) { 128 - min_hardware_refresh_in_uhz = div64_u64((stream->timing.pix_clk_100hz * 100000000ULL), 129 - (stream->timing.h_total * (long long)calc_max_hardware_v_total(stream))); 124 + if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) { 125 + pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz; 126 + } else { 127 + pix_clk_100hz = stream->timing.pix_clk_100hz; 128 + } 129 + min_hardware_refresh_in_uhz = div64_u64((pix_clk_100hz * 100000000ULL), 130 + (timing->h_total * (long long)calc_max_hardware_v_total(stream))); 130 131 } 131 132 132 133 timing->drr_config.min_refresh_uhz = max(stream->timing.min_refresh_in_uhz, min_hardware_refresh_in_uhz); ··· 180 171 } 181 172 182 173 timing->vblank_nom = timing->v_total - timing->v_active; 183 - } 184 - 185 - /** 186 - * adjust_dml21_hblank_timing_config_from_pipe_ctx - Adjusts the horizontal blanking timing configuration 187 - * based on the pipe context. 188 - * @timing: Pointer to the dml2_timing_cfg structure to be adjusted. 189 - * @pipe: Pointer to the pipe_ctx structure containing the horizontal blanking borrow value. 190 - * 191 - * This function modifies the horizontal active and blank end timings by adding and subtracting 192 - * the horizontal blanking borrow value from the pipe context, respectively. 
193 - */ 194 - static void adjust_dml21_hblank_timing_config_from_pipe_ctx(struct dml2_timing_cfg *timing, struct pipe_ctx *pipe) 195 - { 196 - timing->h_active += pipe->hblank_borrow; 197 - timing->h_blank_end -= pipe->hblank_borrow; 198 174 } 199 175 200 176 static void populate_dml21_output_config_from_stream_state(struct dml2_link_output_cfg *output, ··· 481 487 temp_pipe->plane_state = pipe->plane_state; 482 488 temp_pipe->plane_res.scl_data.taps = pipe->plane_res.scl_data.taps; 483 489 temp_pipe->stream_res = pipe->stream_res; 484 - temp_pipe->hblank_borrow = pipe->hblank_borrow; 490 + temp_pipe->dsc_padding_params.dsc_hactive_padding = pipe->dsc_padding_params.dsc_hactive_padding; 491 + temp_pipe->dsc_padding_params.dsc_htotal_padding = pipe->dsc_padding_params.dsc_htotal_padding; 492 + temp_pipe->dsc_padding_params.dsc_pix_clk_100hz = pipe->dsc_padding_params.dsc_pix_clk_100hz; 485 493 dml_ctx->config.callbacks.build_scaling_params(temp_pipe); 486 494 break; 487 495 } ··· 751 755 disp_cfg_stream_location = dml_dispcfg->num_streams++; 752 756 753 757 ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location < __DML2_WRAPPER_MAX_STREAMS_PLANES__); 754 - populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], dml_ctx); 755 - adjust_dml21_hblank_timing_config_from_pipe_ctx(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, &context->res_ctx.pipe_ctx[stream_index]); 758 + populate_dml21_timing_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].timing, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index], dml_ctx); 756 759 populate_dml21_output_config_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location].output, context->streams[stream_index], &context->res_ctx.pipe_ctx[stream_index]); 757 760 populate_dml21_stream_overrides_from_stream_state(&dml_dispcfg->stream_descriptors[disp_cfg_stream_location], context->streams[stream_index], &context->stream_status[stream_index]); 758 761
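With DSC padding active, the helper above now derives the minimum-refresh clamp from the padded h_total and the padded pixel clock. A self-contained sketch of the clamp arithmetic, using assumed timing values (148.5 MHz pixel clock, h_total 2200, hardware v_total cap 4095):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t pix_clk_100hz = 1485000; /* 148.5 MHz in 100 Hz units (assumed) */
    uint64_t h_total = 2200;          /* includes any dsc_htotal_padding */
    uint64_t max_v_total = 4095;      /* assumed hardware cap */

    /* min refresh (uHz) = pixclk(100 Hz) * 1e8 / (h_total * max_v_total) */
    uint64_t min_refresh_uhz =
        (pix_clk_100hz * 100000000ULL) / (h_total * max_v_total);

    /* ~16483516 uHz, i.e. roughly 16.5 Hz */
    printf("min refresh = %llu uHz\n", (unsigned long long)min_refresh_uhz);
    return 0;
}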
+4
drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_wrapper.c
··· 224 224 dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); 225 225 226 226 /* Populate stream, plane mappings and other fields in display config. */ 227 + DC_FP_START(); 227 228 result = dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); 229 + DC_FP_END(); 228 230 if (!result) 229 231 return false; 230 232 ··· 281 279 dml_ctx->config.svp_pstate.callbacks.release_phantom_streams_and_planes(in_dc, context); 282 280 283 281 mode_support->dml2_instance = dml_init->dml2_instance; 282 + DC_FP_START(); 284 283 dml21_map_dc_state_into_dml_display_cfg(in_dc, context, dml_ctx); 284 + DC_FP_END(); 285 285 dml_ctx->v21.mode_programming.dml2_instance->scratch.build_mode_programming_locals.mode_programming_params.programming = dml_ctx->v21.mode_programming.programming; 286 286 DC_FP_START(); 287 287 is_supported = dml2_check_mode_supported(mode_support);
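dml21_map_dc_state_into_dml_display_cfg() does floating-point math, which kernel code may only execute inside an explicit FPU section; the added DC_FP_START()/DC_FP_END() pairs provide that bracket (on x86 they ultimately wrap kernel_fpu_begin()/kernel_fpu_end()). The pattern, sketched with a hypothetical helper:

DC_FP_START();                        /* save FPU state; keep the section short */
result = compute_with_floats(args);   /* hypothetical FP-using call */
DC_FP_END();                          /* restore FPU state */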
+3 -3
drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
··· 16 16 17 17 enum dml2_project_id { 18 18 dml2_project_invalid = 0, 19 - dml2_project_dcn4x_stage1 = 1, 20 - dml2_project_dcn4x_stage2 = 2, 21 - dml2_project_dcn4x_stage2_auto_drr_svp = 3, 19 + dml2_project_dcn4x_stage1, 20 + dml2_project_dcn4x_stage2, 21 + dml2_project_dcn4x_stage2_auto_drr_svp, 22 22 }; 23 23 24 24 enum dml2_pstate_change_support {
+2 -1
drivers/gpu/drm/amd/display/dc/dsc/dcn20/dcn20_dsc.c
··· 406 406 dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1; 407 407 dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0; 408 408 409 + // Need to find the ceiling value for the slice width 410 + dsc_reg_vals->pps.slice_width = (dsc_cfg->pic_width + dsc_cfg->dc_dsc_cfg.num_slices_h - 1) / dsc_cfg->dc_dsc_cfg.num_slices_h; 409 411 // TODO: in addition to validating slice height (pic height must be divisible by slice height), 410 412 // see what happens when the same condition doesn't apply for slice_width/pic_width. 411 - dsc_reg_vals->pps.slice_width = dsc_cfg->pic_width / dsc_cfg->dc_dsc_cfg.num_slices_h; 412 413 dsc_reg_vals->pps.slice_height = dsc_cfg->pic_height / dsc_cfg->dc_dsc_cfg.num_slices_v; 413 414 414 415 ASSERT(dsc_reg_vals->pps.slice_height * dsc_cfg->dc_dsc_cfg.num_slices_v == dsc_cfg->pic_height);
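The dcn20_dsc.c change switches the programmed slice width from floor to ceiling division, so the configured slices always cover pic_width even when it is not a multiple of num_slices_h. The open-coded (n + d - 1) / d is the usual idiom (the kernel also provides DIV_ROUND_UP()); a self-contained illustration:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int pic_width = 3840, slices = 7; /* assumed odd slice count */

    unsigned int floor_w = pic_width / slices;             /* 548: 4 px short */
    unsigned int ceil_w = DIV_ROUND_UP(pic_width, slices); /* 549 */

    printf("floor=%u ceil=%u ceil*slices=%u\n", floor_w, ceil_w, ceil_w * slices);
    return 0;
}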
+1 -1
drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c
··· 28 28 #include "include/hdcp_msg_types.h" 29 29 #include "include/signal_types.h" 30 30 #include "core_types.h" 31 - #include "link.h" 31 + #include "link_service.h" 32 32 #include "link_hwss.h" 33 33 #include "link/protocols/link_dpcd.h" 34 34
+21 -19
drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
··· 48 48 #include "link_encoder.h" 49 49 #include "link_enc_cfg.h" 50 50 #include "link_hwss.h" 51 - #include "link.h" 51 + #include "link_service.h" 52 52 #include "dccg.h" 53 53 #include "clock_source.h" 54 54 #include "clk_mgr.h" ··· 1601 1601 } 1602 1602 1603 1603 if (pipe_ctx->stream_res.audio != NULL) { 1604 - build_audio_output(context, pipe_ctx, &pipe_ctx->stream_res.audio_output); 1604 + struct audio_output audio_output = {0}; 1605 1605 1606 - link_hwss->setup_audio_output(pipe_ctx, &pipe_ctx->stream_res.audio_output, 1606 + build_audio_output(context, pipe_ctx, &audio_output); 1607 + 1608 + link_hwss->setup_audio_output(pipe_ctx, &audio_output, 1607 1609 pipe_ctx->stream_res.audio->inst); 1608 1610 1609 1611 pipe_ctx->stream_res.audio->funcs->az_configure( 1610 1612 pipe_ctx->stream_res.audio, 1611 1613 pipe_ctx->stream->signal, 1612 - &pipe_ctx->stream_res.audio_output.crtc_info, 1614 + &audio_output.crtc_info, 1613 1615 &pipe_ctx->stream->audio_info, 1614 - &pipe_ctx->stream_res.audio_output.dp_link_info); 1616 + &audio_output.dp_link_info); 1615 1617 1616 1618 if (dc->config.disable_hbr_audio_dp2) 1617 1619 if (pipe_ctx->stream_res.audio->funcs->az_disable_hbr_audio && ··· 1925 1923 1926 1924 get_edp_streams(context, edp_streams, &edp_stream_num); 1927 1925 1928 - // Check fastboot support, disable on DCE8 because of blank screens 1929 - if (edp_num && edp_stream_num && dc->ctx->dce_version != DCE_VERSION_8_0 && 1930 - dc->ctx->dce_version != DCE_VERSION_8_1 && 1931 - dc->ctx->dce_version != DCE_VERSION_8_3) { 1926 + /* Check fastboot support, disable on DCE 6-8 because of blank screens */ 1927 + if (edp_num && edp_stream_num && dc->ctx->dce_version < DCE_VERSION_10_0) { 1932 1928 for (i = 0; i < edp_num; i++) { 1933 1929 edp_link = edp_links[i]; 1934 1930 if (edp_link != edp_streams[0]->link) ··· 2385 2385 if (pipe_ctx->stream->signal != SIGNAL_TYPE_HDMI_TYPE_A) 2386 2386 continue; 2387 2387 if (pipe_ctx->stream_res.audio != NULL) { 2388 - build_audio_output(context, pipe_ctx, &pipe_ctx->stream_res.audio_output); 2388 + struct audio_output audio_output; 2389 + 2390 + build_audio_output(context, pipe_ctx, &audio_output); 2389 2391 2390 2392 if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) { 2391 2393 struct dtbclk_dto_params dto_params = {0}; ··· 2398 2396 pipe_ctx->stream_res.audio->funcs->wall_dto_setup( 2399 2397 pipe_ctx->stream_res.audio, 2400 2398 pipe_ctx->stream->signal, 2401 - &pipe_ctx->stream_res.audio_output.crtc_info, 2402 - &pipe_ctx->stream_res.audio_output.pll_info); 2399 + &audio_output.crtc_info, 2400 + &audio_output.pll_info); 2403 2401 } else 2404 2402 pipe_ctx->stream_res.audio->funcs->wall_dto_setup( 2405 2403 pipe_ctx->stream_res.audio, 2406 2404 pipe_ctx->stream->signal, 2407 - &pipe_ctx->stream_res.audio_output.crtc_info, 2408 - &pipe_ctx->stream_res.audio_output.pll_info); 2405 + &audio_output.crtc_info, 2406 + &audio_output.pll_info); 2409 2407 break; 2410 2408 } 2411 2409 } ··· 2425 2423 continue; 2426 2424 2427 2425 if (pipe_ctx->stream_res.audio != NULL) { 2428 - build_audio_output(context, 2429 - pipe_ctx, 2430 - &pipe_ctx->stream_res.audio_output); 2426 + struct audio_output audio_output = {0}; 2427 + 2428 + build_audio_output(context, pipe_ctx, &audio_output); 2431 2429 2432 2430 pipe_ctx->stream_res.audio->funcs->wall_dto_setup( 2433 2431 pipe_ctx->stream_res.audio, 2434 2432 pipe_ctx->stream->signal, 2435 - &pipe_ctx->stream_res.audio_output.crtc_info, 2436 - &pipe_ctx->stream_res.audio_output.pll_info); 2433 + 
&audio_output.crtc_info, 2434 + &audio_output.pll_info); 2437 2435 break; 2438 2436 } 2439 2437 }
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
··· 55 55 #include "dce/dmub_hw_lock_mgr.h" 56 56 #include "dc_trace.h" 57 57 #include "dce/dmub_outbox.h" 58 - #include "link.h" 58 + #include "link_service.h" 59 59 #include "dc_state_priv.h" 60 60 61 61 #define DC_LOGGER \
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
··· 54 54 #include "dpcd_defs.h" 55 55 #include "inc/link_enc_cfg.h" 56 56 #include "link_hwss.h" 57 - #include "link.h" 57 + #include "link_service.h" 58 58 #include "dc_state_priv.h" 59 59 60 60 #define DC_LOGGER \
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn21/dcn21_hwseq.c
··· 35 35 #include "hw/clk_mgr.h" 36 36 #include "dc_dmub_srv.h" 37 37 #include "abm.h" 38 - #include "link.h" 38 + #include "link_service.h" 39 39 40 40 #define DC_LOGGER_INIT(logger) 41 41
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
··· 50 50 #include "dpcd_defs.h" 51 51 #include "dcn20/dcn20_hwseq.h" 52 52 #include "dcn30/dcn30_resource.h" 53 - #include "link.h" 53 + #include "link_service.h" 54 54 #include "dc_state_priv.h" 55 55 56 56
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
··· 45 45 #include "link_hwss.h" 46 46 #include "dpcd_defs.h" 47 47 #include "dce/dmub_outbox.h" 48 - #include "link.h" 48 + #include "link_service.h" 49 49 #include "dcn10/dcn10_hwseq.h" 50 50 #include "dcn21/dcn21_hwseq.h" 51 51 #include "inc/link_enc_cfg.h"
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
··· 46 46 #include "link_hwss.h" 47 47 #include "dpcd_defs.h" 48 48 #include "dce/dmub_outbox.h" 49 - #include "link.h" 49 + #include "link_service.h" 50 50 #include "dcn10/dcn10_hwseq.h" 51 51 #include "inc/link_enc_cfg.h" 52 52 #include "dcn30/dcn30_vpg.h"
+2 -2
drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
··· 49 49 #include "dcn20/dcn20_optc.h" 50 50 #include "dce/dmub_hw_lock_mgr.h" 51 51 #include "dcn32/dcn32_resource.h" 52 - #include "link.h" 52 + #include "link_service.h" 53 53 #include "../dcn20/dcn20_hwseq.h" 54 54 #include "dc_state_priv.h" 55 55 ··· 1052 1052 } 1053 1053 1054 1054 /* Enable DSC hw block */ 1055 - dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->hblank_borrow + 1055 + dsc_cfg.pic_width = (stream->timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding + 1056 1056 stream->timing.h_border_left + stream->timing.h_border_right) / opp_cnt; 1057 1057 dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom; 1058 1058 dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
+1 -1
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
··· 46 46 #include "link_hwss.h" 47 47 #include "dpcd_defs.h" 48 48 #include "dce/dmub_outbox.h" 49 - #include "link.h" 49 + #include "link_service.h" 50 50 #include "dcn10/dcn10_hwseq.h" 51 51 #include "inc/link_enc_cfg.h" 52 52 #include "dcn30/dcn30_vpg.h"
+7 -4
drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
··· 25 25 #include "dpcd_defs.h" 26 26 #include "clk_mgr.h" 27 27 #include "dsc.h" 28 - #include "link.h" 28 + #include "link_service.h" 29 29 30 30 #include "dce/dmub_hw_lock_mgr.h" 31 31 #include "dcn10/dcn10_cm_common.h" ··· 810 810 if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal))) 811 811 dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx); 812 812 813 - /* if we are borrowing from hblank, h_addressable needs to be adjusted */ 814 - if (dc->debug.enable_hblank_borrow) 815 - patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->hblank_borrow; 813 + /* if we are padding, h_addressable needs to be adjusted */ 814 + if (dc->debug.enable_hblank_borrow) { 815 + patched_crtc_timing.h_addressable = patched_crtc_timing.h_addressable + pipe_ctx->dsc_padding_params.dsc_hactive_padding; 816 + patched_crtc_timing.h_total = patched_crtc_timing.h_total + pipe_ctx->dsc_padding_params.dsc_htotal_padding; 817 + patched_crtc_timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz; 818 + } 816 819 817 820 pipe_ctx->stream_res.tg->funcs->program_timing( 818 821 pipe_ctx->stream_res.tg,
+10 -5
drivers/gpu/drm/amd/display/dc/inc/core_types.h
··· 228 228 enum dc_status (*update_dc_state_for_encoder_switch)(struct dc_link *link, 229 229 struct dc_link_settings *link_setting, 230 230 uint8_t pipe_count, 231 - struct pipe_ctx *pipes); 231 + struct pipe_ctx *pipes, 232 + struct audio_output *audio_output); 232 233 }; 233 234 234 235 struct audio_support{ ··· 361 360 uint8_t gsl_group; 362 361 363 362 struct test_pattern_params test_pattern_params; 364 - 365 - struct audio_output audio_output; 366 363 }; 367 364 368 365 struct plane_resource { ··· 436 437 P_STATE_V_BLANK_SUB_VP, 437 438 }; 438 439 440 + struct dsc_padding_params { 441 + /* pixels borrowed from hblank to hactive */ 442 + uint8_t dsc_hactive_padding; 443 + uint32_t dsc_htotal_padding; 444 + uint32_t dsc_pix_clk_100hz; 445 + }; 446 + 439 447 struct pipe_ctx { 440 448 struct dc_plane_state *plane_state; 441 449 struct dc_stream_state *stream; ··· 500 494 /* subvp_index: only valid if the pipe is a SUBVP_MAIN*/ 501 495 uint8_t subvp_index; 502 496 struct pixel_rate_divider pixel_rate_divider; 503 - /* pixels borrowed from hblank to hactive */ 504 - uint8_t hblank_borrow; 497 + struct dsc_padding_params dsc_padding_params; 505 498 /* next vupdate */ 506 499 uint32_t next_vupdate; 507 500 uint32_t wait_frame_count;
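The new struct dsc_padding_params keeps the three values that must travel together: the extra active pixels, the matching h_total stretch, and the adjusted pixel clock. A sketch of how consumers in this series apply them (compare the dcn401_hwseq.c hunk above):

/* Sketch, mirroring the dcn401 timing-program changes in this series. */
timing.h_addressable += pipe_ctx->dsc_padding_params.dsc_hactive_padding;
timing.h_total += pipe_ctx->dsc_padding_params.dsc_htotal_padding;
if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0)
    timing.pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz;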
+1
drivers/gpu/drm/amd/display/dc/inc/resource.h
··· 45 45 struct resource_caps { 46 46 int num_timing_generator; 47 47 int num_opp; 48 + int num_dpp; 48 49 int num_video_plane; 49 50 int num_audio; 50 51 int num_stream_encoder;
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dce120/dce120_resource.c
··· 67 67 #include "reg_helper.h" 68 68 69 69 #include "dce100/dce100_resource.h" 70 - #include "link.h" 70 + #include "link_service.h" 71 71 72 72 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL 73 73 #define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f
+10 -1
drivers/gpu/drm/amd/display/dc/resource/dce60/dce60_resource.c
··· 881 881 context->bw_ctx.bw.dce.dispclk_khz = 681000; 882 882 context->bw_ctx.bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER_CZ; 883 883 } else { 884 - context->bw_ctx.bw.dce.dispclk_khz = 0; 884 + /* On DCE 6.0 and 6.4 the PLL0 is both the display engine clock and 885 + * the DP clock, and shouldn't be turned off. Just select the display 886 + * clock value from its low power mode. 887 + */ 888 + if (dc->ctx->dce_version == DCE_VERSION_6_0 || 889 + dc->ctx->dce_version == DCE_VERSION_6_4) 890 + context->bw_ctx.bw.dce.dispclk_khz = 352000; 891 + else 892 + context->bw_ctx.bw.dce.dispclk_khz = 0; 893 + 885 894 context->bw_ctx.bw.dce.yclk_khz = 0; 886 895 } 887 896
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
··· 85 85 #include "vm_helper.h" 86 86 87 87 #include "link_enc_cfg.h" 88 - #include "link.h" 88 + #include "link_service.h" 89 89 90 90 #define DC_LOGGER_INIT(logger) 91 91
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn30/dcn30_resource.c
··· 60 60 #include "dml/display_mode_vba.h" 61 61 #include "dcn30/dcn30_dccg.h" 62 62 #include "dcn10/dcn10_resource.h" 63 - #include "link.h" 63 + #include "link_service.h" 64 64 #include "dce/dce_panel_cntl.h" 65 65 66 66 #include "dcn30/dcn30_dwb.h"
+2 -1
drivers/gpu/drm/amd/display/dc/resource/dcn302/dcn302_resource.c
··· 47 47 48 48 #include "dcn10/dcn10_resource.h" 49 49 50 - #include "link.h" 50 + #include "link_service.h" 51 + 51 52 #include "dce/dce_abm.h" 52 53 #include "dce/dce_audio.h" 53 54 #include "dce/dce_aux.h"
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn303/dcn303_resource.c
··· 47 47 48 48 #include "dcn10/dcn10_resource.h" 49 49 50 - #include "link.h" 50 + #include "link_service.h" 51 51 52 52 #include "dce/dce_abm.h" 53 53 #include "dce/dce_audio.h"
+3 -2
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.c
··· 2239 2239 enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, 2240 2240 struct dc_link_settings *link_setting, 2241 2241 uint8_t pipe_count, 2242 - struct pipe_ctx *pipes) 2242 + struct pipe_ctx *pipes, 2243 + struct audio_output *audio_output) 2243 2244 { 2244 2245 struct dc_state *state = link->dc->current_state; 2245 2246 int i; ··· 2255 2254 2256 2255 // Setup audio 2257 2256 if (pipes[i].stream_res.audio != NULL) 2258 - build_audio_output(state, &pipes[i], &pipes[i].stream_res.audio_output); 2257 + build_audio_output(state, &pipes[i], &audio_output[i]); 2259 2258 } 2260 2259 #else 2261 2260 /* This DCN requires rate divider updates and audio reprogramming to allow DP1<-->DP2 link rate switching,
+2 -1
drivers/gpu/drm/amd/display/dc/resource/dcn31/dcn31_resource.h
··· 69 69 enum dc_status dcn31_update_dc_state_for_encoder_switch(struct dc_link *link, 70 70 struct dc_link_settings *link_setting, 71 71 uint8_t pipe_count, 72 - struct pipe_ctx *pipes); 72 + struct pipe_ctx *pipes, 73 + struct audio_output *audio_output); 73 74 74 75 /*temp: B0 specific before switch to dcn313 headers*/ 75 76 #ifndef regPHYPLLF_PIXCLK_RESYNC_CNTL
+2 -2
drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c
··· 69 69 #include "dml/display_mode_vba.h" 70 70 #include "dcn32/dcn32_dccg.h" 71 71 #include "dcn10/dcn10_resource.h" 72 - #include "link.h" 72 + #include "link_service.h" 73 73 #include "dcn31/dcn31_panel_cntl.h" 74 74 75 75 #include "dcn30/dcn30_dwb.h" ··· 2852 2852 free_pipe->plane_res.xfm = pool->transforms[free_pipe_idx]; 2853 2853 free_pipe->plane_res.dpp = pool->dpps[free_pipe_idx]; 2854 2854 free_pipe->plane_res.mpcc_inst = pool->dpps[free_pipe_idx]->inst; 2855 - free_pipe->hblank_borrow = otg_master->hblank_borrow; 2855 + free_pipe->dsc_padding_params = otg_master->dsc_padding_params; 2856 2856 if (free_pipe->stream->timing.flags.DSC == 1) { 2857 2857 dcn20_acquire_dsc(free_pipe->stream->ctx->dc, 2858 2858 &new_ctx->res_ctx,
+1 -1
drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c
··· 72 72 #include "dml/display_mode_vba.h" 73 73 #include "dcn32/dcn32_dccg.h" 74 74 #include "dcn10/dcn10_resource.h" 75 - #include "link.h" 75 + #include "link_service.h" 76 76 #include "dcn31/dcn31_panel_cntl.h" 77 77 78 78 #include "dcn30/dcn30_dwb.h"
+1 -4
drivers/gpu/drm/amd/display/dc/resource/dcn35/dcn35_resource.c
··· 61 61 #include "dcn31/dcn31_hpo_dp_stream_encoder.h" 62 62 #include "dcn31/dcn31_hpo_dp_link_encoder.h" 63 63 #include "dcn32/dcn32_hpo_dp_link_encoder.h" 64 - #include "link.h" 64 + #include "link_service.h" 65 65 #include "dcn31/dcn31_apg.h" 66 66 #include "dcn32/dcn32_dio_link_encoder.h" 67 67 #include "dcn31/dcn31_vpg.h" ··· 1896 1896 dc->caps.color.mpc.ogam_rom_caps.hlg = 0; 1897 1897 dc->caps.color.mpc.ocsc = 1; 1898 1898 dc->caps.color.mpc.preblend = true; 1899 - 1900 - dc->caps.num_of_host_routers = 2; 1901 - dc->caps.num_of_dpias_per_host_router = 2; 1902 1899 1903 1900 dc->caps.num_of_host_routers = 2; 1904 1901 dc->caps.num_of_dpias_per_host_router = 2;
+1 -4
drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c
··· 40 40 #include "dcn31/dcn31_hpo_dp_stream_encoder.h" 41 41 #include "dcn31/dcn31_hpo_dp_link_encoder.h" 42 42 #include "dcn32/dcn32_hpo_dp_link_encoder.h" 43 - #include "link.h" 43 + #include "link_service.h" 44 44 #include "dcn31/dcn31_apg.h" 45 45 #include "dcn32/dcn32_dio_link_encoder.h" 46 46 #include "dcn31/dcn31_vpg.h" ··· 1868 1868 dc->caps.color.mpc.ogam_rom_caps.hlg = 0; 1869 1869 dc->caps.color.mpc.ocsc = 1; 1870 1870 dc->caps.color.mpc.preblend = true; 1871 - 1872 - dc->caps.num_of_host_routers = 2; 1873 - dc->caps.num_of_dpias_per_host_router = 2; 1874 1871 1875 1872 dc->caps.num_of_host_routers = 2; 1876 1873 dc->caps.num_of_dpias_per_host_router = 2;
+1 -4
drivers/gpu/drm/amd/display/dc/resource/dcn36/dcn36_resource.c
··· 40 40 #include "dcn31/dcn31_hpo_dp_stream_encoder.h" 41 41 #include "dcn31/dcn31_hpo_dp_link_encoder.h" 42 42 #include "dcn32/dcn32_hpo_dp_link_encoder.h" 43 - #include "link.h" 43 + #include "link_service.h" 44 44 #include "dcn31/dcn31_apg.h" 45 45 #include "dcn32/dcn32_dio_link_encoder.h" 46 46 #include "dcn31/dcn31_vpg.h" ··· 1869 1869 dc->caps.color.mpc.ogam_rom_caps.hlg = 0; 1870 1870 dc->caps.color.mpc.ocsc = 1; 1871 1871 dc->caps.color.mpc.preblend = true; 1872 - 1873 - dc->caps.num_of_host_routers = 2; 1874 - dc->caps.num_of_dpias_per_host_router = 2; 1875 1872 1876 1873 dc->caps.num_of_host_routers = 2; 1877 1874 dc->caps.num_of_dpias_per_host_router = 2;
+4 -1
drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
··· 50 50 #include "dml/display_mode_vba.h" 51 51 #include "dcn401/dcn401_dccg.h" 52 52 #include "dcn10/dcn10_resource.h" 53 - #include "link.h" 53 + #include "link_service.h" 54 54 #include "link_enc_cfg.h" 55 55 #include "dcn31/dcn31_panel_cntl.h" 56 56 ··· 1698 1698 struct pixel_clk_params *pixel_clk_params = &pipe_ctx->stream_res.pix_clk_params; 1699 1699 1700 1700 pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz; 1701 + 1702 + if (pipe_ctx->dsc_padding_params.dsc_hactive_padding != 0) 1703 + pixel_clk_params->requested_pix_clk_100hz = pipe_ctx->dsc_padding_params.dsc_pix_clk_100hz; 1701 1704 1702 1705 if (!pipe_ctx->stream->ctx->dc->config.unify_link_enc_assignment) 1703 1706 link_enc = link_enc_cfg_get_link_enc(link);
+28 -25
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.c
··· 89 89 void dmub_dcn32_reset(struct dmub_srv *dmub) 90 90 { 91 91 union dmub_gpint_data_register cmd; 92 - const uint32_t timeout = 30; 93 - uint32_t in_reset, scratch, i; 92 + const uint32_t timeout = 100000; 93 + uint32_t in_reset, is_enabled, scratch, i, pwait_mode; 94 94 95 95 REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); 96 + REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_enabled); 96 97 97 - if (in_reset == 0) { 98 + if (in_reset == 0 && is_enabled != 0) { 98 99 cmd.bits.status = 1; 99 100 cmd.bits.command_code = DMUB_GPINT__STOP_FW; 100 101 cmd.bits.param = 0; 101 102 102 103 dmub->hw_funcs.set_gpint(dmub, cmd); 103 104 104 - /** 105 - * Timeout covers both the ACK and the wait 106 - * for remaining work to finish. 107 - * 108 - * This is mostly bound by the PHY disable sequence. 109 - * Each register check will be greater than 1us, so 110 - * don't bother using udelay. 111 - */ 112 - 113 105 for (i = 0; i < timeout; ++i) { 114 106 if (dmub->hw_funcs.is_gpint_acked(dmub, cmd)) 115 107 break; 108 + 109 + udelay(1); 116 110 } 117 111 118 112 for (i = 0; i < timeout; ++i) { 119 - scratch = dmub->hw_funcs.get_gpint_response(dmub); 113 + scratch = REG_READ(DMCUB_SCRATCH7); 120 114 if (scratch == DMUB_GPINT__STOP_FW_RESPONSE) 121 115 break; 116 + 117 + udelay(1); 122 118 } 123 119 120 + for (i = 0; i < timeout; ++i) { 121 + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); 122 + if (pwait_mode & (1 << 0)) 123 + break; 124 + 125 + udelay(1); 126 + } 124 127 /* Force reset in case we timed out, DMCUB is likely hung. */ 125 128 } 126 129 127 - REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); 128 - REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); 129 - REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1); 130 + if (is_enabled) { 131 + REG_UPDATE(DMCUB_CNTL2, DMCUB_SOFT_RESET, 1); 132 + udelay(1); 133 + REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0); 134 + } 135 + 130 136 REG_WRITE(DMCUB_INBOX1_RPTR, 0); 131 137 REG_WRITE(DMCUB_INBOX1_WPTR, 0); 132 138 REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); ··· 141 135 REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); 142 136 REG_WRITE(DMCUB_SCRATCH0, 0); 143 137 144 - /* Clear the GPINT command manually so we don't reset again. */ 138 + /* Clear the GPINT command manually so we don't send anything during boot. 
*/ 145 139 cmd.all = 0; 146 140 dmub->hw_funcs.set_gpint(dmub, cmd); 147 141 } ··· 425 419 426 420 void dmub_dcn32_get_diagnostic_data(struct dmub_srv *dmub) 427 421 { 428 - uint32_t is_dmub_enabled, is_soft_reset, is_sec_reset; 429 - uint32_t is_traceport_enabled, is_cw0_enabled, is_cw6_enabled; 422 + uint32_t is_dmub_enabled, is_soft_reset, is_pwait; 423 + uint32_t is_traceport_enabled, is_cw6_enabled; 430 424 struct dmub_timeout_info timeout = {0}; 431 425 432 426 if (!dmub) ··· 476 470 REG_GET(DMCUB_CNTL, DMCUB_ENABLE, &is_dmub_enabled); 477 471 dmub->debug.is_dmcub_enabled = is_dmub_enabled; 478 472 473 + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &is_pwait); 474 + dmub->debug.is_pwait = is_pwait; 475 + 479 476 REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &is_soft_reset); 480 477 dmub->debug.is_dmcub_soft_reset = is_soft_reset; 481 478 482 - REG_GET(DMCUB_SEC_CNTL, DMCUB_SEC_RESET_STATUS, &is_sec_reset); 483 - dmub->debug.is_dmcub_secure_reset = is_sec_reset; 484 - 485 479 REG_GET(DMCUB_CNTL, DMCUB_TRACEPORT_EN, &is_traceport_enabled); 486 480 dmub->debug.is_traceport_en = is_traceport_enabled; 487 - 488 - REG_GET(DMCUB_REGION3_CW0_TOP_ADDRESS, DMCUB_REGION3_CW0_ENABLE, &is_cw0_enabled); 489 - dmub->debug.is_cw0_enabled = is_cw0_enabled; 490 481 491 482 REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled); 492 483 dmub->debug.is_cw6_enabled = is_cw6_enabled;
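The reworked dmub_dcn32_reset() replaces the bare register spin with three bounded polls — GPINT ack, the DMUB_GPINT__STOP_FW_RESPONSE value in SCRATCH7, then PWAIT entry — each giving up after 100000 iterations of udelay(1), roughly 100 ms. The idiom, sketched with a hypothetical predicate:

for (i = 0; i < timeout; ++i) {
    if (condition_met())  /* hypothetical: read and test a status register */
        break;
    udelay(1);
}
/* On expiry, fall through and force the reset anyway. */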
+7 -1
drivers/gpu/drm/amd/display/dmub/src/dmub_dcn32.h
··· 89 89 DMUB_SR(DMCUB_REGION5_OFFSET) \ 90 90 DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \ 91 91 DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \ 92 + DMUB_SR(DMCUB_REGION6_OFFSET) \ 93 + DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \ 94 + DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \ 92 95 DMUB_SR(DMCUB_SCRATCH0) \ 93 96 DMUB_SR(DMCUB_SCRATCH1) \ 94 97 DMUB_SR(DMCUB_SCRATCH2) \ ··· 158 155 DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \ 159 156 DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \ 160 157 DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \ 158 + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \ 159 + DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \ 161 160 DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \ 162 161 DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \ 163 162 DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \ ··· 167 162 DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \ 168 163 DMUB_SF(DMCUB_REGION3_TMR_AXI_SPACE, DMCUB_REGION3_TMR_AXI_SPACE) \ 169 164 DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \ 170 - DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) 165 + DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \ 166 + DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS) 171 167 172 168 struct dmub_srv_dcn32_reg_offset { 173 169 #define DMUB_SR(reg) uint32_t reg;
+66 -32
drivers/gpu/drm/amd/include/amd_shared.h
··· 239 239 AMD_HARVEST_IP_DMU_MASK = 0x4, 240 240 }; 241 241 242 + /** 243 + * enum DC_FEATURE_MASK - Bits that control DC feature defaults 244 + */ 242 245 enum DC_FEATURE_MASK { 243 246 //Default value can be found at "uint amdgpu_dc_feature_mask" 244 - DC_FBC_MASK = (1 << 0), //0x1, disabled by default 245 - DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default 246 - DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default 247 - DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1 248 - DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default 249 - DC_DISABLE_LTTPR_DP1_4A = (1 << 5), //0x20, disabled by default 250 - DC_DISABLE_LTTPR_DP2_0 = (1 << 6), //0x40, disabled by default 251 - DC_PSR_ALLOW_SMU_OPT = (1 << 7), //0x80, disabled by default 252 - DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), //0x100, disabled by default 253 - DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4 247 + /** 248 + * @DC_FBC_MASK: (0x1) disabled by default 249 + */ 250 + DC_FBC_MASK = (1 << 0), 251 + /** 252 + * @DC_MULTI_MON_PP_MCLK_SWITCH_MASK: (0x2) enabled by default 253 + */ 254 + DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), 255 + /** 256 + * @DC_DISABLE_FRACTIONAL_PWM_MASK: (0x4) disabled by default 257 + */ 258 + DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), 259 + /** 260 + * @DC_PSR_MASK: (0x8) disabled by default for DCN < 3.1 261 + */ 262 + DC_PSR_MASK = (1 << 3), 263 + /** 264 + * @DC_EDP_NO_POWER_SEQUENCING: (0x10) disabled by default 265 + */ 266 + DC_EDP_NO_POWER_SEQUENCING = (1 << 4), 267 + /** 268 + * @DC_DISABLE_LTTPR_DP1_4A: (0x20) disabled by default 269 + */ 270 + DC_DISABLE_LTTPR_DP1_4A = (1 << 5), 271 + /** 272 + * @DC_DISABLE_LTTPR_DP2_0: (0x40) disabled by default 273 + */ 274 + DC_DISABLE_LTTPR_DP2_0 = (1 << 6), 275 + /** 276 + * @DC_PSR_ALLOW_SMU_OPT: (0x80) disabled by default 277 + */ 278 + DC_PSR_ALLOW_SMU_OPT = (1 << 7), 279 + /** 280 + * @DC_PSR_ALLOW_MULTI_DISP_OPT: (0x100) disabled by default 281 + */ 282 + DC_PSR_ALLOW_MULTI_DISP_OPT = (1 << 8), 283 + /** 284 + * @DC_REPLAY_MASK: (0x200) disabled by default for DCN < 3.1.4 285 + */ 286 + DC_REPLAY_MASK = (1 << 9), 254 287 }; 255 288 256 289 /** ··· 291 258 */ 292 259 enum DC_DEBUG_MASK { 293 260 /** 294 - * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting 261 + * @DC_DISABLE_PIPE_SPLIT: (0x1) If set, disable pipe-splitting 295 262 */ 296 263 DC_DISABLE_PIPE_SPLIT = 0x1, 297 264 298 265 /** 299 - * @DC_DISABLE_STUTTER: If set, disable memory stutter mode 266 + * @DC_DISABLE_STUTTER: (0x2) If set, disable memory stutter mode 300 267 */ 301 268 DC_DISABLE_STUTTER = 0x2, 302 269 303 270 /** 304 - * @DC_DISABLE_DSC: If set, disable display stream compression 271 + * @DC_DISABLE_DSC: (0x4) If set, disable display stream compression 305 272 */ 306 273 DC_DISABLE_DSC = 0x4, 307 274 308 275 /** 309 - * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations 276 + * @DC_DISABLE_CLOCK_GATING: (0x8) If set, disable clock gating optimizations 310 277 */ 311 278 DC_DISABLE_CLOCK_GATING = 0x8, 312 279 313 280 /** 314 - * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU 281 + * @DC_DISABLE_PSR: (0x10) If set, disable Panel self refresh v1 and PSR-SU 315 282 */ 316 283 DC_DISABLE_PSR = 0x10, 317 284 318 285 /** 319 - * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even 286 + * @DC_FORCE_SUBVP_MCLK_SWITCH: (0x20) If set, force mclk switch in subvp, even 320 287 * if mclk switch in vblank is possible 321 288 */ 322 289 
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20, 323 290 324 291 /** 325 - * @DC_DISABLE_MPO: If set, disable multi-plane offloading 292 + * @DC_DISABLE_MPO: (0x40) If set, disable multi-plane offloading 326 293 */ 327 294 DC_DISABLE_MPO = 0x40, 328 295 329 296 /** 330 - * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA 297 + * @DC_ENABLE_DPIA_TRACE: (0x80) If set, enable trace logging for DPIA 331 298 */ 332 299 DC_ENABLE_DPIA_TRACE = 0x80, 333 300 334 301 /** 335 - * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version 302 + * @DC_ENABLE_DML2: (0x100) If set, force usage of DML2, even if the DCN version 336 303 * does not default to it. 337 304 */ 338 305 DC_ENABLE_DML2 = 0x100, 339 306 340 307 /** 341 - * @DC_DISABLE_PSR_SU: If set, disable PSR SU 308 + * @DC_DISABLE_PSR_SU: (0x200) If set, disable PSR SU 342 309 */ 343 310 DC_DISABLE_PSR_SU = 0x200, 344 311 345 312 /** 346 - * @DC_DISABLE_REPLAY: If set, disable Panel Replay 313 + * @DC_DISABLE_REPLAY: (0x400) If set, disable Panel Replay 347 314 */ 348 315 DC_DISABLE_REPLAY = 0x400, 349 316 350 317 /** 351 - * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time. 318 + * @DC_DISABLE_IPS: (0x800) If set, disable all Idle Power States, all the time. 352 319 * If more than one IPS debug bit is set, the lowest bit takes 353 320 * precedence. For example, if DC_FORCE_IPS_ENABLE and 354 321 * DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes ··· 357 324 DC_DISABLE_IPS = 0x800, 358 325 359 326 /** 360 - * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time, 327 + * @DC_DISABLE_IPS_DYNAMIC: (0x1000) If set, disable all IPS, all the time, 361 328 * *except* when driver goes into suspend. 362 329 */ 363 330 DC_DISABLE_IPS_DYNAMIC = 0x1000, 364 331 365 332 /** 366 - * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if 333 + * @DC_DISABLE_IPS2_DYNAMIC: (0x2000) If set, disable IPS2 (IPS1 allowed) if 367 334 * there is an enabled display. Otherwise, enable all IPS. 368 335 */ 369 336 DC_DISABLE_IPS2_DYNAMIC = 0x2000, 370 337 371 338 /** 372 - * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time. 339 + * @DC_FORCE_IPS_ENABLE: (0x4000) If set, force enable all IPS, all the time. 373 340 */ 374 341 DC_FORCE_IPS_ENABLE = 0x4000, 375 342 /** 376 - * @DC_DISABLE_ACPI_EDID: If set, don't attempt to fetch EDID for 343 + * @DC_DISABLE_ACPI_EDID: (0x8000) If set, don't attempt to fetch EDID for 377 344 * eDP display from ACPI _DDC method. 378 345 */ 379 346 DC_DISABLE_ACPI_EDID = 0x8000, 380 347 381 348 /** 382 - * @DC_DISABLE_HDMI_CEC: If set, disable HDMI-CEC feature in amdgpu driver. 349 + * @DC_DISABLE_HDMI_CEC: (0x10000) If set, disable HDMI-CEC feature in amdgpu driver. 383 350 */ 384 351 DC_DISABLE_HDMI_CEC = 0x10000, 385 352 386 353 /** 387 - * @DC_DISABLE_SUBVP_FAMS: If set, disable DCN Sub-Viewport & Firmware Assisted 354 + * @DC_DISABLE_SUBVP_FAMS: (0x20000) If set, disable DCN Sub-Viewport & Firmware Assisted 388 355 * Memory Clock Switching (FAMS) feature in amdgpu driver. 
389 356 */ 390 357 DC_DISABLE_SUBVP_FAMS = 0x20000, 391 358 /** 392 - * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: If set, disable support for custom brightness curves 359 + * @DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE: (0x40000) If set, disable support for custom 360 + * brightness curves 393 361 */ 394 362 DC_DISABLE_CUSTOM_BRIGHTNESS_CURVE = 0x40000, 395 363 396 364 /** 397 - * @DC_HDCP_LC_FORCE_FW_ENABLE: If set, use HDCP Locality Check FW 365 + * @DC_HDCP_LC_FORCE_FW_ENABLE: (0x80000) If set, use HDCP Locality Check FW 398 366 * path regardless of reported HW capabilities. 399 367 */ 400 368 DC_HDCP_LC_FORCE_FW_ENABLE = 0x80000, 401 369 402 370 /** 403 - * @DC_HDCP_LC_ENABLE_SW_FALLBACK: If set, upon HDCP Locality Check FW 371 + * @DC_HDCP_LC_ENABLE_SW_FALLBACK: (0x100000) If set, upon HDCP Locality Check FW 404 372 * path failure, retry using legacy SW path. 405 373 */ 406 374 DC_HDCP_LC_ENABLE_SW_FALLBACK = 0x100000, 407 375 408 376 /** 409 - * @DC_SKIP_DETECTION_LT: If set, skip detection link training 377 + * @DC_SKIP_DETECTION_LT: (0x200000) If set, skip detection link training 410 378 */ 411 379 DC_SKIP_DETECTION_LT = 0x200000, 412 380 };
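The amd_shared.h rework converts the trailing // comments on DC_FEATURE_MASK into kernel-doc member comments and states each bit's hex value, so the enum renders on the driver-core.rst page extended at the top of this series. The syntax scripts/kernel-doc expects, shown on a hypothetical enum:

/**
 * enum example_mask - Bits that control example defaults
 */
enum example_mask {
    /**
     * @EXAMPLE_FOO: (0x1) disabled by default
     */
    EXAMPLE_FOO = (1 << 0),
};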
+1
drivers/gpu/drm/amd/include/dm_pp_interface.h
··· 65 65 uint32_t view_resolution_cy; 66 66 enum amd_pp_display_config_type displayconfigtype; 67 67 uint32_t vertical_refresh; /* for active display */ 68 + uint32_t pixel_clock; /* Pixel clock in KHz (for HDMI only: normalized) */ 68 69 }; 69 70 70 71 #define MAX_NUM_DISPLAY 32
+4
drivers/gpu/drm/amd/include/kgd_pp_interface.h
··· 162 162 AMDGPU_PP_SENSOR_PEAK_PSTATE_SCLK, 163 163 AMDGPU_PP_SENSOR_PEAK_PSTATE_MCLK, 164 164 AMDGPU_PP_SENSOR_VCN_LOAD, 165 + AMDGPU_PP_SENSOR_NODEPOWERLIMIT, 166 + AMDGPU_PP_SENSOR_NODEPOWER, 167 + AMDGPU_PP_SENSOR_GPPTRESIDENCY, 168 + AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, 165 169 }; 166 170 167 171 enum amd_pp_task {
+43 -50
drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
··· 27 27 #include "amdgpu_smu.h" 28 28 #include "amdgpu_dpm_internal.h" 29 29 30 - void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) 30 + void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev) 31 31 { 32 32 struct drm_device *ddev = adev_to_drm(adev); 33 + struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg; 34 + struct single_display_configuration *display_cfg; 33 35 struct drm_crtc *crtc; 34 36 struct amdgpu_crtc *amdgpu_crtc; 37 + struct amdgpu_connector *conn; 38 + int num_crtcs = 0; 39 + int vrefresh; 40 + u32 vblank_in_pixels, vblank_time_us; 35 41 36 - adev->pm.dpm.new_active_crtcs = 0; 37 - adev->pm.dpm.new_active_crtc_count = 0; 38 - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 39 - list_for_each_entry(crtc, 40 - &ddev->mode_config.crtc_list, head) { 41 - amdgpu_crtc = to_amdgpu_crtc(crtc); 42 - if (amdgpu_crtc->enabled) { 43 - adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); 44 - adev->pm.dpm.new_active_crtc_count++; 45 - } 46 - } 47 - } 48 - } 49 - 50 - u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) 51 - { 52 - struct drm_device *dev = adev_to_drm(adev); 53 - struct drm_crtc *crtc; 54 - struct amdgpu_crtc *amdgpu_crtc; 55 - u32 vblank_in_pixels; 56 - u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ 42 + cfg->min_vblank_time = 0xffffffff; /* if the displays are off, vblank time is max */ 57 43 58 44 if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 59 - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 45 + list_for_each_entry(crtc, &ddev->mode_config.crtc_list, head) { 60 46 amdgpu_crtc = to_amdgpu_crtc(crtc); 61 - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 47 + 48 + /* The array should only contain active displays. */ 49 + if (!amdgpu_crtc->enabled) 50 + continue; 51 + 52 + conn = to_amdgpu_connector(amdgpu_crtc->connector); 53 + display_cfg = &adev->pm.pm_display_cfg.displays[num_crtcs++]; 54 + 55 + if (amdgpu_crtc->hw_mode.clock) { 56 + vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 57 + 62 58 vblank_in_pixels = 63 59 amdgpu_crtc->hw_mode.crtc_htotal * 64 60 (amdgpu_crtc->hw_mode.crtc_vblank_end - 65 61 amdgpu_crtc->hw_mode.crtc_vdisplay + 66 62 (amdgpu_crtc->v_border * 2)); 67 63 68 - vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; 64 + vblank_time_us = 65 + vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; 69 66 70 - /* we have issues with mclk switching with 71 - * refresh rates over 120 hz on the non-DC code. 67 + /* The legacy (non-DC) code has issues with mclk switching 68 + * with refresh rates over 120 Hz. Disable mclk switching. 72 69 */ 73 - if (drm_mode_vrefresh(&amdgpu_crtc->hw_mode) > 120) 70 + if (vrefresh > 120) 74 71 vblank_time_us = 0; 75 72 76 - break; 73 + /* Find minimum vblank time. */ 74 + if (vblank_time_us < cfg->min_vblank_time) 75 + cfg->min_vblank_time = vblank_time_us; 76 + 77 + /* Find vertical refresh rate of first active display. */ 78 + if (!cfg->vrefresh) 79 + cfg->vrefresh = vrefresh; 77 80 } 81 + 82 + if (amdgpu_crtc->crtc_id < cfg->crtc_index) { 83 + /* Find first active CRTC and its line time. 
*/ 84 + cfg->crtc_index = amdgpu_crtc->crtc_id; 85 + cfg->line_time_in_us = amdgpu_crtc->line_time; 86 + } 87 + 88 + display_cfg->controller_id = amdgpu_crtc->crtc_id; 89 + display_cfg->pixel_clock = conn->pixelclock_for_modeset; 78 90 } 79 91 } 80 92 81 - return vblank_time_us; 82 - } 83 - 84 - u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) 85 - { 86 - struct drm_device *dev = adev_to_drm(adev); 87 - struct drm_crtc *crtc; 88 - struct amdgpu_crtc *amdgpu_crtc; 89 - u32 vrefresh = 0; 90 - 91 - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { 92 - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 93 - amdgpu_crtc = to_amdgpu_crtc(crtc); 94 - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { 95 - vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); 96 - break; 97 - } 98 - } 99 - } 100 - 101 - return vrefresh; 93 + cfg->display_clk = adev->clock.default_dispclk; 94 + cfg->num_display = num_crtcs; 102 95 }
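The vblank-time arithmetic in the consolidated helper is easier to follow with a worked example. The numbers below are typical CEA-861 timings for a 1080p60 mode (assumed values, not from the patch):

    /* vblank time for 1080p60: htotal 2200, crtc_vblank_end 1125,
     * vdisplay 1080, no borders, pixel clock 148500 (hw_mode.clock
     * is in kHz).
     */
    u32 vblank_in_pixels = 2200 * (1125 - 1080 + 0 * 2);   /* 99000  */
    u32 vblank_time_us = vblank_in_pixels * 1000 / 148500; /* 666 us */

So a 60 Hz display leaves roughly 666 us of vblank per frame for mclk switching, well above the point where the over-120-Hz check zeroes the budget.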
+192 -42
drivers/gpu/drm/amd/pm/amdgpu_pm.c
··· 1421 1421 return -EINVAL; 1422 1422 } 1423 1423 1424 - static int amdgpu_hwmon_get_sensor_generic(struct amdgpu_device *adev, 1425 - enum amd_pp_sensors sensor, 1426 - void *query) 1424 + static int amdgpu_pm_get_sensor_generic(struct amdgpu_device *adev, 1425 + enum amd_pp_sensors sensor, 1426 + void *query) 1427 1427 { 1428 1428 int r, size = sizeof(uint32_t); 1429 1429 ··· 1456 1456 unsigned int value; 1457 1457 int r; 1458 1458 1459 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value); 1459 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_LOAD, &value); 1460 1460 if (r) 1461 1461 return r; 1462 1462 ··· 1480 1480 unsigned int value; 1481 1481 int r; 1482 1482 1483 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value); 1483 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_LOAD, &value); 1484 1484 if (r) 1485 1485 return r; 1486 1486 ··· 1504 1504 unsigned int value; 1505 1505 int r; 1506 1506 1507 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value); 1507 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value); 1508 1508 if (r) 1509 1509 return r; 1510 1510 ··· 1783 1783 uint32_t ss_power; 1784 1784 int r = 0, i; 1785 1785 1786 - r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); 1786 + r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power); 1787 1787 if (r == -EOPNOTSUPP) { 1788 1788 /* sensor not available on dGPU, try to read from APU */ 1789 1789 adev = NULL; ··· 1796 1796 } 1797 1797 mutex_unlock(&mgpu_info.mutex); 1798 1798 if (adev) 1799 - r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&ss_power); 1799 + r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&ss_power); 1800 1800 } 1801 1801 1802 1802 if (r) ··· 1906 1906 1907 1907 if (!amdgpu_device_supports_smart_shift(adev)) 1908 1908 *states = ATTR_STATE_UNSUPPORTED; 1909 - else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, 1910 - (void *)&ss_power)) 1909 + else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_APU_SHARE, 1910 + (void *)&ss_power)) 1911 1911 *states = ATTR_STATE_UNSUPPORTED; 1912 - else if (amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, 1913 - (void *)&ss_power)) 1912 + else if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_SS_DGPU_SHARE, 1913 + (void *)&ss_power)) 1914 1914 *states = ATTR_STATE_UNSUPPORTED; 1915 1915 1916 1916 return 0; ··· 2081 2081 * for user application to monitor various board reated attributes. 2082 2082 * 2083 2083 * The amdgpu driver provides a sysfs API for reporting board attributes. Presently, 2084 - * only two types of attributes are reported, baseboard temperature and 2085 - * gpu board temperature. Both of them are reported as binary files. 2084 + * seven types of attributes are reported. Baseboard temperature and 2085 + * gpu board temperature are reported as binary files. Npm status, current node power limit, 2086 + * max node power limit, node power and global ppt residency is reported as ASCII text file. 2086 2087 * 2087 2088 * * .. 
code-block:: console 2088 2089 * ··· 2091 2090 * 2092 2091 * hexdump /sys/bus/pci/devices/.../board/gpuboard_temp 2093 2092 * 2093 + * hexdump /sys/bus/pci/devices/.../board/npm_status 2094 + * 2095 + * hexdump /sys/bus/pci/devices/.../board/cur_node_power_limit 2096 + * 2097 + * hexdump /sys/bus/pci/devices/.../board/max_node_power_limit 2098 + * 2099 + * hexdump /sys/bus/pci/devices/.../board/node_power 2100 + * 2101 + * hexdump /sys/bus/pci/devices/.../board/global_ppt_resid 2094 2102 */ 2095 2103 2096 2104 /** ··· 2178 2168 return size; 2179 2169 } 2180 2170 2171 + /** 2172 + * DOC: cur_node_power_limit 2173 + * 2174 + * The amdgpu driver provides a sysfs API for retrieving current node power limit. 2175 + * The file cur_node_power_limit is used for this. 2176 + */ 2177 + static ssize_t amdgpu_show_cur_node_power_limit(struct device *dev, 2178 + struct device_attribute *attr, char *buf) 2179 + { 2180 + struct drm_device *ddev = dev_get_drvdata(dev); 2181 + struct amdgpu_device *adev = drm_to_adev(ddev); 2182 + u32 nplimit; 2183 + int r; 2184 + 2185 + /* get the current node power limit */ 2186 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWERLIMIT, 2187 + (void *)&nplimit); 2188 + if (r) 2189 + return r; 2190 + 2191 + return sysfs_emit(buf, "%u\n", nplimit); 2192 + } 2193 + 2194 + /** 2195 + * DOC: node_power 2196 + * 2197 + * The amdgpu driver provides a sysfs API for retrieving current node power. 2198 + * The file node_power is used for this. 2199 + */ 2200 + static ssize_t amdgpu_show_node_power(struct device *dev, 2201 + struct device_attribute *attr, char *buf) 2202 + { 2203 + struct drm_device *ddev = dev_get_drvdata(dev); 2204 + struct amdgpu_device *adev = drm_to_adev(ddev); 2205 + u32 npower; 2206 + int r; 2207 + 2208 + /* get the node power */ 2209 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER, 2210 + (void *)&npower); 2211 + if (r) 2212 + return r; 2213 + 2214 + return sysfs_emit(buf, "%u\n", npower); 2215 + } 2216 + 2217 + /** 2218 + * DOC: npm_status 2219 + * 2220 + * The amdgpu driver provides a sysfs API for retrieving current node power management status. 2221 + * The file npm_status is used for this. It shows the status as enabled or disabled based on 2222 + * current node power value. If node power is zero, status is disabled else enabled. 2223 + */ 2224 + static ssize_t amdgpu_show_npm_status(struct device *dev, 2225 + struct device_attribute *attr, char *buf) 2226 + { 2227 + struct drm_device *ddev = dev_get_drvdata(dev); 2228 + struct amdgpu_device *adev = drm_to_adev(ddev); 2229 + u32 npower; 2230 + int r; 2231 + 2232 + /* get the node power */ 2233 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_NODEPOWER, 2234 + (void *)&npower); 2235 + if (r) 2236 + return r; 2237 + 2238 + return sysfs_emit(buf, "%s\n", npower ? "enabled" : "disabled"); 2239 + } 2240 + 2241 + /** 2242 + * DOC: global_ppt_resid 2243 + * 2244 + * The amdgpu driver provides a sysfs API for retrieving global ppt residency. 2245 + * The file global_ppt_resid is used for this. 
2246 + */ 2247 + static ssize_t amdgpu_show_global_ppt_resid(struct device *dev, 2248 + struct device_attribute *attr, char *buf) 2249 + { 2250 + struct drm_device *ddev = dev_get_drvdata(dev); 2251 + struct amdgpu_device *adev = drm_to_adev(ddev); 2252 + u32 gpptresid; 2253 + int r; 2254 + 2255 + /* get the global ppt residency */ 2256 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPPTRESIDENCY, 2257 + (void *)&gpptresid); 2258 + if (r) 2259 + return r; 2260 + 2261 + return sysfs_emit(buf, "%u\n", gpptresid); 2262 + } 2263 + 2264 + /** 2265 + * DOC: max_node_power_limit 2266 + * 2267 + * The amdgpu driver provides a sysfs API for retrieving maximum node power limit. 2268 + * The file max_node_power_limit is used for this. 2269 + */ 2270 + static ssize_t amdgpu_show_max_node_power_limit(struct device *dev, 2271 + struct device_attribute *attr, char *buf) 2272 + { 2273 + struct drm_device *ddev = dev_get_drvdata(dev); 2274 + struct amdgpu_device *adev = drm_to_adev(ddev); 2275 + u32 max_nplimit; 2276 + int r; 2277 + 2278 + /* get the max node power limit */ 2279 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, 2280 + (void *)&max_nplimit); 2281 + if (r) 2282 + return r; 2283 + 2284 + return sysfs_emit(buf, "%u\n", max_nplimit); 2285 + } 2286 + 2181 2287 static DEVICE_ATTR(baseboard_temp, 0444, amdgpu_get_baseboard_temp_metrics, NULL); 2182 2288 static DEVICE_ATTR(gpuboard_temp, 0444, amdgpu_get_gpuboard_temp_metrics, NULL); 2289 + static DEVICE_ATTR(cur_node_power_limit, 0444, amdgpu_show_cur_node_power_limit, NULL); 2290 + static DEVICE_ATTR(node_power, 0444, amdgpu_show_node_power, NULL); 2291 + static DEVICE_ATTR(global_ppt_resid, 0444, amdgpu_show_global_ppt_resid, NULL); 2292 + static DEVICE_ATTR(max_node_power_limit, 0444, amdgpu_show_max_node_power_limit, NULL); 2293 + static DEVICE_ATTR(npm_status, 0444, amdgpu_show_npm_status, NULL); 2183 2294 2184 2295 static struct attribute *board_attrs[] = { 2185 2296 &dev_attr_baseboard_temp.attr, ··· 2767 2636 switch (channel) { 2768 2637 case PP_TEMP_JUNCTION: 2769 2638 /* get current junction temperature */ 2770 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 2771 - (void *)&temp); 2639 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 2640 + (void *)&temp); 2772 2641 break; 2773 2642 case PP_TEMP_EDGE: 2774 2643 /* get current edge temperature */ 2775 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, 2776 - (void *)&temp); 2644 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_EDGE_TEMP, 2645 + (void *)&temp); 2777 2646 break; 2778 2647 case PP_TEMP_MEM: 2779 2648 /* get current memory temperature */ 2780 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 2781 - (void *)&temp); 2649 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 2650 + (void *)&temp); 2782 2651 break; 2783 2652 default: 2784 2653 r = -EINVAL; ··· 3040 2909 u32 min_rpm = 0; 3041 2910 int r; 3042 2911 3043 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 3044 - (void *)&min_rpm); 2912 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 2913 + (void *)&min_rpm); 3045 2914 3046 2915 if (r) 3047 2916 return r; ··· 3057 2926 u32 max_rpm = 0; 3058 2927 int r; 3059 2928 3060 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 3061 - (void *)&max_rpm); 2929 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 2930 + (void *)&max_rpm); 3062 
2931 3063 2932 if (r) 3064 2933 return r; ··· 3191 3060 int r; 3192 3061 3193 3062 /* get the voltage */ 3194 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX, 3195 - (void *)&vddgfx); 3063 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDGFX, 3064 + (void *)&vddgfx); 3196 3065 if (r) 3197 3066 return r; 3198 3067 ··· 3208 3077 int r; 3209 3078 3210 3079 /* get the voltage */ 3211 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, 3212 - (void *)&vddboard); 3080 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, 3081 + (void *)&vddboard); 3213 3082 if (r) 3214 3083 return r; 3215 3084 ··· 3242 3111 return -EINVAL; 3243 3112 3244 3113 /* get the voltage */ 3245 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB, 3246 - (void *)&vddnb); 3114 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDNB, 3115 + (void *)&vddnb); 3247 3116 if (r) 3248 3117 return r; 3249 3118 ··· 3265 3134 u32 query = 0; 3266 3135 int r; 3267 3136 3268 - r = amdgpu_hwmon_get_sensor_generic(adev, sensor, (void *)&query); 3137 + r = amdgpu_pm_get_sensor_generic(adev, sensor, (void *)&query); 3269 3138 if (r) 3270 3139 return r; 3271 3140 ··· 3385 3254 int err; 3386 3255 u32 value; 3387 3256 3388 - if (amdgpu_sriov_vf(adev)) 3389 - return -EINVAL; 3390 - 3391 3257 err = kstrtou32(buf, 10, &value); 3392 3258 if (err) 3393 3259 return err; ··· 3415 3287 int r; 3416 3288 3417 3289 /* get the sclk */ 3418 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 3419 - (void *)&sclk); 3290 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 3291 + (void *)&sclk); 3420 3292 if (r) 3421 3293 return r; 3422 3294 ··· 3439 3311 int r; 3440 3312 3441 3313 /* get the sclk */ 3442 - r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 3443 - (void *)&mclk); 3314 + r = amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 3315 + (void *)&mclk); 3444 3316 if (r) 3445 3317 return r; 3446 3318 ··· 3726 3598 return 0; 3727 3599 } 3728 3600 3601 + if (attr == &sensor_dev_attr_power1_cap.dev_attr.attr && 3602 + amdgpu_virt_cap_is_rw(&adev->virt.virt_caps, AMDGPU_VIRT_CAP_POWER_LIMIT)) 3603 + effective_mode |= S_IWUSR; 3604 + 3729 3605 /* not implemented yet for APUs having < GC 9.3.0 (Renoir) */ 3730 3606 if (((adev->family == AMDGPU_FAMILY_SI) || 3731 3607 ((adev->flags & AMD_IS_APU) && (gc_ver < IP_VERSION(9, 3, 0)))) && ··· 3738 3606 3739 3607 /* not all products support both average and instantaneous */ 3740 3608 if (attr == &sensor_dev_attr_power1_average.dev_attr.attr && 3741 - amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, (void *)&tmp) == -EOPNOTSUPP) 3609 + amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_AVG_POWER, 3610 + (void *)&tmp) == -EOPNOTSUPP) 3742 3611 return 0; 3743 3612 if (attr == &sensor_dev_attr_power1_input.dev_attr.attr && 3744 - amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, (void *)&tmp) == -EOPNOTSUPP) 3613 + amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_GPU_INPUT_POWER, 3614 + (void *)&tmp) == -EOPNOTSUPP) 3745 3615 return 0; 3746 3616 3747 3617 /* hide max/min values if we can't both query and manage the fan */ ··· 3782 3648 /* only few boards support vddboard */ 3783 3649 if ((attr == &sensor_dev_attr_in2_input.dev_attr.attr || 3784 3650 attr == &sensor_dev_attr_in2_label.dev_attr.attr) && 3785 - amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, 3786 - (void *)&tmp) == -EOPNOTSUPP) 3651 + 
amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VDDBOARD, 3652 + (void *)&tmp) == -EOPNOTSUPP) 3787 3653 return 0; 3788 3654 3789 3655 /* no mclk on APUs other than gc 9,4,3*/ ··· 4665 4531 { 4666 4532 enum amdgpu_sriov_vf_mode mode; 4667 4533 uint32_t mask = 0; 4534 + uint32_t tmp; 4668 4535 int ret; 4669 4536 4670 4537 if (adev->pm.sysfs_initialized) ··· 4732 4597 &amdgpu_board_attr_group); 4733 4598 if (ret) 4734 4599 goto err_out0; 4600 + if (amdgpu_pm_get_sensor_generic(adev, AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT, 4601 + (void *)&tmp) != -EOPNOTSUPP) { 4602 + sysfs_add_file_to_group(&adev->dev->kobj, 4603 + &dev_attr_cur_node_power_limit.attr, 4604 + amdgpu_board_attr_group.name); 4605 + sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_node_power.attr, 4606 + amdgpu_board_attr_group.name); 4607 + sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_global_ppt_resid.attr, 4608 + amdgpu_board_attr_group.name); 4609 + sysfs_add_file_to_group(&adev->dev->kobj, 4610 + &dev_attr_max_node_power_limit.attr, 4611 + amdgpu_board_attr_group.name); 4612 + sysfs_add_file_to_group(&adev->dev->kobj, &dev_attr_npm_status.attr, 4613 + amdgpu_board_attr_group.name); 4614 + } 4735 4615 } 4736 4616 4737 4617 adev->pm.sysfs_initialized = true;
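Since the five new board files are ASCII (unlike the binary temperature metrics, which need hexdump), they can be parsed directly. A small userspace sketch, with a placeholder PCI address:

    #include <stdio.h>

    int main(void)
    {
        /* Placeholder device path; substitute the real PCI address. */
        const char *path =
            "/sys/bus/pci/devices/0000:03:00.0/board/node_power";
        unsigned int val;
        FILE *f = fopen(path, "r");

        if (!f)
            return 1;
        if (fscanf(f, "%u", &val) == 1)
            printf("node power: %u\n", val);
        fclose(f);
        return 0;
    }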
-4
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
··· 263 263 u32 voltage_response_time; 264 264 u32 backbias_response_time; 265 265 void *priv; 266 - u32 new_active_crtcs; 267 - int new_active_crtc_count; 268 - u32 current_active_crtcs; 269 - int current_active_crtc_count; 270 266 struct amdgpu_dpm_dynamic_state dyn_state; 271 267 struct amdgpu_dpm_fan fan; 272 268 u32 tdp_limit;
+1 -5
drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
··· 23 23 #ifndef __AMDGPU_DPM_INTERNAL_H__ 24 24 #define __AMDGPU_DPM_INTERNAL_H__ 25 25 26 - void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev); 27 - 28 - u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); 29 - 30 - u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); 26 + void amdgpu_dpm_get_display_cfg(struct amdgpu_device *adev); 31 27 32 28 #endif
+2 -2
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
··· 2299 2299 2300 2300 if (pi->sys_info.nb_dpm_enable) { 2301 2301 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || 2302 - pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || 2302 + pi->video_start || (adev->pm.pm_display_cfg.num_display >= 3) || 2303 2303 pi->disable_nb_ps3_in_battery; 2304 2304 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; 2305 2305 ps->dpm0_pg_nb_ps_hi = 0x2; ··· 2358 2358 return 0; 2359 2359 2360 2360 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || 2361 - (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); 2361 + (adev->pm.pm_display_cfg.num_display >= 3) || pi->video_start); 2362 2362 2363 2363 if (force_high) { 2364 2364 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
+3 -5
drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
··· 771 771 int i; 772 772 struct amdgpu_ps *ps; 773 773 u32 ui_class; 774 - bool single_display = adev->pm.dpm.new_active_crtc_count < 2; 774 + bool single_display = adev->pm.pm_display_cfg.num_display < 2; 775 775 776 776 /* check if the vblank period is too short to adjust the mclk */ 777 777 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { ··· 944 944 945 945 amdgpu_dpm_post_set_power_state(adev); 946 946 947 - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; 948 - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; 949 - 950 947 if (pp_funcs->force_performance_level) { 951 948 if (adev->pm.dpm.thermal_active) { 952 949 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; ··· 964 967 { 965 968 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 966 969 967 - amdgpu_dpm_get_active_displays(adev); 970 + if (!adev->dc_enabled) 971 + amdgpu_dpm_get_display_cfg(adev); 968 972 969 973 amdgpu_dpm_change_power_state_locked(adev); 970 974 }
+22 -43
drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
··· 3081 3081 static bool si_dpm_vblank_too_short(void *handle) 3082 3082 { 3083 3083 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 3084 - u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); 3084 + u32 vblank_time = adev->pm.pm_display_cfg.min_vblank_time; 3085 3085 /* we never hit the non-gddr5 limit so disable it */ 3086 3086 u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0; 3087 3087 ··· 3447 3447 static void si_apply_state_adjust_rules(struct amdgpu_device *adev, 3448 3448 struct amdgpu_ps *rps) 3449 3449 { 3450 + const struct amd_pp_display_configuration *display_cfg = 3451 + &adev->pm.pm_display_cfg; 3450 3452 struct si_ps *ps = si_get_ps(rps); 3451 3453 struct amdgpu_clock_and_voltage_limits *max_limits; 3452 - struct amdgpu_connector *conn; 3453 3454 bool disable_mclk_switching = false; 3454 3455 bool disable_sclk_switching = false; 3455 3456 u32 mclk, sclk; ··· 3489 3488 * For example, 4K 60Hz and 1080p 144Hz fall into this category. 3490 3489 * Find number of such displays connected. 3491 3490 */ 3492 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 3493 - if (!(adev->pm.dpm.new_active_crtcs & (1 << i)) || 3494 - !adev->mode_info.crtcs[i]->enabled) 3495 - continue; 3496 - 3497 - conn = to_amdgpu_connector(adev->mode_info.crtcs[i]->connector); 3498 - 3499 - if (conn->pixelclock_for_modeset > 297000) 3491 + for (i = 0; i < display_cfg->num_display; i++) { 3492 + /* The array only contains active displays. */ 3493 + if (display_cfg->displays[i].pixel_clock > 297000) 3500 3494 high_pixelclock_count++; 3501 3495 } 3502 3496 ··· 3519 3523 rps->ecclk = 0; 3520 3524 } 3521 3525 3522 - if ((adev->pm.dpm.new_active_crtc_count > 1) || 3526 + if ((adev->pm.pm_display_cfg.num_display > 1) || 3523 3527 si_dpm_vblank_too_short(adev)) 3524 3528 disable_mclk_switching = true; 3525 3529 ··· 3667 3671 ps->performance_levels[i].mclk, 3668 3672 max_limits->vddc, &ps->performance_levels[i].vddc); 3669 3673 btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, 3670 - adev->clock.current_dispclk, 3674 + display_cfg->display_clk, 3671 3675 max_limits->vddc, &ps->performance_levels[i].vddc); 3672 3676 } 3673 3677 ··· 4192 4196 4193 4197 static void si_program_display_gap(struct amdgpu_device *adev) 4194 4198 { 4199 + const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg; 4195 4200 u32 tmp, pipe; 4196 - int i; 4197 4201 4198 4202 tmp = RREG32(mmCG_DISPLAY_GAP_CNTL) & ~(CG_DISPLAY_GAP_CNTL__DISP1_GAP_MASK | CG_DISPLAY_GAP_CNTL__DISP2_GAP_MASK); 4199 - if (adev->pm.dpm.new_active_crtc_count > 0) 4203 + if (cfg->num_display > 0) 4200 4204 tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; 4201 4205 else 4202 4206 tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP1_GAP__SHIFT; 4203 4207 4204 - if (adev->pm.dpm.new_active_crtc_count > 1) 4208 + if (cfg->num_display > 1) 4205 4209 tmp |= R600_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; 4206 4210 else 4207 4211 tmp |= R600_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP2_GAP__SHIFT; ··· 4211 4215 tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); 4212 4216 pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; 4213 4217 4214 - if ((adev->pm.dpm.new_active_crtc_count > 0) && 4215 - (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) { 4216 - /* find the first active crtc */ 4217 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 4218 - if (adev->pm.dpm.new_active_crtcs & (1 << i)) 4219 - 
break; 4220 - } 4221 - if (i == adev->mode_info.num_crtc) 4222 - pipe = 0; 4223 - else 4224 - pipe = i; 4218 + if (cfg->num_display > 0 && pipe != cfg->crtc_index) { 4219 + pipe = cfg->crtc_index; 4225 4220 4226 4221 tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK; 4227 4222 tmp |= DCCG_DISP1_SLOW_SELECT(pipe); ··· 4223 4236 * This can be a problem on PowerXpress systems or if you want to use the card 4224 4237 * for offscreen rendering or compute if there are no crtcs enabled. 4225 4238 */ 4226 - si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0); 4239 + si_notify_smc_display_change(adev, cfg->num_display > 0); 4227 4240 } 4228 4241 4229 4242 static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) ··· 5532 5545 (pl->mclk <= pi->mclk_stutter_mode_threshold) && 5533 5546 !eg_pi->uvd_enabled && 5534 5547 (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) && 5535 - (adev->pm.dpm.new_active_crtc_count <= 2)) { 5548 + (adev->pm.pm_display_cfg.num_display <= 2)) { 5536 5549 level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN; 5537 5550 } 5538 5551 ··· 5681 5694 /* XXX validate against display requirements! */ 5682 5695 5683 5696 for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) { 5684 - if (adev->clock.current_dispclk <= 5697 + if (adev->pm.pm_display_cfg.display_clk <= 5685 5698 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) { 5686 5699 if (ulv->pl.vddc < 5687 5700 adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v) ··· 5835 5848 5836 5849 static int si_upload_smc_data(struct amdgpu_device *adev) 5837 5850 { 5838 - struct amdgpu_crtc *amdgpu_crtc = NULL; 5839 - int i; 5851 + const struct amd_pp_display_configuration *cfg = &adev->pm.pm_display_cfg; 5840 5852 u32 crtc_index = 0; 5841 5853 u32 mclk_change_block_cp_min = 0; 5842 5854 u32 mclk_change_block_cp_max = 0; 5843 - 5844 - for (i = 0; i < adev->mode_info.num_crtc; i++) { 5845 - if (adev->pm.dpm.new_active_crtcs & (1 << i)) { 5846 - amdgpu_crtc = adev->mode_info.crtcs[i]; 5847 - break; 5848 - } 5849 - } 5850 5855 5851 5856 /* When a display is plugged in, program these so that the SMC 5852 5857 * performs MCLK switching when it doesn't cause flickering. 5853 5858 * When no display is plugged in, there is no need to restrict 5854 5859 * MCLK switching, so program them to zero. 5855 5860 */ 5856 - if (adev->pm.dpm.new_active_crtc_count && amdgpu_crtc) { 5857 - crtc_index = amdgpu_crtc->crtc_id; 5861 + if (cfg->num_display) { 5862 + crtc_index = cfg->crtc_index; 5858 5863 5859 - if (amdgpu_crtc->line_time) { 5860 - mclk_change_block_cp_min = 200 / amdgpu_crtc->line_time; 5861 - mclk_change_block_cp_max = 100 / amdgpu_crtc->line_time; 5864 + if (cfg->line_time_in_us) { 5865 + mclk_change_block_cp_min = 200 / cfg->line_time_in_us; 5866 + mclk_change_block_cp_max = 100 / cfg->line_time_in_us; 5862 5867 } 5863 5868 } 5864 5869
+1 -10
drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
··· 1554 1554 struct amdgpu_device *adev = hwmgr->adev; 1555 1555 1556 1556 if (!adev->dc_enabled) { 1557 - amdgpu_dpm_get_active_displays(adev); 1558 - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 1559 - adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 1560 - adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 1561 - /* we have issues with mclk switching with 1562 - * refresh rates over 120 hz on the non-DC code. 1563 - */ 1564 - if (adev->pm.pm_display_cfg.vrefresh > 120) 1565 - adev->pm.pm_display_cfg.min_vblank_time = 0; 1566 - 1557 + amdgpu_dpm_get_display_cfg(adev); 1567 1558 pp_display_configuration_change(handle, 1568 1559 &adev->pm.pm_display_cfg); 1569 1560 }
+33 -22
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
··· 766 766 case IP_VERSION(13, 0, 14): 767 767 case IP_VERSION(13, 0, 12): 768 768 smu_v13_0_6_set_ppt_funcs(smu); 769 - smu_v13_0_6_set_temp_funcs(smu); 770 769 /* Enable pp_od_clk_voltage node */ 771 770 smu->od_enabled = true; 772 771 break; ··· 1315 1316 smu_power_profile_mode_get(smu, smu->power_profile_mode); 1316 1317 } 1317 1318 1319 + void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id) 1320 + { 1321 + struct smu_feature_cap *fea_cap = &smu->fea_cap; 1322 + 1323 + if (fea_id >= SMU_FEATURE_CAP_ID__COUNT) 1324 + return; 1325 + 1326 + set_bit(fea_id, fea_cap->cap_map); 1327 + } 1328 + 1329 + bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id) 1330 + { 1331 + struct smu_feature_cap *fea_cap = &smu->fea_cap; 1332 + 1333 + if (fea_id >= SMU_FEATURE_CAP_ID__COUNT) 1334 + return false; 1335 + 1336 + return test_bit(fea_id, fea_cap->cap_map); 1337 + } 1338 + 1339 + static void smu_feature_cap_init(struct smu_context *smu) 1340 + { 1341 + struct smu_feature_cap *fea_cap = &smu->fea_cap; 1342 + 1343 + bitmap_zero(fea_cap->cap_map, SMU_FEATURE_CAP_ID__COUNT); 1344 + } 1345 + 1318 1346 static int smu_sw_init(struct amdgpu_ip_block *ip_block) 1319 1347 { 1320 1348 struct amdgpu_device *adev = ip_block->adev; ··· 1373 1347 1374 1348 INIT_DELAYED_WORK(&smu->swctf_delayed_work, 1375 1349 smu_swctf_delayed_work_handler); 1350 + 1351 + smu_feature_cap_init(smu); 1376 1352 1377 1353 ret = smu_smc_table_sw_init(smu); 1378 1354 if (ret) { ··· 1925 1897 for (i = 0; i < adev->vcn.num_vcn_inst; i++) 1926 1898 smu_dpm_set_vcn_enable(smu, true, i); 1927 1899 smu_dpm_set_jpeg_enable(smu, true); 1928 - smu_dpm_set_vpe_enable(smu, true); 1929 1900 smu_dpm_set_umsch_mm_enable(smu, true); 1930 1901 smu_set_mall_enable(smu); 1931 1902 smu_set_gfx_cgpg(smu, true); ··· 2132 2105 } 2133 2106 smu_dpm_set_jpeg_enable(smu, false); 2134 2107 adev->jpeg.cur_state = AMD_PG_STATE_GATE; 2135 - smu_dpm_set_vpe_enable(smu, false); 2136 2108 smu_dpm_set_umsch_mm_enable(smu, false); 2137 2109 2138 2110 if (!smu->pm_enabled) ··· 2263 2237 return ret; 2264 2238 } 2265 2239 2266 - if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) { 2240 + if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL && smu->od_enabled) { 2267 2241 ret = smu_od_edit_dpm_table(smu, PP_OD_COMMIT_DPM_TABLE, NULL, 0); 2268 2242 if (ret) 2269 2243 return ret; ··· 3534 3508 3535 3509 bool smu_link_reset_is_support(struct smu_context *smu) 3536 3510 { 3537 - bool ret = false; 3538 - 3539 3511 if (!smu->pm_enabled) 3540 3512 return false; 3541 3513 3542 - if (smu->ppt_funcs && smu->ppt_funcs->link_reset_is_support) 3543 - ret = smu->ppt_funcs->link_reset_is_support(smu); 3544 - 3545 - return ret; 3514 + return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__LINK_RESET); 3546 3515 } 3547 3516 3548 3517 int smu_mode1_reset(struct smu_context *smu) ··· 4127 4106 */ 4128 4107 bool smu_reset_sdma_is_supported(struct smu_context *smu) 4129 4108 { 4130 - bool ret = false; 4131 - 4132 - if (smu->ppt_funcs && smu->ppt_funcs->reset_sdma_is_supported) 4133 - ret = smu->ppt_funcs->reset_sdma_is_supported(smu); 4134 - 4135 - return ret; 4109 + return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET); 4136 4110 } 4137 4111 4138 4112 int smu_reset_sdma(struct smu_context *smu, uint32_t inst_mask) ··· 4142 4126 4143 4127 bool smu_reset_vcn_is_supported(struct smu_context *smu) 4144 4128 { 4145 - bool ret = false; 4146 - 4147 - if (smu->ppt_funcs && smu->ppt_funcs->reset_vcn_is_supported) 4148 - ret = 
smu->ppt_funcs->reset_vcn_is_supported(smu); 4149 - 4150 - return ret; 4129 + return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__VCN_RESET); 4151 4130 } 4152 4131 4153 4132 int smu_reset_vcn(struct smu_context *smu, uint32_t inst_mask)
+15 -13
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
··· 528 528 */ 529 529 #define SMU_WBRF_EVENT_HANDLING_PACE 10 530 530 531 + enum smu_feature_cap_id { 532 + SMU_FEATURE_CAP_ID__LINK_RESET = 0, 533 + SMU_FEATURE_CAP_ID__SDMA_RESET, 534 + SMU_FEATURE_CAP_ID__VCN_RESET, 535 + SMU_FEATURE_CAP_ID__COUNT, 536 + }; 537 + 538 + struct smu_feature_cap { 539 + DECLARE_BITMAP(cap_map, SMU_FEATURE_CAP_ID__COUNT); 540 + }; 541 + 531 542 struct smu_context { 532 543 struct amdgpu_device *adev; 533 544 struct amdgpu_irq_src irq_source; ··· 561 550 struct amd_pp_display_configuration *display_config; 562 551 struct smu_baco_context smu_baco; 563 552 struct smu_temperature_range thermal_range; 553 + struct smu_feature_cap fea_cap; 564 554 void *od_settings; 565 555 566 556 struct smu_umd_pstate_table pstate_table; ··· 1285 1273 bool (*mode1_reset_is_support)(struct smu_context *smu); 1286 1274 1287 1275 /** 1288 - * @link_reset_is_support: Check if GPU supports link reset. 1289 - */ 1290 - bool (*link_reset_is_support)(struct smu_context *smu); 1291 - 1292 - /** 1293 1276 * @mode1_reset: Perform mode1 reset. 1294 1277 * 1295 1278 * Complete GPU reset. ··· 1434 1427 * @reset_sdma: message SMU to soft reset sdma instance. 1435 1428 */ 1436 1429 int (*reset_sdma)(struct smu_context *smu, uint32_t inst_mask); 1437 - /** 1438 - * @reset_sdma_is_supported: Check if support resets the SDMA engine. 1439 - */ 1440 - bool (*reset_sdma_is_supported)(struct smu_context *smu); 1441 1430 1442 1431 /** 1443 1432 * @reset_vcn: message SMU to soft reset vcn instance. 1444 1433 */ 1445 1434 int (*dpm_reset_vcn)(struct smu_context *smu, uint32_t inst_mask); 1446 - /** 1447 - * @reset_vcn_is_supported: Check if support resets vcn. 1448 - */ 1449 - bool (*reset_vcn_is_supported)(struct smu_context *smu); 1450 1435 1451 1436 /** 1452 1437 * @get_ecc_table: message SMU to get ECC INFO table. ··· 1787 1788 enum pp_pm_policy p_type, char *sysbuf); 1788 1789 1789 1790 #endif 1791 + 1792 + void smu_feature_cap_set(struct smu_context *smu, enum smu_feature_cap_id fea_id); 1793 + bool smu_feature_cap_test(struct smu_context *smu, enum smu_feature_cap_id fea_id); 1790 1794 #endif
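The bitmap replaces the per-feature ppt_funcs query hooks removed above: each ASIC backend records its capabilities once at init time, and the generic layer tests bits instead of calling into the backend. The intended flow, condensed from this series:

    /* Producer (ASIC backend, init time): declare what is supported. */
    smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);

    /* Consumer (generic layer): a plain bit test, no ppt_funcs hook. */
    bool smu_reset_sdma_is_supported(struct smu_context *smu)
    {
        return smu_feature_cap_test(smu, SMU_FEATURE_CAP_ID__SDMA_RESET);
    }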
+10 -2
drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_12_pmfw.h
··· 191 191 192 192 #define SMU_METRICS_TABLE_VERSION 0x14 193 193 194 - #define SMU_SYSTEM_METRICS_TABLE_VERSION 0x0 194 + #define SMU_SYSTEM_METRICS_TABLE_VERSION 0x1 195 195 196 196 typedef struct __attribute__((packed, aligned(4))) { 197 197 uint64_t AccumulationCounter; ··· 304 304 int16_t SystemTemperatures[SYSTEM_TEMP_MAX_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF 305 305 int16_t NodeTemperatures[NODE_TEMP_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius, unused fields are set to 0xFFFF 306 306 int16_t VrTemperatures[SVI_MAX_TEMP_ENTRIES]; // Signed integer temperature value in Celsius 307 - int16_t spare[3]; 307 + int16_t spare[7]; 308 + 309 + //NPM: NODE POWER MANAGEMENT 310 + uint32_t NodePowerLimit; 311 + uint32_t NodePower; 312 + uint32_t GlobalPPTResidencyAcc; 308 313 } SystemMetricsTable_t; 309 314 #pragma pack(pop) 310 315 ··· 364 359 365 360 // General info 366 361 uint32_t pldmVersion[2]; 362 + 363 + //Node Power Limit 364 + uint32_t MaxNodePowerLimit; 367 365 } StaticMetricsTable_t; 368 366 #pragma pack(pop) 369 367
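The new NodePower* fields follow the PMFW convention of Q10 fixed point (10 fractional bits, so raw value = real value * 1024). The SMUQ10_ROUND() conversion used on the driver side of this series behaves roughly as below; the macro definitions are restated from the smu13 code and should be treated as a sketch:

    #define SMUQ10_TO_UINT(x) ((x) >> 10)
    #define SMUQ10_FRAC(x)    ((x) & 0x3ff)
    #define SMUQ10_ROUND(x)   (SMUQ10_TO_UINT(x) + \
                               (SMUQ10_FRAC(x) >= 0x200 ? 1 : 0))

    /* Example: a raw reading of 304742 (297.6 * 1024) rounds to 298. */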
+2 -17
drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
··· 1745 1745 snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); 1746 1746 i2c_set_adapdata(control, smu_i2c); 1747 1747 1748 - res = i2c_add_adapter(control); 1748 + res = devm_i2c_add_adapter(adev->dev, control); 1749 1749 if (res) { 1750 1750 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 1751 - goto Out_err; 1751 + return res; 1752 1752 } 1753 1753 } 1754 1754 ··· 1756 1756 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; 1757 1757 1758 1758 return 0; 1759 - Out_err: 1760 - for ( ; i >= 0; i--) { 1761 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1762 - struct i2c_adapter *control = &smu_i2c->adapter; 1763 - 1764 - i2c_del_adapter(control); 1765 - } 1766 - return res; 1767 1759 } 1768 1760 1769 1761 static void arcturus_i2c_control_fini(struct smu_context *smu) 1770 1762 { 1771 1763 struct amdgpu_device *adev = smu->adev; 1772 - int i; 1773 1764 1774 - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 1775 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1776 - struct i2c_adapter *control = &smu_i2c->adapter; 1777 - 1778 - i2c_del_adapter(control); 1779 - } 1780 1765 adev->pm.ras_eeprom_i2c_bus = NULL; 1781 1766 adev->pm.fru_eeprom_i2c_bus = NULL; 1782 1767 }
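This hunk and the matching ones below (navi10, sienna cichlid, aldebaran, smu13, smu14) are the same mechanical conversion: devm_i2c_add_adapter() binds the adapter's lifetime to the device, so the Out_err unwind loop and the i2c_del_adapter() calls in each _fini path can go away. The pattern in isolation:

    /* Before: manual registration, explicit unwind on error, and a
     * matching i2c_del_adapter() required at fini time.
     */
    res = i2c_add_adapter(control);
    if (res)
        goto Out_err;

    /* After: device-managed registration is undone automatically when
     * the owning struct device is released; nothing to unwind.
     */
    res = devm_i2c_add_adapter(adev->dev, control);
    if (res)
        return res;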
+2 -17
drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
··· 3145 3145 control->quirks = &navi10_i2c_control_quirks; 3146 3146 i2c_set_adapdata(control, smu_i2c); 3147 3147 3148 - res = i2c_add_adapter(control); 3148 + res = devm_i2c_add_adapter(adev->dev, control); 3149 3149 if (res) { 3150 3150 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 3151 - goto Out_err; 3151 + return res; 3152 3152 } 3153 3153 } 3154 3154 ··· 3156 3156 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; 3157 3157 3158 3158 return 0; 3159 - Out_err: 3160 - for ( ; i >= 0; i--) { 3161 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 3162 - struct i2c_adapter *control = &smu_i2c->adapter; 3163 - 3164 - i2c_del_adapter(control); 3165 - } 3166 - return res; 3167 3159 } 3168 3160 3169 3161 static void navi10_i2c_control_fini(struct smu_context *smu) 3170 3162 { 3171 3163 struct amdgpu_device *adev = smu->adev; 3172 - int i; 3173 3164 3174 - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 3175 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 3176 - struct i2c_adapter *control = &smu_i2c->adapter; 3177 - 3178 - i2c_del_adapter(control); 3179 - } 3180 3165 adev->pm.ras_eeprom_i2c_bus = NULL; 3181 3166 adev->pm.fru_eeprom_i2c_bus = NULL; 3182 3167 }
+2 -17
drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
··· 2648 2648 control->quirks = &sienna_cichlid_i2c_control_quirks; 2649 2649 i2c_set_adapdata(control, smu_i2c); 2650 2650 2651 - res = i2c_add_adapter(control); 2651 + res = devm_i2c_add_adapter(adev->dev, control); 2652 2652 if (res) { 2653 2653 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 2654 - goto Out_err; 2654 + return res; 2655 2655 } 2656 2656 } 2657 2657 /* assign the buses used for the FRU EEPROM and RAS EEPROM */ ··· 2660 2660 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 2661 2661 2662 2662 return 0; 2663 - Out_err: 2664 - for ( ; i >= 0; i--) { 2665 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 2666 - struct i2c_adapter *control = &smu_i2c->adapter; 2667 - 2668 - i2c_del_adapter(control); 2669 - } 2670 - return res; 2671 2663 } 2672 2664 2673 2665 static void sienna_cichlid_i2c_control_fini(struct smu_context *smu) 2674 2666 { 2675 2667 struct amdgpu_device *adev = smu->adev; 2676 - int i; 2677 2668 2678 - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 2679 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 2680 - struct i2c_adapter *control = &smu_i2c->adapter; 2681 - 2682 - i2c_del_adapter(control); 2683 - } 2684 2669 adev->pm.ras_eeprom_i2c_bus = NULL; 2685 2670 adev->pm.fru_eeprom_i2c_bus = NULL; 2686 2671 }
+2 -13
drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
··· 1641 1641 control->quirks = &aldebaran_i2c_control_quirks; 1642 1642 i2c_set_adapdata(control, smu_i2c); 1643 1643 1644 - res = i2c_add_adapter(control); 1644 + res = devm_i2c_add_adapter(adev->dev, control); 1645 1645 if (res) { 1646 1646 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 1647 - goto Out_err; 1647 + return res; 1648 1648 } 1649 1649 1650 1650 adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 1651 1651 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 1652 1652 1653 1653 return 0; 1654 - Out_err: 1655 - i2c_del_adapter(control); 1656 - 1657 - return res; 1658 1654 } 1659 1655 1660 1656 static void aldebaran_i2c_control_fini(struct smu_context *smu) 1661 1657 { 1662 1658 struct amdgpu_device *adev = smu->adev; 1663 - int i; 1664 1659 1665 - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 1666 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 1667 - struct i2c_adapter *control = &smu_i2c->adapter; 1668 - 1669 - i2c_del_adapter(control); 1670 - } 1671 1660 adev->pm.ras_eeprom_i2c_bus = NULL; 1672 1661 adev->pm.fru_eeprom_i2c_bus = NULL; 1673 1662 }
+2 -17
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
··· 2825 2825 control->quirks = &smu_v13_0_0_i2c_control_quirks; 2826 2826 i2c_set_adapdata(control, smu_i2c); 2827 2827 2828 - res = i2c_add_adapter(control); 2828 + res = devm_i2c_add_adapter(adev->dev, control); 2829 2829 if (res) { 2830 2830 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 2831 - goto Out_err; 2831 + return res; 2832 2832 } 2833 2833 } 2834 2834 ··· 2838 2838 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 2839 2839 2840 2840 return 0; 2841 - Out_err: 2842 - for ( ; i >= 0; i--) { 2843 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 2844 - struct i2c_adapter *control = &smu_i2c->adapter; 2845 - 2846 - i2c_del_adapter(control); 2847 - } 2848 - return res; 2849 2841 } 2850 2842 2851 2843 static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu) 2852 2844 { 2853 2845 struct amdgpu_device *adev = smu->adev; 2854 - int i; 2855 2846 2856 - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 2857 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 2858 - struct i2c_adapter *control = &smu_i2c->adapter; 2859 - 2860 - i2c_del_adapter(control); 2861 - } 2862 2847 adev->pm.ras_eeprom_i2c_bus = NULL; 2863 2848 adev->pm.fru_eeprom_i2c_bus = NULL; 2864 2849 }
+48 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
··· 137 137 MSG_MAP(SetThrottlingPolicy, PPSMC_MSG_SetThrottlingPolicy, 0), 138 138 MSG_MAP(ResetSDMA, PPSMC_MSG_ResetSDMA, 0), 139 139 MSG_MAP(GetStaticMetricsTable, PPSMC_MSG_GetStaticMetricsTable, 1), 140 - MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 0), 140 + MSG_MAP(GetSystemMetricsTable, PPSMC_MSG_GetSystemMetricsTable, 1), 141 141 }; 142 142 143 143 int smu_v13_0_12_tables_init(struct smu_context *smu) ··· 341 341 static_metrics->pldmVersion[0] != 0xFFFFFFFF) 342 342 smu->adev->firmware.pldm_version = 343 343 static_metrics->pldmVersion[0]; 344 + if (smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS))) 345 + pptable->MaxNodePowerLimit = 346 + SMUQ10_ROUND(static_metrics->MaxNodePowerLimit); 344 347 smu_v13_0_12_init_xgmi_data(smu, static_metrics); 345 348 pptable->Init = true; 346 349 } ··· 581 578 } 582 579 583 580 return false; 581 + } 582 + 583 + int smu_v13_0_12_get_npm_data(struct smu_context *smu, 584 + enum amd_pp_sensors sensor, 585 + uint32_t *value) 586 + { 587 + struct smu_table_context *smu_table = &smu->smu_table; 588 + struct PPTable_t *pptable = 589 + (struct PPTable_t *)smu_table->driver_pptable; 590 + struct smu_table *tables = smu_table->tables; 591 + SystemMetricsTable_t *metrics; 592 + struct smu_table *sys_table; 593 + int ret; 594 + 595 + if (!smu_v13_0_6_cap_supported(smu, SMU_CAP(NPM_METRICS))) 596 + return -EOPNOTSUPP; 597 + 598 + if (sensor == AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT) { 599 + *value = pptable->MaxNodePowerLimit; 600 + return 0; 601 + } 602 + 603 + ret = smu_v13_0_12_get_system_metrics_table(smu); 604 + if (ret) 605 + return ret; 606 + 607 + sys_table = &tables[SMU_TABLE_PMFW_SYSTEM_METRICS]; 608 + metrics = (SystemMetricsTable_t *)sys_table->cache.buffer; 609 + 610 + switch (sensor) { 611 + case AMDGPU_PP_SENSOR_NODEPOWERLIMIT: 612 + *value = SMUQ10_ROUND(metrics->NodePowerLimit); 613 + break; 614 + case AMDGPU_PP_SENSOR_NODEPOWER: 615 + *value = SMUQ10_ROUND(metrics->NodePower); 616 + break; 617 + case AMDGPU_PP_SENSOR_GPPTRESIDENCY: 618 + *value = SMUQ10_ROUND(metrics->GlobalPPTResidencyAcc); 619 + break; 620 + default: 621 + return -EINVAL; 622 + } 623 + 624 + return ret; 584 625 } 585 626 586 627 static ssize_t smu_v13_0_12_get_temp_metrics(struct smu_context *smu,
+43 -27
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
··· 143 143 MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1), 144 144 MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1), 145 145 MSG_MAP(GetDpmFreqByIndex, PPSMC_MSG_GetDpmFreqByIndex, 1), 146 - MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0), 146 + MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 1), 147 147 MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), 148 148 MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDriverReset, SMU_MSG_RAS_PRI | SMU_MSG_NO_PRECHECK), 149 149 MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0), ··· 354 354 } 355 355 356 356 if (fw_ver >= 0x04560700) { 357 - if (!amdgpu_sriov_vf(smu->adev)) 357 + if (fw_ver >= 0x04560900) { 358 + smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS)); 359 + if (smu->adev->gmc.xgmi.physical_node_id == 0) 360 + smu_v13_0_6_cap_set(smu, SMU_CAP(NPM_METRICS)); 361 + } else if (!amdgpu_sriov_vf(smu->adev)) 358 362 smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_METRICS)); 359 363 } else { 360 364 smu_v13_0_12_tables_fini(smu); ··· 417 413 smu_v13_0_6_cap_set(smu, SMU_CAP(HST_LIMIT_METRICS)); 418 414 419 415 if (amdgpu_sriov_vf(adev)) { 416 + if (fw_ver >= 0x00558200) 417 + amdgpu_virt_attr_set(&adev->virt.virt_caps, 418 + AMDGPU_VIRT_CAP_POWER_LIMIT, 419 + AMDGPU_CAP_ATTR_RW); 420 420 if ((pgm == 0 && fw_ver >= 0x00558000) || 421 421 (pgm == 7 && fw_ver >= 0x7551000)) { 422 422 smu_v13_0_6_cap_set(smu, ··· 1803 1795 ret = -EOPNOTSUPP; 1804 1796 break; 1805 1797 } 1798 + case AMDGPU_PP_SENSOR_NODEPOWERLIMIT: 1799 + case AMDGPU_PP_SENSOR_NODEPOWER: 1800 + case AMDGPU_PP_SENSOR_GPPTRESIDENCY: 1801 + case AMDGPU_PP_SENSOR_MAXNODEPOWERLIMIT: 1802 + ret = smu_v13_0_12_get_npm_data(smu, sensor, (uint32_t *)data); 1803 + if (ret) 1804 + return ret; 1805 + *size = 4; 1806 + break; 1806 1807 case AMDGPU_PP_SENSOR_GPU_AVG_POWER: 1807 1808 default: 1808 1809 ret = -EOPNOTSUPP; ··· 2507 2490 control->quirks = &smu_v13_0_6_i2c_control_quirks; 2508 2491 i2c_set_adapdata(control, smu_i2c); 2509 2492 2510 - res = i2c_add_adapter(control); 2493 + res = devm_i2c_add_adapter(adev->dev, control); 2511 2494 if (res) { 2512 2495 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 2513 - goto Out_err; 2496 + return res; 2514 2497 } 2515 2498 } 2516 2499 ··· 2518 2501 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 2519 2502 2520 2503 return 0; 2521 - Out_err: 2522 - for ( ; i >= 0; i--) { 2523 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 2524 - struct i2c_adapter *control = &smu_i2c->adapter; 2525 - 2526 - i2c_del_adapter(control); 2527 - } 2528 - return res; 2529 2504 } 2530 2505 2531 2506 static void smu_v13_0_6_i2c_control_fini(struct smu_context *smu) 2532 2507 { 2533 2508 struct amdgpu_device *adev = smu->adev; 2534 - int i; 2535 2509 2536 - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 2537 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 2538 - struct i2c_adapter *control = &smu_i2c->adapter; 2539 - 2540 - i2c_del_adapter(control); 2541 - } 2542 2510 adev->pm.ras_eeprom_i2c_bus = NULL; 2543 2511 adev->pm.fru_eeprom_i2c_bus = NULL; 2544 2512 } ··· 3225 3223 } 3226 3224 3227 3225 3226 + static int smu_v13_0_6_post_init(struct smu_context *smu) 3227 + { 3228 + if (smu_v13_0_6_is_link_reset_supported(smu)) 3229 + smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__LINK_RESET); 3230 + 3231 + if (smu_v13_0_6_reset_sdma_is_supported(smu)) 3232 + smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__SDMA_RESET); 3233 + 3234 + if (smu_v13_0_6_reset_vcn_is_supported(smu)) 3235 + smu_feature_cap_set(smu, SMU_FEATURE_CAP_ID__VCN_RESET); 3236 + 
3237 + return 0; 3238 + } 3239 + 3228 3240 static int mca_smu_set_debug_mode(struct amdgpu_device *adev, bool enable) 3229 3241 { 3230 3242 struct smu_context *smu = adev->powerplay.pp_handle; ··· 3855 3839 .parse_error_code = aca_smu_parse_error_code, 3856 3840 }; 3857 3841 3842 + static void smu_v13_0_6_set_temp_funcs(struct smu_context *smu) 3843 + { 3844 + smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) 3845 + == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL; 3846 + } 3847 + 3858 3848 static const struct pptable_funcs smu_v13_0_6_ppt_funcs = { 3859 3849 /* init dpm */ 3860 3850 .get_allowed_feature_mask = smu_v13_0_6_get_allowed_feature_mask, ··· 3908 3886 .get_xcp_metrics = smu_v13_0_6_get_xcp_metrics, 3909 3887 .get_thermal_temperature_range = smu_v13_0_6_get_thermal_temperature_range, 3910 3888 .mode1_reset_is_support = smu_v13_0_6_is_mode1_reset_supported, 3911 - .link_reset_is_support = smu_v13_0_6_is_link_reset_supported, 3912 3889 .mode1_reset = smu_v13_0_6_mode1_reset, 3913 3890 .mode2_reset = smu_v13_0_6_mode2_reset, 3914 3891 .link_reset = smu_v13_0_6_link_reset, ··· 3917 3896 .send_hbm_bad_pages_num = smu_v13_0_6_smu_send_hbm_bad_page_num, 3918 3897 .send_rma_reason = smu_v13_0_6_send_rma_reason, 3919 3898 .reset_sdma = smu_v13_0_6_reset_sdma, 3920 - .reset_sdma_is_supported = smu_v13_0_6_reset_sdma_is_supported, 3921 3899 .dpm_reset_vcn = smu_v13_0_6_reset_vcn, 3922 - .reset_vcn_is_supported = smu_v13_0_6_reset_vcn_is_supported, 3900 + .post_init = smu_v13_0_6_post_init, 3923 3901 }; 3924 3902 3925 3903 void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu) ··· 3933 3913 smu->smc_driver_if_version = SMU13_0_6_DRIVER_IF_VERSION; 3934 3914 smu->smc_fw_caps |= SMU_FW_CAP_RAS_PRI; 3935 3915 smu_v13_0_set_smu_mailbox_registers(smu); 3916 + smu_v13_0_6_set_temp_funcs(smu); 3936 3917 amdgpu_mca_smu_init_funcs(smu->adev, &smu_v13_0_6_mca_smu_funcs); 3937 3918 amdgpu_aca_set_smu_funcs(smu->adev, &smu_v13_0_6_aca_smu_funcs); 3938 3919 } 3939 3920 3940 - void smu_v13_0_6_set_temp_funcs(struct smu_context *smu) 3941 - { 3942 - smu->smu_temp.temp_funcs = (amdgpu_ip_version(smu->adev, MP1_HWIP, 0) 3943 - == IP_VERSION(13, 0, 12)) ? &smu_v13_0_12_temp_funcs : NULL; 3944 - }
+5 -1
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
··· 49 49 uint32_t MaxLclkDpmRange; 50 50 uint32_t MinLclkDpmRange; 51 51 uint64_t PublicSerialNumber_AID; 52 + uint32_t MaxNodePowerLimit; 52 53 bool Init; 53 54 }; 54 55 ··· 71 70 SMU_CAP(BOARD_VOLTAGE), 72 71 SMU_CAP(PLDM_VERSION), 73 72 SMU_CAP(TEMP_METRICS), 73 + SMU_CAP(NPM_METRICS), 74 74 SMU_CAP(ALL), 75 75 }; 76 76 77 77 extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu); 78 - extern void smu_v13_0_6_set_temp_funcs(struct smu_context *smu); 79 78 bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap); 80 79 int smu_v13_0_6_get_static_metrics_table(struct smu_context *smu); 81 80 int smu_v13_0_6_get_metrics_table(struct smu_context *smu, void *metrics_table, ··· 93 92 void *smu_metrics); 94 93 int smu_v13_0_12_tables_init(struct smu_context *smu); 95 94 void smu_v13_0_12_tables_fini(struct smu_context *smu); 95 + int smu_v13_0_12_get_npm_data(struct smu_context *smu, 96 + enum amd_pp_sensors sensor, 97 + uint32_t *value); 96 98 extern const struct cmn2asic_mapping smu_v13_0_12_feature_mask_map[]; 97 99 extern const struct cmn2asic_msg_mapping smu_v13_0_12_message_map[]; 98 100 extern const struct smu_temp_funcs smu_v13_0_12_temp_funcs;
+2 -17
drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
··· 2087 2087 control->quirks = &smu_v14_0_2_i2c_control_quirks; 2088 2088 i2c_set_adapdata(control, smu_i2c); 2089 2089 2090 - res = i2c_add_adapter(control); 2090 + res = devm_i2c_add_adapter(adev->dev, control); 2091 2091 if (res) { 2092 2092 DRM_ERROR("Failed to register hw i2c, err: %d\n", res); 2093 - goto Out_err; 2093 + return res; 2094 2094 } 2095 2095 } 2096 2096 ··· 2100 2100 adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; 2101 2101 2102 2102 return 0; 2103 - Out_err: 2104 - for ( ; i >= 0; i--) { 2105 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 2106 - struct i2c_adapter *control = &smu_i2c->adapter; 2107 - 2108 - i2c_del_adapter(control); 2109 - } 2110 - return res; 2111 2103 } 2112 2104 2113 2105 static void smu_v14_0_2_i2c_control_fini(struct smu_context *smu) 2114 2106 { 2115 2107 struct amdgpu_device *adev = smu->adev; 2116 - int i; 2117 2108 2118 - for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { 2119 - struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; 2120 - struct i2c_adapter *control = &smu_i2c->adapter; 2121 - 2122 - i2c_del_adapter(control); 2123 - } 2124 2109 adev->pm.ras_eeprom_i2c_bus = NULL; 2125 2110 adev->pm.fru_eeprom_i2c_bus = NULL; 2126 2111 }
+1 -1
include/drm/ttm/ttm_resource.h
··· 36 36 #include <drm/ttm/ttm_kmap_iter.h> 37 37 38 38 #define TTM_MAX_BO_PRIORITY 4U 39 - #define TTM_NUM_MEM_TYPES 8 39 + #define TTM_NUM_MEM_TYPES 9 40 40 41 41 struct dmem_cgroup_device; 42 42 struct ttm_device;
+11 -6
include/uapi/drm/amdgpu_drm.h
··· 105 105 * 106 106 * %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for 107 107 * signalling user mode queues. 108 + * 109 + * %AMDGPU_GEM_DOMAIN_MMIO_REMAP MMIO remap page (special mapping for HDP flushing). 108 110 */ 109 111 #define AMDGPU_GEM_DOMAIN_CPU 0x1 110 112 #define AMDGPU_GEM_DOMAIN_GTT 0x2 ··· 115 113 #define AMDGPU_GEM_DOMAIN_GWS 0x10 116 114 #define AMDGPU_GEM_DOMAIN_OA 0x20 117 115 #define AMDGPU_GEM_DOMAIN_DOORBELL 0x40 116 + #define AMDGPU_GEM_DOMAIN_MMIO_REMAP 0x80 118 117 #define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \ 119 118 AMDGPU_GEM_DOMAIN_GTT | \ 120 119 AMDGPU_GEM_DOMAIN_VRAM | \ 121 120 AMDGPU_GEM_DOMAIN_GDS | \ 122 121 AMDGPU_GEM_DOMAIN_GWS | \ 123 - AMDGPU_GEM_DOMAIN_OA | \ 124 - AMDGPU_GEM_DOMAIN_DOORBELL) 122 + AMDGPU_GEM_DOMAIN_OA | \ 123 + AMDGPU_GEM_DOMAIN_DOORBELL | \ 124 + AMDGPU_GEM_DOMAIN_MMIO_REMAP) 125 125 126 126 /* Flag that CPU access will be required for the case of VRAM domain */ 127 127 #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0) ··· 1088 1084 * Query h/w info: Flag that this is integrated (a.h.a. fusion) GPU 1089 1085 * 1090 1086 */ 1091 - #define AMDGPU_IDS_FLAGS_FUSION 0x1 1092 - #define AMDGPU_IDS_FLAGS_PREEMPTION 0x2 1093 - #define AMDGPU_IDS_FLAGS_TMZ 0x4 1094 - #define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x8 1087 + #define AMDGPU_IDS_FLAGS_FUSION 0x01 1088 + #define AMDGPU_IDS_FLAGS_PREEMPTION 0x02 1089 + #define AMDGPU_IDS_FLAGS_TMZ 0x04 1090 + #define AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD 0x08 1091 + #define AMDGPU_IDS_FLAGS_GANG_SUBMIT 0x10 1095 1092 1096 1093 /* 1097 1094 * Query h/w info: Flag identifying VF/PF/PT mode
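Moving the AMDGPU_IDS_FLAGS_* values to two-digit hex leaves every existing bit unchanged and makes room for the new GANG_SUBMIT bit, which userspace reads out of the dev-info query like the other flags. A libdrm-style sketch (amdgpu_query_info() and the ids_flags field exist in current libdrm; the surrounding code is illustrative):

    /* Detect gang-submit support from the device info blob. */
    struct drm_amdgpu_info_device dev_info = {0};

    if (!amdgpu_query_info(dev, AMDGPU_INFO_DEV_INFO,
                           sizeof(dev_info), &dev_info) &&
        (dev_info.ids_flags & AMDGPU_IDS_FLAGS_GANG_SUBMIT))
        printf("gang submit supported\n");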