Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-for-v4.15-rc2' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes and cleanups from Dave Airlie:
"The main things are a bunch of fixes for the new amd display code, plus
a bunch of smatch fixes.

core:
- Atomic helper regression fix.
- Deferred fbdev fallout regression fix.

amdgpu:
- New display code (dc) dpms, suspend/resume and smatch fixes, along
with some others
- Some regression fixes for amdkfd/radeon.
- Fix a ttm regression for swiotlb disabled

bridge:
- A bunch of fixes for the tc358767 bridge

mali-dp + hdlcd:
- Some fixes and internal API catch-ups.

imx-drm:
- regression fix in atomic code.

omapdrm:
- platform detection regression fixes"

* tag 'drm-fixes-for-v4.15-rc2' of git://people.freedesktop.org/~airlied/linux: (76 commits)
drm/imx: always call wait_for_flip_done in commit_tail
omapdrm: hdmi4_cec: signedness bug in hdmi4_cec_init()
drm: omapdrm: Fix DPI on platforms using the DSI VDDS
omapdrm: hdmi4: Correct the SoC revision matching
drm/omap: displays: panel-dpi: add backlight dependency
drm/omap: Fix error handling path in 'omap_dmm_probe()'
drm/i915: Disable THP until we have a GPU read BW W/A
drm/bridge: tc358767: fix 1-lane behavior
drm/bridge: tc358767: fix AUXDATAn registers access
drm/bridge: tc358767: fix timing calculations
drm/bridge: tc358767: fix DP0_MISC register set
drm/bridge: tc358767: filter out too high modes
drm/bridge: tc358767: do no fail on hi-res displays
drm/bridge: Fix lvds-encoder since the panel_bridge rework.
drm/bridge: synopsys/dw-hdmi: Enable cec clock
drm/bridge: adv7511/33: Fix adv7511_cec_init() failure handling
drm/radeon: remove init of CIK VMIDs 8-16 for amdkfd
drm/ttm: fix populate_and_map() functions once more
drm/fb_helper: Disable all crtc's when initial setup fails.
drm/atomic: make drm_atomic_helper_wait_for_vblanks more agressive
...

+819 -488
+7 -11
drivers/gpu/drm/amd/amdgpu/amdgpu.h
··· 717 717 struct amdgpu_queue_mgr *mgr); 718 718 int amdgpu_queue_mgr_map(struct amdgpu_device *adev, 719 719 struct amdgpu_queue_mgr *mgr, 720 - int hw_ip, int instance, int ring, 720 + u32 hw_ip, u32 instance, u32 ring, 721 721 struct amdgpu_ring **out_ring); 722 722 723 723 /* ··· 1572 1572 /* sdma */ 1573 1573 struct amdgpu_sdma sdma; 1574 1574 1575 - union { 1576 - struct { 1577 - /* uvd */ 1578 - struct amdgpu_uvd uvd; 1575 + /* uvd */ 1576 + struct amdgpu_uvd uvd; 1579 1577 1580 - /* vce */ 1581 - struct amdgpu_vce vce; 1582 - }; 1578 + /* vce */ 1579 + struct amdgpu_vce vce; 1583 1580 1584 - /* vcn */ 1585 - struct amdgpu_vcn vcn; 1586 - }; 1581 + /* vcn */ 1582 + struct amdgpu_vcn vcn; 1587 1583 1588 1584 /* firmwares */ 1589 1585 struct amdgpu_firmware firmware;
+39 -18
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
··· 379 379 { 380 380 struct amdgpu_device *adev = get_amdgpu_device(kgd); 381 381 struct cik_sdma_rlc_registers *m; 382 + unsigned long end_jiffies; 382 383 uint32_t sdma_base_addr; 384 + uint32_t data; 383 385 384 386 m = get_sdma_mqd(mqd); 385 387 sdma_base_addr = get_sdma_base_addr(m); 386 388 387 - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, 388 - m->sdma_rlc_virtual_addr); 389 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, 390 + m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); 389 391 390 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 391 - m->sdma_rlc_rb_base); 392 - 393 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, 394 - m->sdma_rlc_rb_base_hi); 395 - 396 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 397 - m->sdma_rlc_rb_rptr_addr_lo); 398 - 399 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, 400 - m->sdma_rlc_rb_rptr_addr_hi); 392 + end_jiffies = msecs_to_jiffies(2000) + jiffies; 393 + while (true) { 394 + data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); 395 + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) 396 + break; 397 + if (time_after(jiffies, end_jiffies)) 398 + return -ETIME; 399 + usleep_range(500, 1000); 400 + } 401 + if (m->sdma_engine_id) { 402 + data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); 403 + data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, 404 + RESUME_CTX, 0); 405 + WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); 406 + } else { 407 + data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); 408 + data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, 409 + RESUME_CTX, 0); 410 + WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); 411 + } 401 412 402 413 WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 403 - m->sdma_rlc_doorbell); 404 - 414 + m->sdma_rlc_doorbell); 415 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); 416 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); 417 + WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, 418 + m->sdma_rlc_virtual_addr); 419 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 
m->sdma_rlc_rb_base); 420 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, 421 + m->sdma_rlc_rb_base_hi); 422 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, 423 + m->sdma_rlc_rb_rptr_addr_lo); 424 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, 425 + m->sdma_rlc_rb_rptr_addr_hi); 405 426 WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, 406 427 m->sdma_rlc_rb_cntl); 407 428 ··· 595 574 } 596 575 597 576 WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); 598 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0); 599 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0); 600 - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0); 577 + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, 578 + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | 579 + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); 601 580 602 581 return 0; 603 582 }
+4
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
··· 409 409 if (candidate->robj == validated) 410 410 break; 411 411 412 + /* We can't move pinned BOs here */ 413 + if (bo->pin_count) 414 + continue; 415 + 412 416 other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 413 417 414 418 /* Check if this BO is in one of the domains we need space for */
-3
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1837 1837 adev->ip_blocks[i].status.hw = false; 1838 1838 } 1839 1839 1840 - if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) 1841 - amdgpu_ucode_fini_bo(adev); 1842 - 1843 1840 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1844 1841 if (!adev->ip_blocks[i].status.sw) 1845 1842 continue;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 536 536 {0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 537 537 {0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10}, 538 538 /* Raven */ 539 - {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, 539 + {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU}, 540 540 541 541 {0, 0, 0} 542 542 };
+3
drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
··· 164 164 ret = adev->powerplay.ip_funcs->hw_fini( 165 165 adev->powerplay.pp_handle); 166 166 167 + if (adev->firmware.load_type == AMDGPU_FW_LOAD_SMU) 168 + amdgpu_ucode_fini_bo(adev); 169 + 167 170 return ret; 168 171 } 169 172
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
··· 442 442 if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) 443 443 return 0; 444 444 445 + amdgpu_ucode_fini_bo(adev); 446 + 445 447 psp_ring_destroy(psp, PSP_RING_TYPE__KM); 446 448 447 449 amdgpu_bo_free_kernel(&psp->tmr_bo, &psp->tmr_mc_addr, &psp->tmr_buf);
+3 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
··· 63 63 64 64 static int amdgpu_identity_map(struct amdgpu_device *adev, 65 65 struct amdgpu_queue_mapper *mapper, 66 - int ring, 66 + u32 ring, 67 67 struct amdgpu_ring **out_ring) 68 68 { 69 69 switch (mapper->hw_ip) { ··· 121 121 122 122 static int amdgpu_lru_map(struct amdgpu_device *adev, 123 123 struct amdgpu_queue_mapper *mapper, 124 - int user_ring, bool lru_pipe_order, 124 + u32 user_ring, bool lru_pipe_order, 125 125 struct amdgpu_ring **out_ring) 126 126 { 127 127 int r, i, j; ··· 208 208 */ 209 209 int amdgpu_queue_mgr_map(struct amdgpu_device *adev, 210 210 struct amdgpu_queue_mgr *mgr, 211 - int hw_ip, int instance, int ring, 211 + u32 hw_ip, u32 instance, u32 ring, 212 212 struct amdgpu_ring **out_ring) 213 213 { 214 214 int r, ip_num_rings;
+95 -16
drivers/gpu/drm/amd/amdgpu/cik.c
··· 1023 1023 {mmPA_SC_RASTER_CONFIG_1, true}, 1024 1024 }; 1025 1025 1026 - static uint32_t cik_read_indexed_register(struct amdgpu_device *adev, 1027 - u32 se_num, u32 sh_num, 1028 - u32 reg_offset) 1026 + 1027 + static uint32_t cik_get_register_value(struct amdgpu_device *adev, 1028 + bool indexed, u32 se_num, 1029 + u32 sh_num, u32 reg_offset) 1029 1030 { 1030 - uint32_t val; 1031 + if (indexed) { 1032 + uint32_t val; 1033 + unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num; 1034 + unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num; 1031 1035 1032 - mutex_lock(&adev->grbm_idx_mutex); 1033 - if (se_num != 0xffffffff || sh_num != 0xffffffff) 1034 - amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); 1036 + switch (reg_offset) { 1037 + case mmCC_RB_BACKEND_DISABLE: 1038 + return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable; 1039 + case mmGC_USER_RB_BACKEND_DISABLE: 1040 + return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable; 1041 + case mmPA_SC_RASTER_CONFIG: 1042 + return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config; 1043 + case mmPA_SC_RASTER_CONFIG_1: 1044 + return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1; 1045 + } 1035 1046 1036 - val = RREG32(reg_offset); 1047 + mutex_lock(&adev->grbm_idx_mutex); 1048 + if (se_num != 0xffffffff || sh_num != 0xffffffff) 1049 + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); 1037 1050 1038 - if (se_num != 0xffffffff || sh_num != 0xffffffff) 1039 - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1040 - mutex_unlock(&adev->grbm_idx_mutex); 1041 - return val; 1051 + val = RREG32(reg_offset); 1052 + 1053 + if (se_num != 0xffffffff || sh_num != 0xffffffff) 1054 + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1055 + mutex_unlock(&adev->grbm_idx_mutex); 1056 + return val; 1057 + } else { 1058 + unsigned idx; 1059 + 1060 + switch (reg_offset) { 1061 + case mmGB_ADDR_CONFIG: 1062 + return 
adev->gfx.config.gb_addr_config; 1063 + case mmMC_ARB_RAMCFG: 1064 + return adev->gfx.config.mc_arb_ramcfg; 1065 + case mmGB_TILE_MODE0: 1066 + case mmGB_TILE_MODE1: 1067 + case mmGB_TILE_MODE2: 1068 + case mmGB_TILE_MODE3: 1069 + case mmGB_TILE_MODE4: 1070 + case mmGB_TILE_MODE5: 1071 + case mmGB_TILE_MODE6: 1072 + case mmGB_TILE_MODE7: 1073 + case mmGB_TILE_MODE8: 1074 + case mmGB_TILE_MODE9: 1075 + case mmGB_TILE_MODE10: 1076 + case mmGB_TILE_MODE11: 1077 + case mmGB_TILE_MODE12: 1078 + case mmGB_TILE_MODE13: 1079 + case mmGB_TILE_MODE14: 1080 + case mmGB_TILE_MODE15: 1081 + case mmGB_TILE_MODE16: 1082 + case mmGB_TILE_MODE17: 1083 + case mmGB_TILE_MODE18: 1084 + case mmGB_TILE_MODE19: 1085 + case mmGB_TILE_MODE20: 1086 + case mmGB_TILE_MODE21: 1087 + case mmGB_TILE_MODE22: 1088 + case mmGB_TILE_MODE23: 1089 + case mmGB_TILE_MODE24: 1090 + case mmGB_TILE_MODE25: 1091 + case mmGB_TILE_MODE26: 1092 + case mmGB_TILE_MODE27: 1093 + case mmGB_TILE_MODE28: 1094 + case mmGB_TILE_MODE29: 1095 + case mmGB_TILE_MODE30: 1096 + case mmGB_TILE_MODE31: 1097 + idx = (reg_offset - mmGB_TILE_MODE0); 1098 + return adev->gfx.config.tile_mode_array[idx]; 1099 + case mmGB_MACROTILE_MODE0: 1100 + case mmGB_MACROTILE_MODE1: 1101 + case mmGB_MACROTILE_MODE2: 1102 + case mmGB_MACROTILE_MODE3: 1103 + case mmGB_MACROTILE_MODE4: 1104 + case mmGB_MACROTILE_MODE5: 1105 + case mmGB_MACROTILE_MODE6: 1106 + case mmGB_MACROTILE_MODE7: 1107 + case mmGB_MACROTILE_MODE8: 1108 + case mmGB_MACROTILE_MODE9: 1109 + case mmGB_MACROTILE_MODE10: 1110 + case mmGB_MACROTILE_MODE11: 1111 + case mmGB_MACROTILE_MODE12: 1112 + case mmGB_MACROTILE_MODE13: 1113 + case mmGB_MACROTILE_MODE14: 1114 + case mmGB_MACROTILE_MODE15: 1115 + idx = (reg_offset - mmGB_MACROTILE_MODE0); 1116 + return adev->gfx.config.macrotile_mode_array[idx]; 1117 + default: 1118 + return RREG32(reg_offset); 1119 + } 1120 + } 1042 1121 } 1043 1122 1044 1123 static int cik_read_register(struct amdgpu_device *adev, u32 se_num, ··· 1127 1048 
1128 1049 *value = 0; 1129 1050 for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) { 1051 + bool indexed = cik_allowed_read_registers[i].grbm_indexed; 1052 + 1130 1053 if (reg_offset != cik_allowed_read_registers[i].reg_offset) 1131 1054 continue; 1132 1055 1133 - *value = cik_allowed_read_registers[i].grbm_indexed ? 1134 - cik_read_indexed_register(adev, se_num, 1135 - sh_num, reg_offset) : 1136 - RREG32(reg_offset); 1056 + *value = cik_get_register_value(adev, indexed, se_num, sh_num, 1057 + reg_offset); 1137 1058 return 0; 1138 1059 } 1139 1060 return -EINVAL;
+16
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
··· 1819 1819 adev->gfx.config.backend_enable_mask, 1820 1820 num_rb_pipes); 1821 1821 } 1822 + 1823 + /* cache the values for userspace */ 1824 + for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { 1825 + for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { 1826 + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); 1827 + adev->gfx.config.rb_config[i][j].rb_backend_disable = 1828 + RREG32(mmCC_RB_BACKEND_DISABLE); 1829 + adev->gfx.config.rb_config[i][j].user_rb_backend_disable = 1830 + RREG32(mmGC_USER_RB_BACKEND_DISABLE); 1831 + adev->gfx.config.rb_config[i][j].raster_config = 1832 + RREG32(mmPA_SC_RASTER_CONFIG); 1833 + adev->gfx.config.rb_config[i][j].raster_config_1 = 1834 + RREG32(mmPA_SC_RASTER_CONFIG_1); 1835 + } 1836 + } 1837 + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); 1822 1838 mutex_unlock(&adev->grbm_idx_mutex); 1823 1839 } 1824 1840
+1 -1
drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
··· 1175 1175 1176 1176 static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev) 1177 1177 { 1178 - adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1; 1178 + adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1; 1179 1179 adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs; 1180 1180 } 1181 1181
+2 -1
drivers/gpu/drm/amd/amdkfd/kfd_module.c
··· 24 24 #include <linux/sched.h> 25 25 #include <linux/moduleparam.h> 26 26 #include <linux/device.h> 27 + #include <linux/printk.h> 27 28 #include "kfd_priv.h" 28 29 29 30 #define KFD_DRIVER_AUTHOR "AMD Inc. and others" ··· 133 132 kfd_process_destroy_wq(); 134 133 kfd_topology_shutdown(); 135 134 kfd_chardev_exit(); 136 - dev_info(kfd_device, "Removed module\n"); 135 + pr_info("amdkfd: Removed module\n"); 137 136 } 138 137 139 138 module_init(kfd_module_init);
+2 -2
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
··· 202 202 struct cik_sdma_rlc_registers *m; 203 203 204 204 m = get_sdma_mqd(mqd); 205 - m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) << 206 - SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | 205 + m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1) 206 + << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | 207 207 q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | 208 208 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | 209 209 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
+18
drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
··· 191 191 192 192 switch (type) { 193 193 case KFD_QUEUE_TYPE_SDMA: 194 + if (dev->dqm->queue_count >= 195 + CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) { 196 + pr_err("Over-subscription is not allowed for SDMA.\n"); 197 + retval = -EPERM; 198 + goto err_create_queue; 199 + } 200 + 201 + retval = create_cp_queue(pqm, dev, &q, properties, f, *qid); 202 + if (retval != 0) 203 + goto err_create_queue; 204 + pqn->q = q; 205 + pqn->kq = NULL; 206 + retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, 207 + &q->properties.vmid); 208 + pr_debug("DQM returned %d for create_queue\n", retval); 209 + print_queue(q); 210 + break; 211 + 194 212 case KFD_QUEUE_TYPE_COMPUTE: 195 213 /* check if there is over subscription */ 196 214 if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
+20 -11
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 520 520 521 521 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 522 522 aconnector = to_amdgpu_dm_connector(connector); 523 - if (aconnector->dc_link->type == dc_connection_mst_branch) { 523 + if (aconnector->dc_link->type == dc_connection_mst_branch && 524 + aconnector->mst_mgr.aux) { 524 525 DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", 525 526 aconnector, aconnector->base.base.id); 526 527 ··· 678 677 679 678 mutex_lock(&aconnector->hpd_lock); 680 679 dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); 680 + 681 + if (aconnector->fake_enable && aconnector->dc_link->local_sink) 682 + aconnector->fake_enable = false; 683 + 681 684 aconnector->dc_sink = NULL; 682 685 amdgpu_dm_update_connector_after_detect(aconnector); 683 686 mutex_unlock(&aconnector->hpd_lock); ··· 716 711 717 712 ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state); 718 713 719 - drm_atomic_state_put(adev->dm.cached_state); 720 714 adev->dm.cached_state = NULL; 721 715 722 716 amdgpu_dm_irq_resume_late(adev); ··· 2708 2704 .link = aconnector->dc_link, 2709 2705 .sink_signal = SIGNAL_TYPE_VIRTUAL 2710 2706 }; 2711 - struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data; 2707 + struct edid *edid; 2712 2708 2713 2709 if (!aconnector->base.edid_blob_ptr || 2714 2710 !aconnector->base.edid_blob_ptr->data) { ··· 2719 2715 aconnector->base.override_edid = false; 2720 2716 return; 2721 2717 } 2718 + 2719 + edid = (struct edid *) aconnector->base.edid_blob_ptr->data; 2722 2720 2723 2721 aconnector->edid = edid; 2724 2722 ··· 4199 4193 update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, 4200 4194 dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); 4201 4195 4196 + if (!dm_new_crtc_state->stream) 4197 + continue; 4198 + 4202 4199 status = dc_stream_get_status(dm_new_crtc_state->stream); 4203 4200 WARN_ON(!status); 4204 4201 WARN_ON(!status->plane_count); 4205 - 4206 - if 
(!dm_new_crtc_state->stream) 4207 - continue; 4208 4202 4209 4203 /*TODO How it works with MPO ?*/ 4210 4204 if (!dc_commit_planes_to_stream( ··· 4259 4253 drm_atomic_helper_commit_hw_done(state); 4260 4254 4261 4255 if (wait_for_vblank) 4262 - drm_atomic_helper_wait_for_vblanks(dev, state); 4256 + drm_atomic_helper_wait_for_flip_done(dev, state); 4263 4257 4264 4258 drm_atomic_helper_cleanup_planes(dev, state); 4265 4259 } ··· 4338 4332 return; 4339 4333 4340 4334 disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc); 4341 - acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); 4335 + if (!disconnected_acrtc) 4336 + return; 4342 4337 4343 - if (!disconnected_acrtc || !acrtc_state->stream) 4338 + acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state); 4339 + if (!acrtc_state->stream) 4344 4340 return; 4345 4341 4346 4342 /* ··· 4463 4455 } 4464 4456 } 4465 4457 4466 - if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 4458 + if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && 4467 4459 dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { 4468 4460 4469 4461 new_crtc_state->mode_changed = false; ··· 4717 4709 } 4718 4710 } else { 4719 4711 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 4720 - if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) 4712 + if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && 4713 + !new_crtc_state->color_mgmt_changed) 4721 4714 continue; 4722 4715 4723 4716 if (!new_crtc_state->enable)
+5
drivers/gpu/drm/amd/display/dc/basics/log_helpers.c
··· 75 75 if (signal == signal_type_info_tbl[i].type) 76 76 break; 77 77 78 + if (i == NUM_ELEMENTS(signal_type_info_tbl)) 79 + goto fail; 80 + 78 81 dm_logger_append(&entry, "[%s][ConnIdx:%d] ", 79 82 signal_type_info_tbl[i].name, 80 83 link->link_index); ··· 99 96 100 97 dm_logger_append(&entry, "^\n"); 101 98 dm_helpers_dc_conn_log(ctx, &entry, event); 99 + 100 + fail: 102 101 dm_logger_close(&entry); 103 102 104 103 va_end(args);
+2 -2
drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
··· 249 249 struct graphics_object_id *dest_object_id) 250 250 { 251 251 uint32_t number; 252 - uint16_t *id; 252 + uint16_t *id = NULL; 253 253 ATOM_OBJECT *object; 254 254 struct bios_parser *bp = BP_FROM_DCB(dcb); 255 255 ··· 260 260 261 261 number = get_dest_obj_list(bp, object, &id); 262 262 263 - if (number <= index) 263 + if (number <= index || !id) 264 264 return BP_RESULT_BADINPUT; 265 265 266 266 *dest_object_id = object_id_from_bios_object_id(id[index]);
+11 -4
drivers/gpu/drm/amd/display/dc/core/dc.c
··· 121 121 goto failed_alloc; 122 122 } 123 123 124 + link->link_index = dc->link_count; 125 + dc->links[dc->link_count] = link; 126 + dc->link_count++; 127 + 124 128 link->ctx = dc->ctx; 125 129 link->dc = dc; 126 130 link->connector_signal = SIGNAL_TYPE_VIRTUAL; ··· 132 128 link->link_id.id = CONNECTOR_ID_VIRTUAL; 133 129 link->link_id.enum_id = ENUM_ID_1; 134 130 link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL); 131 + 132 + if (!link->link_enc) { 133 + BREAK_TO_DEBUGGER(); 134 + goto failed_alloc; 135 + } 136 + 137 + link->link_status.dpcd_caps = &link->dpcd_caps; 135 138 136 139 enc_init.ctx = dc->ctx; 137 140 enc_init.channel = CHANNEL_ID_UNKNOWN; ··· 149 138 enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL; 150 139 enc_init.encoder.enum_id = ENUM_ID_1; 151 140 virtual_link_encoder_construct(link->link_enc, &enc_init); 152 - 153 - link->link_index = dc->link_count; 154 - dc->links[dc->link_count] = link; 155 - dc->link_count++; 156 141 } 157 142 158 143 return true;
+85 -17
drivers/gpu/drm/amd/display/dc/core/dc_link.c
··· 480 480 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; 481 481 detect_dp_sink_caps(link); 482 482 483 - /* DP active dongles */ 484 - if (is_dp_active_dongle(link)) { 485 - link->type = dc_connection_active_dongle; 486 - if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) { 487 - /* 488 - * active dongle unplug processing for short irq 489 - */ 490 - link_disconnect_sink(link); 491 - return; 492 - } 493 - 494 - if (link->dpcd_caps.dongle_type != 495 - DISPLAY_DONGLE_DP_HDMI_CONVERTER) { 496 - *converter_disable_audio = true; 497 - } 498 - } 499 483 if (is_mst_supported(link)) { 500 484 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST; 501 485 link->type = dc_connection_mst_branch; ··· 518 534 link->type = dc_connection_single; 519 535 sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; 520 536 } 537 + } 538 + 539 + if (link->type != dc_connection_mst_branch && 540 + is_dp_active_dongle(link)) { 541 + /* DP active dongles */ 542 + link->type = dc_connection_active_dongle; 543 + if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) { 544 + /* 545 + * active dongle unplug processing for short irq 546 + */ 547 + link_disconnect_sink(link); 548 + return; 549 + } 550 + 551 + if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER) 552 + *converter_disable_audio = true; 521 553 } 522 554 } else { 523 555 /* DP passive dongles */ ··· 1801 1801 link->link_enc->funcs->disable_output(link->link_enc, signal, link); 1802 1802 } 1803 1803 1804 + bool dp_active_dongle_validate_timing( 1805 + const struct dc_crtc_timing *timing, 1806 + const struct dc_dongle_caps *dongle_caps) 1807 + { 1808 + unsigned int required_pix_clk = timing->pix_clk_khz; 1809 + 1810 + if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || 1811 + dongle_caps->extendedCapValid == false) 1812 + return true; 1813 + 1814 + /* Check Pixel Encoding */ 1815 + switch (timing->pixel_encoding) { 1816 + case PIXEL_ENCODING_RGB: 1817 + case PIXEL_ENCODING_YCBCR444: 1818 + break; 1819 + case 
PIXEL_ENCODING_YCBCR422: 1820 + if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) 1821 + return false; 1822 + break; 1823 + case PIXEL_ENCODING_YCBCR420: 1824 + if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) 1825 + return false; 1826 + break; 1827 + default: 1828 + /* Invalid Pixel Encoding*/ 1829 + return false; 1830 + } 1831 + 1832 + 1833 + /* Check Color Depth and Pixel Clock */ 1834 + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) 1835 + required_pix_clk /= 2; 1836 + 1837 + switch (timing->display_color_depth) { 1838 + case COLOR_DEPTH_666: 1839 + case COLOR_DEPTH_888: 1840 + /*888 and 666 should always be supported*/ 1841 + break; 1842 + case COLOR_DEPTH_101010: 1843 + if (dongle_caps->dp_hdmi_max_bpc < 10) 1844 + return false; 1845 + required_pix_clk = required_pix_clk * 10 / 8; 1846 + break; 1847 + case COLOR_DEPTH_121212: 1848 + if (dongle_caps->dp_hdmi_max_bpc < 12) 1849 + return false; 1850 + required_pix_clk = required_pix_clk * 12 / 8; 1851 + break; 1852 + 1853 + case COLOR_DEPTH_141414: 1854 + case COLOR_DEPTH_161616: 1855 + default: 1856 + /* These color depths are currently not supported */ 1857 + return false; 1858 + } 1859 + 1860 + if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk) 1861 + return false; 1862 + 1863 + return true; 1864 + } 1865 + 1804 1866 enum dc_status dc_link_validate_mode_timing( 1805 1867 const struct dc_stream_state *stream, 1806 1868 struct dc_link *link, 1807 1869 const struct dc_crtc_timing *timing) 1808 1870 { 1809 1871 uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk; 1872 + struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps; 1810 1873 1811 1874 /* A hack to avoid failing any modes for EDID override feature on 1812 1875 * topology change such as lower quality cable for DP or different dongle ··· 1877 1814 if (link->remote_sinks[0]) 1878 1815 return DC_OK; 1879 1816 1817 + /* Passive Dongle */ 1880 1818 if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk) 1881 
- return DC_EXCEED_DONGLE_MAX_CLK; 1819 + return DC_EXCEED_DONGLE_CAP; 1820 + 1821 + /* Active Dongle*/ 1822 + if (!dp_active_dongle_validate_timing(timing, dongle_caps)) 1823 + return DC_EXCEED_DONGLE_CAP; 1882 1824 1883 1825 switch (stream->signal) { 1884 1826 case SIGNAL_TYPE_EDP:
+33 -21
drivers/gpu/drm/amd/display/dc/core/dc_resource.c
··· 516 516 right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split; 517 517 518 518 if (right_view) { 519 - data->viewport.width /= 2; 520 - data->viewport_c.width /= 2; 521 - data->viewport.x += data->viewport.width; 522 - data->viewport_c.x += data->viewport_c.width; 519 + data->viewport.x += data->viewport.width / 2; 520 + data->viewport_c.x += data->viewport_c.width / 2; 523 521 /* Ceil offset pipe */ 524 - data->viewport.width += data->viewport.width % 2; 525 - data->viewport_c.width += data->viewport_c.width % 2; 522 + data->viewport.width = (data->viewport.width + 1) / 2; 523 + data->viewport_c.width = (data->viewport_c.width + 1) / 2; 526 524 } else { 527 525 data->viewport.width /= 2; 528 526 data->viewport_c.width /= 2; ··· 578 580 if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state == 579 581 pipe_ctx->plane_state) { 580 582 if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) { 581 - pipe_ctx->plane_res.scl_data.recout.height /= 2; 582 - pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height; 583 + pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2; 583 584 /* Floor primary pipe, ceil 2ndary pipe */ 584 - pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2; 585 + pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2; 585 586 } else { 586 - pipe_ctx->plane_res.scl_data.recout.width /= 2; 587 - pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width; 588 - pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2; 587 + pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2; 588 + pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2; 589 589 } 590 590 } else if (pipe_ctx->bottom_pipe && 591 591 pipe_ctx->bottom_pipe->plane_state == 
pipe_ctx->plane_state) { ··· 852 856 pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right; 853 857 pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom; 854 858 859 + 855 860 /* Taps calculations */ 856 861 if (pipe_ctx->plane_res.xfm != NULL) 857 862 res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( ··· 861 864 if (pipe_ctx->plane_res.dpp != NULL) 862 865 res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( 863 866 pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); 864 - 865 867 if (!res) { 866 868 /* Try 24 bpp linebuffer */ 867 869 pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP; 868 870 869 - res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( 870 - pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); 871 + if (pipe_ctx->plane_res.xfm != NULL) 872 + res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps( 873 + pipe_ctx->plane_res.xfm, 874 + &pipe_ctx->plane_res.scl_data, 875 + &plane_state->scaling_quality); 871 876 872 - res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( 873 - pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); 877 + if (pipe_ctx->plane_res.dpp != NULL) 878 + res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( 879 + pipe_ctx->plane_res.dpp, 880 + &pipe_ctx->plane_res.scl_data, 881 + &plane_state->scaling_quality); 874 882 } 875 883 876 884 if (res) ··· 993 991 994 992 head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); 995 993 996 - if (!head_pipe) 994 + if (!head_pipe) { 997 995 ASSERT(0); 996 + return NULL; 997 + } 998 998 999 999 if (!head_pipe->plane_state) 1000 1000 return head_pipe; ··· 1451 1447 1452 1448 static struct audio *find_first_free_audio( 1453 1449 
struct resource_context *res_ctx, 1454 - const struct resource_pool *pool) 1450 + const struct resource_pool *pool, 1451 + enum engine_id id) 1455 1452 { 1456 1453 int i; 1457 1454 for (i = 0; i < pool->audio_count; i++) { 1458 1455 if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) { 1456 + /*we have enough audio endpoint, find the matching inst*/ 1457 + if (id != i) 1458 + continue; 1459 + 1459 1460 return pool->audios[i]; 1460 1461 } 1461 1462 } ··· 1709 1700 dc_is_audio_capable_signal(pipe_ctx->stream->signal) && 1710 1701 stream->audio_info.mode_count) { 1711 1702 pipe_ctx->stream_res.audio = find_first_free_audio( 1712 - &context->res_ctx, pool); 1703 + &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); 1713 1704 1714 1705 /* 1715 1706 * Audio assigned in order first come first get. ··· 1774 1765 enum dc_status result = DC_ERROR_UNEXPECTED; 1775 1766 int i, j; 1776 1767 1768 + if (!new_ctx) 1769 + return DC_ERROR_UNEXPECTED; 1770 + 1777 1771 if (dc->res_pool->funcs->validate_global) { 1778 1772 result = dc->res_pool->funcs->validate_global(dc, new_ctx); 1779 1773 if (result != DC_OK) 1780 1774 return result; 1781 1775 } 1782 1776 1783 - for (i = 0; new_ctx && i < new_ctx->stream_count; i++) { 1777 + for (i = 0; i < new_ctx->stream_count; i++) { 1784 1778 struct dc_stream_state *stream = new_ctx->streams[i]; 1785 1779 1786 1780 for (j = 0; j < dc->res_pool->pipe_count; j++) {
+4 -5
drivers/gpu/drm/amd/display/dc/core/dc_stream.c
··· 263 263 struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp; 264 264 struct mem_input *mi = pipe_ctx->plane_res.mi; 265 265 struct hubp *hubp = pipe_ctx->plane_res.hubp; 266 - struct transform *xfm = pipe_ctx->plane_res.xfm; 267 266 struct dpp *dpp = pipe_ctx->plane_res.dpp; 268 267 struct dc_cursor_position pos_cpy = *position; 269 268 struct dc_cursor_mi_param param = { ··· 293 294 if (mi != NULL && mi->funcs->set_cursor_position != NULL) 294 295 mi->funcs->set_cursor_position(mi, &pos_cpy, &param); 295 296 296 - if (hubp != NULL && hubp->funcs->set_cursor_position != NULL) 297 - hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param); 297 + if (!hubp) 298 + continue; 298 299 299 - if (xfm != NULL && xfm->funcs->set_cursor_position != NULL) 300 - xfm->funcs->set_cursor_position(xfm, &pos_cpy, &param, hubp->curs_attr.width); 300 + if (hubp->funcs->set_cursor_position != NULL) 301 + hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param); 301 302 302 303 if (dpp != NULL && dpp->funcs->set_cursor_position != NULL) 303 304 dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width);
+5 -5
drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
··· 352 352 uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL); 353 353 354 354 set_reg_field_value(value, 1, 355 - AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, 356 - CLOCK_GATING_DISABLE); 357 - set_reg_field_value(value, 1, 358 - AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, 359 - AUDIO_ENABLED); 355 + AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, 356 + CLOCK_GATING_DISABLE); 357 + set_reg_field_value(value, 1, 358 + AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, 359 + AUDIO_ENABLED); 360 360 361 361 AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value); 362 362 value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
+3
drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
··· 87 87 */ 88 88 uint32_t max_retries = 50; 89 89 90 + /*we need turn on clock before programming AFMT block*/ 91 + REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1); 92 + 90 93 if (REG(AFMT_VBI_PACKET_CONTROL1)) { 91 94 if (packet_index >= 8) 92 95 ASSERT(0);
+18 -14
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
··· 991 991 struct dc_link *link = stream->sink->link; 992 992 struct dc *dc = pipe_ctx->stream->ctx->dc; 993 993 994 + if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) 995 + pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( 996 + pipe_ctx->stream_res.stream_enc); 997 + 998 + if (dc_is_dp_signal(pipe_ctx->stream->signal)) 999 + pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( 1000 + pipe_ctx->stream_res.stream_enc); 1001 + 1002 + pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 1003 + pipe_ctx->stream_res.stream_enc, true); 994 1004 if (pipe_ctx->stream_res.audio) { 995 1005 pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio); 996 1006 ··· 1024 1014 * stream->stream_engine_id); 1025 1015 */ 1026 1016 } 1027 - 1028 - if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) 1029 - pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets( 1030 - pipe_ctx->stream_res.stream_enc); 1031 - 1032 - if (dc_is_dp_signal(pipe_ctx->stream->signal)) 1033 - pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( 1034 - pipe_ctx->stream_res.stream_enc); 1035 - 1036 - pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( 1037 - pipe_ctx->stream_res.stream_enc, true); 1038 - 1039 1017 1040 1018 /* blank at encoder level */ 1041 1019 if (dc_is_dp_signal(pipe_ctx->stream->signal)) { ··· 1772 1774 if (pipe_ctx->stream->sink->link->psr_enabled) 1773 1775 return DC_ERROR_UNEXPECTED; 1774 1776 1777 + /* Nothing to compress */ 1778 + if (!pipe_ctx->plane_state) 1779 + return DC_ERROR_UNEXPECTED; 1780 + 1775 1781 /* Only for non-linear tiling */ 1776 1782 if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL) 1777 1783 return DC_ERROR_UNEXPECTED; ··· 1870 1868 pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) { 1871 1869 struct clock_source *old_clk = pipe_ctx_old->clock_source; 1872 1870 1873 - /* disable already, no need to disable again */ 1874 - if (pipe_ctx->stream && 
!pipe_ctx->stream->dpms_off) 1871 + /* Disable if new stream is null. O/w, if stream is 1872 + * disabled already, no need to disable again. 1873 + */ 1874 + if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off) 1875 1875 core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE); 1876 1876 1877 1877 pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
+7 -5
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
··· 1037 1037 struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv), 1038 1038 GFP_KERNEL); 1039 1039 1040 - if ((dce110_tgv == NULL) || 1041 - (dce110_xfmv == NULL) || 1042 - (dce110_miv == NULL) || 1043 - (dce110_oppv == NULL)) 1044 - return false; 1040 + if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) { 1041 + kfree(dce110_tgv); 1042 + kfree(dce110_xfmv); 1043 + kfree(dce110_miv); 1044 + kfree(dce110_oppv); 1045 + return false; 1046 + } 1045 1047 1046 1048 dce110_opp_v_construct(dce110_oppv, ctx); 1047 1049
+4 -4
drivers/gpu/drm/amd/display/dc/dce110/dce110_timing_generator.c
··· 1112 1112 enum signal_type signal) 1113 1113 { 1114 1114 uint32_t h_blank; 1115 - uint32_t h_back_porch; 1116 - uint32_t hsync_offset = timing->h_border_right + 1117 - timing->h_front_porch; 1118 - uint32_t h_sync_start = timing->h_addressable + hsync_offset; 1115 + uint32_t h_back_porch, hsync_offset, h_sync_start; 1119 1116 1120 1117 struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg); 1121 1118 ··· 1120 1123 1121 1124 if (!timing) 1122 1125 return false; 1126 + 1127 + hsync_offset = timing->h_border_right + timing->h_front_porch; 1128 + h_sync_start = timing->h_addressable + hsync_offset; 1123 1129 1124 1130 /* Currently we don't support 3D, so block all 3D timings */ 1125 1131 if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
+4 -2
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
··· 912 912 struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream); 913 913 struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool); 914 914 915 - if (!head_pipe) 915 + if (!head_pipe) { 916 916 ASSERT(0); 917 + return NULL; 918 + } 917 919 918 920 if (!idle_pipe) 919 - return false; 921 + return NULL; 920 922 921 923 idle_pipe->stream = head_pipe->stream; 922 924 idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
-3
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_timing_generator.c
··· 496 496 timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA) 497 497 return false; 498 498 499 - if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE && 500 - tg->ctx->dc->debug.disable_stereo_support) 501 - return false; 502 499 /* Temporarily blocking interlacing mode until it's supported */ 503 500 if (timing->flags.INTERLACE == 1) 504 501 return false;
+1 -1
drivers/gpu/drm/amd/display/dc/inc/core_status.h
··· 38 38 DC_FAIL_DETACH_SURFACES = 8, 39 39 DC_FAIL_SURFACE_VALIDATE = 9, 40 40 DC_NO_DP_LINK_BANDWIDTH = 10, 41 - DC_EXCEED_DONGLE_MAX_CLK = 11, 41 + DC_EXCEED_DONGLE_CAP = 11, 42 42 DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12, 43 43 DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */ 44 44 DC_FAIL_SCALING = 14,
-7
drivers/gpu/drm/amd/display/dc/inc/hw/transform.h
··· 259 259 struct transform *xfm_base, 260 260 const struct dc_cursor_attributes *attr); 261 261 262 - void (*set_cursor_position)( 263 - struct transform *xfm_base, 264 - const struct dc_cursor_position *pos, 265 - const struct dc_cursor_mi_param *param, 266 - uint32_t width 267 - ); 268 - 269 262 }; 270 263 271 264 const uint16_t *get_filter_2tap_16p(void);
+1 -2
drivers/gpu/drm/arm/hdlcd_crtc.c
··· 317 317 formats, ARRAY_SIZE(formats), 318 318 NULL, 319 319 DRM_PLANE_TYPE_PRIMARY, NULL); 320 - if (ret) { 320 + if (ret) 321 321 return ERR_PTR(ret); 322 - } 323 322 324 323 drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs); 325 324 hdlcd->plane = plane;
+6 -3
drivers/gpu/drm/arm/hdlcd_drv.c
··· 13 13 #include <linux/spinlock.h> 14 14 #include <linux/clk.h> 15 15 #include <linux/component.h> 16 + #include <linux/console.h> 16 17 #include <linux/list.h> 17 18 #include <linux/of_graph.h> 18 19 #include <linux/of_reserved_mem.h> ··· 355 354 err_free: 356 355 drm_mode_config_cleanup(drm); 357 356 dev_set_drvdata(dev, NULL); 358 - drm_dev_unref(drm); 357 + drm_dev_put(drm); 359 358 360 359 return ret; 361 360 } ··· 380 379 pm_runtime_disable(drm->dev); 381 380 of_reserved_mem_device_release(drm->dev); 382 381 drm_mode_config_cleanup(drm); 383 - drm_dev_unref(drm); 382 + drm_dev_put(drm); 384 383 drm->dev_private = NULL; 385 384 dev_set_drvdata(dev, NULL); 386 385 } ··· 433 432 return 0; 434 433 435 434 drm_kms_helper_poll_disable(drm); 435 + drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1); 436 436 437 437 hdlcd->state = drm_atomic_helper_suspend(drm); 438 438 if (IS_ERR(hdlcd->state)) { 439 + drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0); 439 440 drm_kms_helper_poll_enable(drm); 440 441 return PTR_ERR(hdlcd->state); 441 442 } ··· 454 451 return 0; 455 452 456 453 drm_atomic_helper_resume(drm, hdlcd->state); 454 + drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0); 457 455 drm_kms_helper_poll_enable(drm); 458 - pm_runtime_set_active(dev); 459 456 460 457 return 0; 461 458 }
+10 -6
drivers/gpu/drm/arm/malidp_crtc.c
··· 65 65 /* We rely on firmware to set mclk to a sensible level. */ 66 66 clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000); 67 67 68 - hwdev->modeset(hwdev, &vm); 69 - hwdev->leave_config_mode(hwdev); 68 + hwdev->hw->modeset(hwdev, &vm); 69 + hwdev->hw->leave_config_mode(hwdev); 70 70 drm_crtc_vblank_on(crtc); 71 71 } 72 72 ··· 77 77 struct malidp_hw_device *hwdev = malidp->dev; 78 78 int err; 79 79 80 + /* always disable planes on the CRTC that is being turned off */ 81 + drm_atomic_helper_disable_planes_on_crtc(old_state, false); 82 + 80 83 drm_crtc_vblank_off(crtc); 81 - hwdev->enter_config_mode(hwdev); 84 + hwdev->hw->enter_config_mode(hwdev); 85 + 82 86 clk_disable_unprepare(hwdev->pxlclk); 83 87 84 88 err = pm_runtime_put(crtc->dev->dev); ··· 323 319 324 320 mclk_calc: 325 321 drm_display_mode_to_videomode(&state->adjusted_mode, &vm); 326 - ret = hwdev->se_calc_mclk(hwdev, s, &vm); 322 + ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm); 327 323 if (ret < 0) 328 324 return -EINVAL; 329 325 return 0; ··· 479 475 struct malidp_hw_device *hwdev = malidp->dev; 480 476 481 477 malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, 482 - hwdev->map.de_irq_map.vsync_irq); 478 + hwdev->hw->map.de_irq_map.vsync_irq); 483 479 return 0; 484 480 } 485 481 ··· 489 485 struct malidp_hw_device *hwdev = malidp->dev; 490 486 491 487 malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 492 - hwdev->map.de_irq_map.vsync_irq); 488 + hwdev->hw->map.de_irq_map.vsync_irq); 493 489 } 494 490 495 491 static const struct drm_crtc_funcs malidp_crtc_funcs = {
+15 -19
drivers/gpu/drm/arm/malidp_drv.c
··· 47 47 * directly. 48 48 */ 49 49 malidp_hw_write(hwdev, gamma_write_mask, 50 - hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR); 50 + hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR); 51 51 for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i) 52 52 malidp_hw_write(hwdev, data[i], 53 - hwdev->map.coeffs_base + 53 + hwdev->hw->map.coeffs_base + 54 54 MALIDP_COEF_TABLE_DATA); 55 55 } 56 56 ··· 103 103 for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i) 104 104 malidp_hw_write(hwdev, 105 105 mc->coloradj_coeffs[i], 106 - hwdev->map.coeffs_base + 106 + hwdev->hw->map.coeffs_base + 107 107 MALIDP_COLOR_ADJ_COEF + 4 * i); 108 108 109 109 malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ, ··· 120 120 struct malidp_hw_device *hwdev = malidp->dev; 121 121 struct malidp_se_config *s = &cs->scaler_config; 122 122 struct malidp_se_config *old_s = &old_cs->scaler_config; 123 - u32 se_control = hwdev->map.se_base + 124 - ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? 123 + u32 se_control = hwdev->hw->map.se_base + 124 + ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? 
125 125 0x10 : 0xC); 126 126 u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL; 127 127 u32 scr = se_control + MALIDP_SE_SCALING_CONTROL; ··· 135 135 return; 136 136 } 137 137 138 - hwdev->se_set_scaling_coeffs(hwdev, s, old_s); 138 + hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s); 139 139 val = malidp_hw_read(hwdev, se_control); 140 140 val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN; 141 141 ··· 170 170 int ret; 171 171 172 172 atomic_set(&malidp->config_valid, 0); 173 - hwdev->set_config_valid(hwdev); 173 + hwdev->hw->set_config_valid(hwdev); 174 174 /* don't wait for config_valid flag if we are in config mode */ 175 - if (hwdev->in_config_mode(hwdev)) 175 + if (hwdev->hw->in_config_mode(hwdev)) 176 176 return 0; 177 177 178 178 ret = wait_event_interruptible_timeout(malidp->wq, ··· 455 455 struct malidp_hw_device *hwdev = malidp->dev; 456 456 457 457 /* we can only suspend if the hardware is in config mode */ 458 - WARN_ON(!hwdev->in_config_mode(hwdev)); 458 + WARN_ON(!hwdev->hw->in_config_mode(hwdev)); 459 459 460 460 hwdev->pm_suspended = true; 461 461 clk_disable_unprepare(hwdev->mclk); ··· 500 500 if (!hwdev) 501 501 return -ENOMEM; 502 502 503 - /* 504 - * copy the associated data from malidp_drm_of_match to avoid 505 - * having to keep a reference to the OF node after binding 506 - */ 507 - memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev)); 503 + hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev); 508 504 malidp->dev = hwdev; 509 505 510 506 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ··· 564 568 goto query_hw_fail; 565 569 } 566 570 567 - ret = hwdev->query_hw(hwdev); 571 + ret = hwdev->hw->query_hw(hwdev); 568 572 if (ret) { 569 573 DRM_ERROR("Invalid HW configuration\n"); 570 574 goto query_hw_fail; 571 575 } 572 576 573 - version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID); 577 + version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID); 574 578 DRM_INFO("found ARM 
Mali-DP%3x version r%dp%d\n", version >> 16, 575 579 (version >> 12) & 0xf, (version >> 8) & 0xf); 576 580 ··· 585 589 586 590 for (i = 0; i < MAX_OUTPUT_CHANNELS; i++) 587 591 out_depth = (out_depth << 8) | (output_width[i] & 0xf); 588 - malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base); 592 + malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base); 589 593 590 594 atomic_set(&malidp->config_valid, 0); 591 595 init_waitqueue_head(&malidp->wq); ··· 667 671 malidp_runtime_pm_suspend(dev); 668 672 drm->dev_private = NULL; 669 673 dev_set_drvdata(dev, NULL); 670 - drm_dev_unref(drm); 674 + drm_dev_put(drm); 671 675 alloc_fail: 672 676 of_reserved_mem_device_release(dev); 673 677 ··· 700 704 malidp_runtime_pm_suspend(dev); 701 705 drm->dev_private = NULL; 702 706 dev_set_drvdata(dev, NULL); 703 - drm_dev_unref(drm); 707 + drm_dev_put(drm); 704 708 of_reserved_mem_device_release(dev); 705 709 } 706 710
+25 -21
drivers/gpu/drm/arm/malidp_hw.c
··· 183 183 184 184 malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); 185 185 while (count) { 186 - status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); 186 + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); 187 187 if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) 188 188 break; 189 189 /* ··· 203 203 malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID); 204 204 malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL); 205 205 while (count) { 206 - status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); 206 + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); 207 207 if ((status & MALIDP500_DC_CONFIG_REQ) == 0) 208 208 break; 209 209 usleep_range(100, 1000); ··· 216 216 { 217 217 u32 status; 218 218 219 - status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); 219 + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); 220 220 if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ) 221 221 return true; 222 222 ··· 407 407 408 408 malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); 409 409 while (count) { 410 - status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); 410 + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); 411 411 if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) 412 412 break; 413 413 /* ··· 427 427 malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID); 428 428 malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL); 429 429 while (count) { 430 - status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); 430 + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); 431 431 if ((status & MALIDP550_DC_CONFIG_REQ) == 0) 432 432 break; 433 433 usleep_range(100, 1000); ··· 440 440 { 441 441 u32 status; 442 
442 443 - status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); 443 + status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS); 444 444 if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ) 445 445 return true; 446 446 ··· 616 616 return 0; 617 617 } 618 618 619 - const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = { 619 + const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = { 620 620 [MALIDP_500] = { 621 621 .map = { 622 622 .coeffs_base = MALIDP500_COEFFS_BASE, ··· 751 751 { 752 752 u32 base = malidp_get_block_base(hwdev, block); 753 753 754 - if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) 754 + if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) 755 755 malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ); 756 756 else 757 757 malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS); ··· 762 762 struct drm_device *drm = arg; 763 763 struct malidp_drm *malidp = drm->dev_private; 764 764 struct malidp_hw_device *hwdev; 765 + struct malidp_hw *hw; 765 766 const struct malidp_irq_map *de; 766 767 u32 status, mask, dc_status; 767 768 irqreturn_t ret = IRQ_NONE; 768 769 769 770 hwdev = malidp->dev; 770 - de = &hwdev->map.de_irq_map; 771 + hw = hwdev->hw; 772 + de = &hw->map.de_irq_map; 771 773 772 774 /* 773 775 * if we are suspended it is likely that we were invoked because ··· 780 778 return IRQ_NONE; 781 779 782 780 /* first handle the config valid IRQ */ 783 - dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS); 784 - if (dc_status & hwdev->map.dc_irq_map.vsync_irq) { 781 + dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS); 782 + if (dc_status & hw->map.dc_irq_map.vsync_irq) { 785 783 /* we have a page flip event */ 786 784 atomic_set(&malidp->config_valid, 1); 787 785 malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status); ··· 834 832 835 833 /* first enable the DC block IRQs */ 836 834 malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK, 837 - 
hwdev->map.dc_irq_map.irq_mask); 835 + hwdev->hw->map.dc_irq_map.irq_mask); 838 836 839 837 /* now enable the DE block IRQs */ 840 838 malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK, 841 - hwdev->map.de_irq_map.irq_mask); 839 + hwdev->hw->map.de_irq_map.irq_mask); 842 840 843 841 return 0; 844 842 } ··· 849 847 struct malidp_hw_device *hwdev = malidp->dev; 850 848 851 849 malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK, 852 - hwdev->map.de_irq_map.irq_mask); 850 + hwdev->hw->map.de_irq_map.irq_mask); 853 851 malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK, 854 - hwdev->map.dc_irq_map.irq_mask); 852 + hwdev->hw->map.dc_irq_map.irq_mask); 855 853 } 856 854 857 855 static irqreturn_t malidp_se_irq(int irq, void *arg) ··· 859 857 struct drm_device *drm = arg; 860 858 struct malidp_drm *malidp = drm->dev_private; 861 859 struct malidp_hw_device *hwdev = malidp->dev; 860 + struct malidp_hw *hw = hwdev->hw; 861 + const struct malidp_irq_map *se = &hw->map.se_irq_map; 862 862 u32 status, mask; 863 863 864 864 /* ··· 871 867 if (hwdev->pm_suspended) 872 868 return IRQ_NONE; 873 869 874 - status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS); 875 - if (!(status & hwdev->map.se_irq_map.irq_mask)) 870 + status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS); 871 + if (!(status & se->irq_mask)) 876 872 return IRQ_NONE; 877 873 878 - mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ); 879 - status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS); 874 + mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ); 875 + status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS); 880 876 status &= mask; 881 877 /* ToDo: status decoding and firing up of VSYNC and page flip events */ 882 878 ··· 909 905 } 910 906 911 907 malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK, 912 - hwdev->map.se_irq_map.irq_mask); 908 + hwdev->hw->map.se_irq_map.irq_mask); 913 909 914 910 return 0; 915 911 } ··· 920 916 struct 
malidp_hw_device *hwdev = malidp->dev; 921 917 922 918 malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK, 923 - hwdev->map.se_irq_map.irq_mask); 919 + hwdev->hw->map.se_irq_map.irq_mask); 924 920 }
+39 -26
drivers/gpu/drm/arm/malidp_hw.h
··· 120 120 /* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */ 121 121 #define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0) 122 122 123 - struct malidp_hw_device { 124 - const struct malidp_hw_regmap map; 125 - void __iomem *regs; 123 + struct malidp_hw_device; 126 124 127 - /* APB clock */ 128 - struct clk *pclk; 129 - /* AXI clock */ 130 - struct clk *aclk; 131 - /* main clock for display core */ 132 - struct clk *mclk; 133 - /* pixel clock for display core */ 134 - struct clk *pxlclk; 125 + /* 126 + * Static structure containing hardware specific data and pointers to 127 + * functions that behave differently between various versions of the IP. 128 + */ 129 + struct malidp_hw { 130 + const struct malidp_hw_regmap map; 135 131 136 132 /* 137 133 * Validate the driver instance against the hardware bits ··· 178 182 struct videomode *vm); 179 183 180 184 u8 features; 181 - 182 - u8 min_line_size; 183 - u16 max_line_size; 184 - 185 - /* track the device PM state */ 186 - bool pm_suspended; 187 - 188 - /* size of memory used for rotating layers, up to two banks available */ 189 - u32 rotation_memory[2]; 190 185 }; 191 186 192 187 /* Supported variants of the hardware */ ··· 189 202 MALIDP_MAX_DEVICES 190 203 }; 191 204 192 - extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES]; 205 + extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES]; 206 + 207 + /* 208 + * Structure used by the driver during runtime operation. 
209 + */ 210 + struct malidp_hw_device { 211 + struct malidp_hw *hw; 212 + void __iomem *regs; 213 + 214 + /* APB clock */ 215 + struct clk *pclk; 216 + /* AXI clock */ 217 + struct clk *aclk; 218 + /* main clock for display core */ 219 + struct clk *mclk; 220 + /* pixel clock for display core */ 221 + struct clk *pxlclk; 222 + 223 + u8 min_line_size; 224 + u16 max_line_size; 225 + 226 + /* track the device PM state */ 227 + bool pm_suspended; 228 + 229 + /* size of memory used for rotating layers, up to two banks available */ 230 + u32 rotation_memory[2]; 231 + }; 193 232 194 233 static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) 195 234 { ··· 253 240 { 254 241 switch (block) { 255 242 case MALIDP_SE_BLOCK: 256 - return hwdev->map.se_base; 243 + return hwdev->hw->map.se_base; 257 244 case MALIDP_DC_BLOCK: 258 - return hwdev->map.dc_base; 245 + return hwdev->hw->map.dc_base; 259 246 } 260 247 261 248 return 0; ··· 288 275 static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev, 289 276 unsigned int pitch) 290 277 { 291 - return !(pitch & (hwdev->map.bus_align_bytes - 1)); 278 + return !(pitch & (hwdev->hw->map.bus_align_bytes - 1)); 292 279 } 293 280 294 281 /* U16.16 */ ··· 321 308 }; 322 309 u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) | 323 310 MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL); 324 - u32 image_enh = hwdev->map.se_base + 325 - ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? 311 + u32 image_enh = hwdev->hw->map.se_base + 312 + ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ? 326 313 0x10 : 0xC) + MALIDP_SE_IMAGE_ENH; 327 314 u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0; 328 315 int i;
+10 -11
drivers/gpu/drm/arm/malidp_planes.c
··· 57 57 struct malidp_plane *mp = to_malidp_plane(plane); 58 58 59 59 if (mp->base.fb) 60 - drm_framebuffer_unreference(mp->base.fb); 60 + drm_framebuffer_put(mp->base.fb); 61 61 62 62 drm_plane_helper_disable(plane); 63 63 drm_plane_cleanup(plane); ··· 185 185 186 186 fb = state->fb; 187 187 188 - ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id, 189 - fb->format->format); 188 + ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map, 189 + mp->layer->id, 190 + fb->format->format); 190 191 if (ms->format == MALIDP_INVALID_FORMAT_ID) 191 192 return -EINVAL; 192 193 ··· 212 211 * third plane stride register. 213 212 */ 214 213 if (ms->n_planes == 3 && 215 - !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) && 214 + !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) && 216 215 (state->fb->pitches[1] != state->fb->pitches[2])) 217 216 return -EINVAL; 218 217 ··· 230 229 if (state->rotation & MALIDP_ROTATED_MASK) { 231 230 int val; 232 231 233 - val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h, 234 - state->crtc_w, 235 - fb->format->format); 232 + val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h, 233 + state->crtc_w, 234 + fb->format->format); 236 235 if (val < 0) 237 236 return val; 238 237 ··· 252 251 return; 253 252 254 253 if (num_planes == 3) 255 - num_strides = (mp->hwdev->features & 254 + num_strides = (mp->hwdev->hw->features & 256 255 MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 
3 : 2; 257 256 258 257 for (i = 0; i < num_strides; ++i) ··· 265 264 struct drm_plane_state *old_state) 266 265 { 267 266 struct malidp_plane *mp; 268 - const struct malidp_hw_regmap *map; 269 267 struct malidp_plane_state *ms = to_malidp_plane_state(plane->state); 270 268 u32 src_w, src_h, dest_w, dest_h, val; 271 269 int i; 272 270 273 271 mp = to_malidp_plane(plane); 274 - map = &mp->hwdev->map; 275 272 276 273 /* convert src values from Q16 fixed point to integer */ 277 274 src_w = plane->state->src_w >> 16; ··· 362 363 int malidp_de_planes_init(struct drm_device *drm) 363 364 { 364 365 struct malidp_drm *malidp = drm->dev_private; 365 - const struct malidp_hw_regmap *map = &malidp->dev->map; 366 + const struct malidp_hw_regmap *map = &malidp->dev->hw->map; 366 367 struct malidp_plane *plane = NULL; 367 368 enum drm_plane_type plane_type; 368 369 unsigned long crtcs = 1 << drm->mode_config.num_crtc;
+11 -2
drivers/gpu/drm/bridge/adv7511/adv7511.h
··· 372 372 }; 373 373 374 374 #ifdef CONFIG_DRM_I2C_ADV7511_CEC 375 - int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511, 376 - unsigned int offset); 375 + int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511); 377 376 void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1); 377 + #else 378 + static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) 379 + { 380 + unsigned int offset = adv7511->type == ADV7533 ? 381 + ADV7533_REG_CEC_OFFSET : 0; 382 + 383 + regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 384 + ADV7511_CEC_CTRL_POWER_DOWN); 385 + return 0; 386 + } 378 387 #endif 379 388 380 389 #ifdef CONFIG_DRM_I2C_ADV7533
+22 -10
drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
··· 300 300 return 0; 301 301 } 302 302 303 - int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511, 304 - unsigned int offset) 303 + int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511) 305 304 { 305 + unsigned int offset = adv7511->type == ADV7533 ? 306 + ADV7533_REG_CEC_OFFSET : 0; 306 307 int ret = adv7511_cec_parse_dt(dev, adv7511); 307 308 308 309 if (ret) 309 - return ret; 310 + goto err_cec_parse_dt; 310 311 311 312 adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops, 312 313 adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS); 313 - if (IS_ERR(adv7511->cec_adap)) 314 - return PTR_ERR(adv7511->cec_adap); 314 + if (IS_ERR(adv7511->cec_adap)) { 315 + ret = PTR_ERR(adv7511->cec_adap); 316 + goto err_cec_alloc; 317 + } 315 318 316 319 regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0); 317 320 /* cec soft reset */ ··· 332 329 ((adv7511->cec_clk_freq / 750000) - 1) << 2); 333 330 334 331 ret = cec_register_adapter(adv7511->cec_adap, dev); 335 - if (ret) { 336 - cec_delete_adapter(adv7511->cec_adap); 337 - adv7511->cec_adap = NULL; 338 - } 339 - return ret; 332 + if (ret) 333 + goto err_cec_register; 334 + return 0; 335 + 336 + err_cec_register: 337 + cec_delete_adapter(adv7511->cec_adap); 338 + adv7511->cec_adap = NULL; 339 + err_cec_alloc: 340 + dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n", 341 + ret); 342 + err_cec_parse_dt: 343 + regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 344 + ADV7511_CEC_CTRL_POWER_DOWN); 345 + return ret == -EPROBE_DEFER ? ret : 0; 340 346 }
+4 -13
drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
··· 1084 1084 struct device *dev = &i2c->dev; 1085 1085 unsigned int main_i2c_addr = i2c->addr << 1; 1086 1086 unsigned int edid_i2c_addr = main_i2c_addr + 4; 1087 - unsigned int offset; 1088 1087 unsigned int val; 1089 1088 int ret; 1090 1089 ··· 1191 1192 if (adv7511->type == ADV7511) 1192 1193 adv7511_set_link_config(adv7511, &link_config); 1193 1194 1195 + ret = adv7511_cec_init(dev, adv7511); 1196 + if (ret) 1197 + goto err_unregister_cec; 1198 + 1194 1199 adv7511->bridge.funcs = &adv7511_bridge_funcs; 1195 1200 adv7511->bridge.of_node = dev->of_node; 1196 1201 1197 1202 drm_bridge_add(&adv7511->bridge); 1198 1203 1199 1204 adv7511_audio_init(dev, adv7511); 1200 - 1201 - offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0; 1202 - 1203 - #ifdef CONFIG_DRM_I2C_ADV7511_CEC 1204 - ret = adv7511_cec_init(dev, adv7511, offset); 1205 - if (ret) 1206 - goto err_unregister_cec; 1207 - #else 1208 - regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 1209 - ADV7511_CEC_CTRL_POWER_DOWN); 1210 - #endif 1211 - 1212 1205 return 0; 1213 1206 1214 1207 err_unregister_cec:
+41 -7
drivers/gpu/drm/bridge/lvds-encoder.c
··· 13 13 14 14 #include <linux/of_graph.h> 15 15 16 + struct lvds_encoder { 17 + struct drm_bridge bridge; 18 + struct drm_bridge *panel_bridge; 19 + }; 20 + 21 + static int lvds_encoder_attach(struct drm_bridge *bridge) 22 + { 23 + struct lvds_encoder *lvds_encoder = container_of(bridge, 24 + struct lvds_encoder, 25 + bridge); 26 + 27 + return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge, 28 + bridge); 29 + } 30 + 31 + static struct drm_bridge_funcs funcs = { 32 + .attach = lvds_encoder_attach, 33 + }; 34 + 16 35 static int lvds_encoder_probe(struct platform_device *pdev) 17 36 { 18 37 struct device_node *port; 19 38 struct device_node *endpoint; 20 39 struct device_node *panel_node; 21 40 struct drm_panel *panel; 22 - struct drm_bridge *bridge; 41 + struct lvds_encoder *lvds_encoder; 42 + 43 + lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder), 44 + GFP_KERNEL); 45 + if (!lvds_encoder) 46 + return -ENOMEM; 23 47 24 48 /* Locate the panel DT node. */ 25 49 port = of_graph_get_port_by_id(pdev->dev.of_node, 1); ··· 73 49 return -EPROBE_DEFER; 74 50 } 75 51 76 - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS); 77 - if (IS_ERR(bridge)) 78 - return PTR_ERR(bridge); 52 + lvds_encoder->panel_bridge = 53 + devm_drm_panel_bridge_add(&pdev->dev, 54 + panel, DRM_MODE_CONNECTOR_LVDS); 55 + if (IS_ERR(lvds_encoder->panel_bridge)) 56 + return PTR_ERR(lvds_encoder->panel_bridge); 79 57 80 - platform_set_drvdata(pdev, bridge); 58 + /* The panel_bridge bridge is attached to the panel's of_node, 59 + * but we need a bridge attached to our of_node for our user 60 + * to look up. 
61 + */ 62 + lvds_encoder->bridge.of_node = pdev->dev.of_node; 63 + lvds_encoder->bridge.funcs = &funcs; 64 + drm_bridge_add(&lvds_encoder->bridge); 65 + 66 + platform_set_drvdata(pdev, lvds_encoder); 81 67 82 68 return 0; 83 69 } 84 70 85 71 static int lvds_encoder_remove(struct platform_device *pdev) 86 72 { 87 - struct drm_bridge *bridge = platform_get_drvdata(pdev); 73 + struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev); 88 74 89 - drm_bridge_remove(bridge); 75 + drm_bridge_remove(&lvds_encoder->bridge); 90 76 91 77 return 0; 92 78 }
+25
drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
··· 138 138 struct device *dev; 139 139 struct clk *isfr_clk; 140 140 struct clk *iahb_clk; 141 + struct clk *cec_clk; 141 142 struct dw_hdmi_i2c *i2c; 142 143 143 144 struct hdmi_data_info hdmi_data; ··· 2383 2382 goto err_isfr; 2384 2383 } 2385 2384 2385 + hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec"); 2386 + if (PTR_ERR(hdmi->cec_clk) == -ENOENT) { 2387 + hdmi->cec_clk = NULL; 2388 + } else if (IS_ERR(hdmi->cec_clk)) { 2389 + ret = PTR_ERR(hdmi->cec_clk); 2390 + if (ret != -EPROBE_DEFER) 2391 + dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n", 2392 + ret); 2393 + 2394 + hdmi->cec_clk = NULL; 2395 + goto err_iahb; 2396 + } else { 2397 + ret = clk_prepare_enable(hdmi->cec_clk); 2398 + if (ret) { 2399 + dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n", 2400 + ret); 2401 + goto err_iahb; 2402 + } 2403 + } 2404 + 2386 2405 /* Product and revision IDs */ 2387 2406 hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8) 2388 2407 | (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0); ··· 2539 2518 cec_notifier_put(hdmi->cec_notifier); 2540 2519 2541 2520 clk_disable_unprepare(hdmi->iahb_clk); 2521 + if (hdmi->cec_clk) 2522 + clk_disable_unprepare(hdmi->cec_clk); 2542 2523 err_isfr: 2543 2524 clk_disable_unprepare(hdmi->isfr_clk); 2544 2525 err_res: ··· 2564 2541 2565 2542 clk_disable_unprepare(hdmi->iahb_clk); 2566 2543 clk_disable_unprepare(hdmi->isfr_clk); 2544 + if (hdmi->cec_clk) 2545 + clk_disable_unprepare(hdmi->cec_clk); 2567 2546 2568 2547 if (hdmi->i2c) 2569 2548 i2c_del_adapter(&hdmi->i2c->adap);
+40 -33
drivers/gpu/drm/bridge/tc358767.c
··· 97 97 #define DP0_ACTIVEVAL 0x0650 98 98 #define DP0_SYNCVAL 0x0654 99 99 #define DP0_MISC 0x0658 100 - #define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */ 100 + #define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */ 101 101 #define BPC_6 (0 << 5) 102 102 #define BPC_8 (1 << 5) 103 103 ··· 318 318 tmp = (tmp << 8) | buf[i]; 319 319 i++; 320 320 if (((i % 4) == 0) || (i == size)) { 321 - tc_write(DP0_AUXWDATA(i >> 2), tmp); 321 + tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp); 322 322 tmp = 0; 323 323 } 324 324 } ··· 603 603 ret = drm_dp_link_probe(&tc->aux, &tc->link.base); 604 604 if (ret < 0) 605 605 goto err_dpcd_read; 606 - if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000)) 607 - goto err_dpcd_inval; 606 + if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) { 607 + dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n"); 608 + tc->link.base.rate = 270000; 609 + } 610 + 611 + if (tc->link.base.num_lanes > 2) { 612 + dev_dbg(tc->dev, "Falling to 2 lanes\n"); 613 + tc->link.base.num_lanes = 2; 614 + } 608 615 609 616 ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp); 610 617 if (ret < 0) ··· 644 637 err_dpcd_read: 645 638 dev_err(tc->dev, "failed to read DPCD: %d\n", ret); 646 639 return ret; 647 - err_dpcd_inval: 648 - dev_err(tc->dev, "invalid DPCD\n"); 649 - return -EINVAL; 650 640 } 651 641 652 642 static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode) ··· 659 655 int lower_margin = mode->vsync_start - mode->vdisplay; 660 656 int vsync_len = mode->vsync_end - mode->vsync_start; 661 657 658 + /* 659 + * Recommended maximum number of symbols transferred in a transfer unit: 660 + * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, 661 + * (output active video bandwidth in bytes)) 662 + * Must be less than tu_size. 
663 + */ 664 + max_tu_symbol = TU_SIZE_RECOMMENDED - 1; 665 + 662 666 dev_dbg(tc->dev, "set mode %dx%d\n", 663 667 mode->hdisplay, mode->vdisplay); 664 668 dev_dbg(tc->dev, "H margin %d,%d sync %d\n", ··· 676 664 dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal); 677 665 678 666 679 - /* LCD Ctl Frame Size */ 680 - tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ | 667 + /* 668 + * LCD Ctl Frame Size 669 + * datasheet is not clear of vsdelay in case of DPI 670 + * assume we do not need any delay when DPI is a source of 671 + * sync signals 672 + */ 673 + tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ | 681 674 OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED); 682 - tc_write(HTIM01, (left_margin << 16) | /* H back porch */ 683 - (hsync_len << 0)); /* Hsync */ 684 - tc_write(HTIM02, (right_margin << 16) | /* H front porch */ 685 - (mode->hdisplay << 0)); /* width */ 675 + tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */ 676 + (ALIGN(hsync_len, 2) << 0)); /* Hsync */ 677 + tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */ 678 + (ALIGN(mode->hdisplay, 2) << 0)); /* width */ 686 679 tc_write(VTIM01, (upper_margin << 16) | /* V back porch */ 687 680 (vsync_len << 0)); /* Vsync */ 688 681 tc_write(VTIM02, (lower_margin << 16) | /* V front porch */ ··· 706 689 /* DP Main Stream Attributes */ 707 690 vid_sync_dly = hsync_len + left_margin + mode->hdisplay; 708 691 tc_write(DP0_VIDSYNCDELAY, 709 - (0x003e << 16) | /* thresh_dly */ 692 + (max_tu_symbol << 16) | /* thresh_dly */ 710 693 (vid_sync_dly << 0)); 711 694 712 695 tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal)); ··· 722 705 tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW | 723 706 DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888); 724 707 725 - /* 726 - * Recommended maximum number of symbols transferred in a transfer unit: 727 - * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size, 728 - * (output active video bandwidth in 
bytes)) 729 - * Must be less than tu_size. 730 - */ 731 - max_tu_symbol = TU_SIZE_RECOMMENDED - 1; 732 - tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8); 708 + tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) | 709 + BPC_8); 733 710 734 711 return 0; 735 712 err: ··· 819 808 unsigned int rate; 820 809 u32 dp_phy_ctrl; 821 810 int timeout; 822 - bool aligned; 823 - bool ready; 824 811 u32 value; 825 812 int ret; 826 813 u8 tmp[8]; ··· 963 954 ret = drm_dp_dpcd_read_link_status(aux, tmp + 2); 964 955 if (ret < 0) 965 956 goto err_dpcd_read; 966 - ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */ 967 - DP_CHANNEL_EQ_BITS)); /* Lane0 */ 968 - aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE; 969 - } while ((--timeout) && !(ready && aligned)); 957 + } while ((--timeout) && 958 + !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes))); 970 959 971 960 if (timeout == 0) { 972 961 /* Read DPCD 0x200-0x201 */ 973 962 ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2); 974 963 if (ret < 0) 975 964 goto err_dpcd_read; 965 + dev_err(dev, "channel(s) EQ not ok\n"); 976 966 dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]); 977 967 dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n", 978 968 tmp[1]); ··· 982 974 dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n", 983 975 tmp[6]); 984 976 985 - if (!ready) 986 - dev_err(dev, "Lane0/1 not ready\n"); 987 - if (!aligned) 988 - dev_err(dev, "Lane0/1 not aligned\n"); 989 977 return -EAGAIN; 990 978 } 991 979 ··· 1103 1099 static int tc_connector_mode_valid(struct drm_connector *connector, 1104 1100 struct drm_display_mode *mode) 1105 1101 { 1106 - /* Accept any mode */ 1102 + /* DPI interface clock limitation: upto 154 MHz */ 1103 + if (mode->clock > 154000) 1104 + return MODE_CLOCK_HIGH; 1105 + 1107 1106 return MODE_OK; 1108 1107 } 1109 1108
+1 -1
drivers/gpu/drm/drm_atomic_helper.c
··· 1225 1225 return; 1226 1226 1227 1227 for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) { 1228 - if (!new_crtc_state->active || !new_crtc_state->planes_changed) 1228 + if (!new_crtc_state->active) 1229 1229 continue; 1230 1230 1231 1231 ret = drm_crtc_vblank_get(crtc);
+4
drivers/gpu/drm/drm_fb_helper.c
··· 1809 1809 1810 1810 if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) { 1811 1811 DRM_INFO("Cannot find any crtc or sizes\n"); 1812 + 1813 + /* First time: disable all crtc's.. */ 1814 + if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master)) 1815 + restore_fbdev_mode(fb_helper); 1812 1816 return -EAGAIN; 1813 1817 } 1814 1818
+2
drivers/gpu/drm/i915/gvt/display.c
··· 282 282 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, 283 283 int type, unsigned int resolution) 284 284 { 285 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 285 286 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); 286 287 287 288 if (WARN_ON(resolution >= GVT_EDID_NUM)) ··· 308 307 port->type = type; 309 308 310 309 emulate_monitor_status_change(vgpu); 310 + vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; 311 311 return 0; 312 312 } 313 313
+6
drivers/gpu/drm/i915/gvt/execlist.c
··· 496 496 goto err_unpin_mm; 497 497 } 498 498 499 + ret = intel_gvt_generate_request(workload); 500 + if (ret) { 501 + gvt_vgpu_err("fail to generate request\n"); 502 + goto err_unpin_mm; 503 + } 504 + 499 505 ret = prepare_shadow_batch_buffer(workload); 500 506 if (ret) { 501 507 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
+3 -3
drivers/gpu/drm/i915/gvt/gtt.c
··· 311 311 312 312 #define GTT_HAW 46 313 313 314 - #define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30) 315 - #define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21) 316 - #define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12) 314 + #define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30) 315 + #define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21) 316 + #define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12) 317 317 318 318 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e) 319 319 {
+5 -40
drivers/gpu/drm/i915/gvt/handlers.c
··· 1381 1381 return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes); 1382 1382 } 1383 1383 1384 - static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset, 1385 - void *p_data, unsigned int bytes) 1386 - { 1387 - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 1388 - u32 v = *(u32 *)p_data; 1389 - 1390 - if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)) 1391 - return intel_vgpu_default_mmio_write(vgpu, 1392 - offset, p_data, bytes); 1393 - 1394 - switch (offset) { 1395 - case 0x4ddc: 1396 - /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1397 - vgpu_vreg(vgpu, offset) = v & ~(1 << 31); 1398 - break; 1399 - case 0x42080: 1400 - /* bypass WaCompressedResourceDisplayNewHashMode */ 1401 - vgpu_vreg(vgpu, offset) = v & ~(1 << 15); 1402 - break; 1403 - case 0xe194: 1404 - /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1405 - vgpu_vreg(vgpu, offset) = v & ~(1 << 8); 1406 - break; 1407 - case 0x7014: 1408 - /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */ 1409 - vgpu_vreg(vgpu, offset) = v & ~(1 << 13); 1410 - break; 1411 - default: 1412 - return -EINVAL; 1413 - } 1414 - 1415 - return 0; 1416 - } 1417 - 1418 1384 static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, 1419 1385 void *p_data, unsigned int bytes) 1420 1386 { ··· 1637 1671 MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); 1638 1672 MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1639 1673 NULL, NULL); 1640 - MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, 1641 - skl_misc_ctl_write); 1674 + MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1675 + NULL, NULL); 1642 1676 MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); 1643 1677 MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); 1644 1678 MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); ··· 2530 2564 MMIO_D(0x6e570, D_BDW_PLUS); 2531 2565 MMIO_D(0x65f10, D_BDW_PLUS); 2532 2566 2533 - 
MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, 2534 - skl_misc_ctl_write); 2567 + MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2535 2568 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2536 2569 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2537 2570 MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); ··· 2580 2615 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2581 2616 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2582 2617 MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); 2583 - MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write); 2584 - MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write); 2618 + MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL); 2619 + MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL); 2585 2620 MMIO_D(0x45504, D_SKL_PLUS); 2586 2621 MMIO_D(0x45520, D_SKL_PLUS); 2587 2622 MMIO_D(0x46000, D_SKL_PLUS);
+25 -8
drivers/gpu/drm/i915/gvt/scheduler.c
··· 140 140 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 141 141 enum intel_engine_id ring_id = req->engine->id; 142 142 struct intel_vgpu_workload *workload; 143 + unsigned long flags; 143 144 144 145 if (!is_gvt_request(req)) { 145 - spin_lock_bh(&scheduler->mmio_context_lock); 146 + spin_lock_irqsave(&scheduler->mmio_context_lock, flags); 146 147 if (action == INTEL_CONTEXT_SCHEDULE_IN && 147 148 scheduler->engine_owner[ring_id]) { 148 149 /* Switch ring from vGPU to host. */ ··· 151 150 NULL, ring_id); 152 151 scheduler->engine_owner[ring_id] = NULL; 153 152 } 154 - spin_unlock_bh(&scheduler->mmio_context_lock); 153 + spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); 155 154 156 155 return NOTIFY_OK; 157 156 } ··· 162 161 163 162 switch (action) { 164 163 case INTEL_CONTEXT_SCHEDULE_IN: 165 - spin_lock_bh(&scheduler->mmio_context_lock); 164 + spin_lock_irqsave(&scheduler->mmio_context_lock, flags); 166 165 if (workload->vgpu != scheduler->engine_owner[ring_id]) { 167 166 /* Switch ring from host to vGPU or vGPU to vGPU. 
*/ 168 167 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id], ··· 171 170 } else 172 171 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n", 173 172 ring_id, workload->vgpu->id); 174 - spin_unlock_bh(&scheduler->mmio_context_lock); 173 + spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags); 175 174 atomic_set(&workload->shadow_ctx_active, 1); 176 175 break; 177 176 case INTEL_CONTEXT_SCHEDULE_OUT: ··· 254 253 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; 255 254 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 256 255 struct intel_engine_cs *engine = dev_priv->engine[ring_id]; 257 - struct drm_i915_gem_request *rq; 258 256 struct intel_vgpu *vgpu = workload->vgpu; 259 257 struct intel_ring *ring; 260 258 int ret; ··· 299 299 ret = populate_shadow_context(workload); 300 300 if (ret) 301 301 goto err_unpin; 302 + workload->shadowed = true; 303 + return 0; 304 + 305 + err_unpin: 306 + engine->context_unpin(engine, shadow_ctx); 307 + err_shadow: 308 + release_shadow_wa_ctx(&workload->wa_ctx); 309 + err_scan: 310 + return ret; 311 + } 312 + 313 + int intel_gvt_generate_request(struct intel_vgpu_workload *workload) 314 + { 315 + int ring_id = workload->ring_id; 316 + struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 317 + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; 318 + struct drm_i915_gem_request *rq; 319 + struct intel_vgpu *vgpu = workload->vgpu; 320 + struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx; 321 + int ret; 302 322 303 323 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 304 324 if (IS_ERR(rq)) { ··· 333 313 ret = copy_workload_to_ring_buffer(workload); 334 314 if (ret) 335 315 goto err_unpin; 336 - workload->shadowed = true; 337 316 return 0; 338 317 339 318 err_unpin: 340 319 engine->context_unpin(engine, shadow_ctx); 341 - err_shadow: 342 320 release_shadow_wa_ctx(&workload->wa_ctx); 343 - err_scan: 344 321 return ret; 345 322 } 346 323
+3
drivers/gpu/drm/i915/gvt/scheduler.h
··· 142 142 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu); 143 143 144 144 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx); 145 + 146 + int intel_gvt_generate_request(struct intel_vgpu_workload *workload); 147 + 145 148 #endif
+2 -1
drivers/gpu/drm/i915/i915_gemfs.c
··· 52 52 53 53 if (has_transparent_hugepage()) { 54 54 struct super_block *sb = gemfs->mnt_sb; 55 - char options[] = "huge=within_size"; 55 + /* FIXME: Disabled until we get W/A for read BW issue. */ 56 + char options[] = "huge=never"; 56 57 int flags = 0; 57 58 int err; 58 59
+1 -1
drivers/gpu/drm/i915/intel_drv.h
··· 1736 1736 int intel_backlight_device_register(struct intel_connector *connector); 1737 1737 void intel_backlight_device_unregister(struct intel_connector *connector); 1738 1738 #else /* CONFIG_BACKLIGHT_CLASS_DEVICE */ 1739 - static int intel_backlight_device_register(struct intel_connector *connector) 1739 + static inline int intel_backlight_device_register(struct intel_connector *connector) 1740 1740 { 1741 1741 return 0; 1742 1742 }
+6 -4
drivers/gpu/drm/i915/intel_fbdev.c
··· 697 697 698 698 /* Due to peculiar init order wrt to hpd handling this is separate. */ 699 699 if (drm_fb_helper_initial_config(&ifbdev->helper, 700 - ifbdev->preferred_bpp)) { 700 + ifbdev->preferred_bpp)) 701 701 intel_fbdev_unregister(to_i915(ifbdev->helper.dev)); 702 - intel_fbdev_fini(to_i915(ifbdev->helper.dev)); 703 - } 704 702 } 705 703 706 704 void intel_fbdev_initial_config_async(struct drm_device *dev) ··· 798 800 { 799 801 struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; 800 802 801 - if (ifbdev) 803 + if (!ifbdev) 804 + return; 805 + 806 + intel_fbdev_sync(ifbdev); 807 + if (ifbdev->vma) 802 808 drm_fb_helper_hotplug_event(&ifbdev->helper); 803 809 } 804 810
+3 -1
drivers/gpu/drm/i915/intel_i2c.c
··· 438 438 gmbus_is_index_read(struct i2c_msg *msgs, int i, int num) 439 439 { 440 440 return (i + 1 < num && 441 - !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 && 441 + msgs[i].addr == msgs[i + 1].addr && 442 + !(msgs[i].flags & I2C_M_RD) && 443 + (msgs[i].len == 1 || msgs[i].len == 2) && 442 444 (msgs[i + 1].flags & I2C_M_RD)); 443 445 } 444 446
+9 -2
drivers/gpu/drm/imx/imx-drm-core.c
··· 133 133 plane_disabling = true; 134 134 } 135 135 136 - if (plane_disabling) { 137 - drm_atomic_helper_wait_for_vblanks(dev, state); 136 + /* 137 + * The flip done wait is only strictly required by imx-drm if a deferred 138 + * plane disable is in-flight. As the core requires blocking commits 139 + * to wait for the flip it is done here unconditionally. This keeps the 140 + * workitem around a bit longer than required for the majority of 141 + * non-blocking commits, but we accept that for the sake of simplicity. 142 + */ 143 + drm_atomic_helper_wait_for_flip_done(dev, state); 138 144 145 + if (plane_disabling) { 139 146 for_each_old_plane_in_state(state, plane, old_plane_state, i) 140 147 ipu_plane_disable_deferred(plane); 141 148
+1
drivers/gpu/drm/omapdrm/displays/Kconfig
··· 35 35 36 36 config DRM_OMAP_PANEL_DPI 37 37 tristate "Generic DPI panel" 38 + depends on BACKLIGHT_CLASS_DEVICE 38 39 help 39 40 Driver for generic DPI panels. 40 41
+2 -2
drivers/gpu/drm/omapdrm/dss/dpi.c
··· 566 566 } 567 567 568 568 static const struct soc_device_attribute dpi_soc_devices[] = { 569 - { .family = "OMAP3[456]*" }, 570 - { .family = "[AD]M37*" }, 569 + { .machine = "OMAP3[456]*" }, 570 + { .machine = "[AD]M37*" }, 571 571 { /* sentinel */ } 572 572 }; 573 573
+1 -1
drivers/gpu/drm/omapdrm/dss/hdmi4_cec.c
··· 352 352 { 353 353 const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | 354 354 CEC_CAP_PASSTHROUGH | CEC_CAP_RC; 355 - unsigned int ret; 355 + int ret; 356 356 357 357 core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core, 358 358 "omap4", caps, CEC_MAX_LOG_ADDRS);
+17 -6
drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
··· 886 886 bool audio_use_mclk; 887 887 }; 888 888 889 - static const struct hdmi4_features hdmi4_es1_features = { 889 + static const struct hdmi4_features hdmi4430_es1_features = { 890 890 .cts_swmode = false, 891 891 .audio_use_mclk = false, 892 892 }; 893 893 894 - static const struct hdmi4_features hdmi4_es2_features = { 894 + static const struct hdmi4_features hdmi4430_es2_features = { 895 895 .cts_swmode = true, 896 896 .audio_use_mclk = false, 897 897 }; 898 898 899 - static const struct hdmi4_features hdmi4_es3_features = { 899 + static const struct hdmi4_features hdmi4_features = { 900 900 .cts_swmode = true, 901 901 .audio_use_mclk = true, 902 902 }; 903 903 904 904 static const struct soc_device_attribute hdmi4_soc_devices[] = { 905 - { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features }, 906 - { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features }, 907 - { .family = "OMAP4", .data = &hdmi4_es3_features }, 905 + { 906 + .machine = "OMAP4430", 907 + .revision = "ES1.?", 908 + .data = &hdmi4430_es1_features, 909 + }, 910 + { 911 + .machine = "OMAP4430", 912 + .revision = "ES2.?", 913 + .data = &hdmi4430_es2_features, 914 + }, 915 + { 916 + .family = "OMAP4", 917 + .data = &hdmi4_features, 918 + }, 908 919 { /* sentinel */ } 909 920 }; 910 921
+2 -1
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
··· 638 638 match = of_match_node(dmm_of_match, dev->dev.of_node); 639 639 if (!match) { 640 640 dev_err(&dev->dev, "failed to find matching device node\n"); 641 - return -ENODEV; 641 + ret = -ENODEV; 642 + goto fail; 642 643 } 643 644 644 645 omap_dmm->plat_data = match->data;
-24
drivers/gpu/drm/radeon/cik.c
··· 5451 5451 WREG32(VM_INVALIDATE_REQUEST, 0x1); 5452 5452 } 5453 5453 5454 - static void cik_pcie_init_compute_vmid(struct radeon_device *rdev) 5455 - { 5456 - int i; 5457 - uint32_t sh_mem_bases, sh_mem_config; 5458 - 5459 - sh_mem_bases = 0x6000 | 0x6000 << 16; 5460 - sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED); 5461 - sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED); 5462 - 5463 - mutex_lock(&rdev->srbm_mutex); 5464 - for (i = 8; i < 16; i++) { 5465 - cik_srbm_select(rdev, 0, 0, 0, i); 5466 - /* CP and shaders */ 5467 - WREG32(SH_MEM_CONFIG, sh_mem_config); 5468 - WREG32(SH_MEM_APE1_BASE, 1); 5469 - WREG32(SH_MEM_APE1_LIMIT, 0); 5470 - WREG32(SH_MEM_BASES, sh_mem_bases); 5471 - } 5472 - cik_srbm_select(rdev, 0, 0, 0, 0); 5473 - mutex_unlock(&rdev->srbm_mutex); 5474 - } 5475 - 5476 5454 /** 5477 5455 * cik_pcie_gart_enable - gart enable 5478 5456 * ··· 5563 5585 } 5564 5586 cik_srbm_select(rdev, 0, 0, 0, 0); 5565 5587 mutex_unlock(&rdev->srbm_mutex); 5566 - 5567 - cik_pcie_init_compute_vmid(rdev); 5568 5588 5569 5589 cik_pcie_gart_tlb_flush(rdev); 5570 5590 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
+1 -2
drivers/gpu/drm/rockchip/dw-mipi-dsi.c
··· 1285 1285 goto err_pllref; 1286 1286 } 1287 1287 1288 - pm_runtime_enable(dev); 1289 - 1290 1288 dsi->dsi_host.ops = &dw_mipi_dsi_host_ops; 1291 1289 dsi->dsi_host.dev = dev; 1292 1290 ret = mipi_dsi_host_register(&dsi->dsi_host); ··· 1299 1301 } 1300 1302 1301 1303 dev_set_drvdata(dev, dsi); 1304 + pm_runtime_enable(dev); 1302 1305 return 0; 1303 1306 1304 1307 err_mipi_dsi_host:
-2
drivers/gpu/drm/ttm/ttm_page_alloc.c
··· 1062 1062 } 1063 1063 EXPORT_SYMBOL(ttm_pool_unpopulate); 1064 1064 1065 - #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) 1066 1065 int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) 1067 1066 { 1068 1067 unsigned i, j; ··· 1132 1133 ttm_pool_unpopulate(&tt->ttm); 1133 1134 } 1134 1135 EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); 1135 - #endif 1136 1136 1137 1137 int ttm_page_alloc_debugfs(struct seq_file *m, void *data) 1138 1138 {
+10 -22
include/drm/ttm/ttm_page_alloc.h
··· 59 59 void ttm_pool_unpopulate(struct ttm_tt *ttm); 60 60 61 61 /** 62 + * Populates and DMA maps pages to fullfil a ttm_dma_populate() request 63 + */ 64 + int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); 65 + 66 + /** 67 + * Unpopulates and DMA unmaps pages as part of a 68 + * ttm_dma_unpopulate() request */ 69 + void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); 70 + 71 + /** 62 72 * Output the state of pools to debugfs file 63 73 */ 64 74 int ttm_page_alloc_debugfs(struct seq_file *m, void *data); 65 - 66 75 67 76 #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) 68 77 /** ··· 91 82 92 83 int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev); 93 84 void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); 94 - 95 - 96 - /** 97 - * Populates and DMA maps pages to fullfil a ttm_dma_populate() request 98 - */ 99 - int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt); 100 - 101 - /** 102 - * Unpopulates and DMA unmaps pages as part of a 103 - * ttm_dma_unpopulate() request */ 104 - void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); 105 85 106 86 #else 107 87 static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, ··· 114 116 struct device *dev) 115 117 { 116 118 } 117 - 118 - static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) 119 - { 120 - return -ENOMEM; 121 - } 122 - 123 - static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) 124 - { 125 - } 126 - 127 119 #endif 128 120 129 121 #endif
+11 -11
include/uapi/linux/kfd_ioctl.h
··· 233 233 }; 234 234 235 235 struct kfd_ioctl_set_scratch_backing_va_args { 236 - uint64_t va_addr; /* to KFD */ 237 - uint32_t gpu_id; /* to KFD */ 238 - uint32_t pad; 236 + __u64 va_addr; /* to KFD */ 237 + __u32 gpu_id; /* to KFD */ 238 + __u32 pad; 239 239 }; 240 240 241 241 struct kfd_ioctl_get_tile_config_args { 242 242 /* to KFD: pointer to tile array */ 243 - uint64_t tile_config_ptr; 243 + __u64 tile_config_ptr; 244 244 /* to KFD: pointer to macro tile array */ 245 - uint64_t macro_tile_config_ptr; 245 + __u64 macro_tile_config_ptr; 246 246 /* to KFD: array size allocated by user mode 247 247 * from KFD: array size filled by kernel 248 248 */ 249 - uint32_t num_tile_configs; 249 + __u32 num_tile_configs; 250 250 /* to KFD: array size allocated by user mode 251 251 * from KFD: array size filled by kernel 252 252 */ 253 - uint32_t num_macro_tile_configs; 253 + __u32 num_macro_tile_configs; 254 254 255 - uint32_t gpu_id; /* to KFD */ 256 - uint32_t gb_addr_config; /* from KFD */ 257 - uint32_t num_banks; /* from KFD */ 258 - uint32_t num_ranks; /* from KFD */ 255 + __u32 gpu_id; /* to KFD */ 256 + __u32 gb_addr_config; /* from KFD */ 257 + __u32 num_banks; /* from KFD */ 258 + __u32 num_ranks; /* from KFD */ 259 259 /* struct size can be extended later if needed 260 260 * without breaking ABI compatibility 261 261 */