Merge tag 'drm-fixes-2018-06-22' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Just run of the mill fixes,

core:
- regression fix in device unplug

qxl:
- regression fix for a might-sleep issue in cursor handling

nouveau:
- regression fix in multi-screen cursor handling

amdgpu:
- switch off DC by default on Kaveri and older
- some minor fixes

i915:
- some GEM regression fixes
- doublescan mode fixes

sun4i:
- revert to fix a regression

sii8620 bridge:
- misc fixes"

* tag 'drm-fixes-2018-06-22' of git://anongit.freedesktop.org/drm/drm: (28 commits)
drm/bridge/sii8620: fix display of packed pixel modes in MHL2
drm/amdgpu: Make amdgpu_vram_mgr_bo_invisible_size always accurate
drm/amdgpu: Refactor amdgpu_vram_mgr_bo_invisible_size helper
drm/amdgpu: Update pin_size values before unpinning BO
drm/amdgpu: All UVD instances share one idle_work handle
drm/amdgpu: Don't default to DC support for Kaveri and older
drm/amdgpu: Use kvmalloc_array for allocating VRAM manager nodes array
drm/amd/pp: Fix uninitialized variable
drm/i915: Enable provoking vertex fix on Gen9 systems.
drm/i915: Fix context ban and hang accounting for client
drm/i915: Turn off g4x DP port in .post_disable()
drm/i915: Disallow interlaced modes on g4x DP outputs
drm/i915: Fix PIPESTAT irq ack on i965/g4x
drm/i915: Allow DBLSCAN user modes with eDP/LVDS/DSI
drm/i915/execlists: Avoid putting the error pointer
drm/i915: Apply batch location restrictions before pinning
drm/nouveau/kms/nv50-: cursors always use core channel vram ctxdma
Revert "drm/sun4i: Handle DRM_BUS_FLAG_PIXDATA_*EDGE"
drm/atmel-hlcdc: check stride values in the first plane
drm/bridge/sii8620: fix HDMI cable connection to dongle
...

+407 -336
+9 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 2158 switch (asic_type) { 2159 #if defined(CONFIG_DRM_AMD_DC) 2160 case CHIP_BONAIRE: 2161 - case CHIP_HAWAII: 2162 case CHIP_KAVERI: 2163 case CHIP_KABINI: 2164 case CHIP_MULLINS: 2165 case CHIP_CARRIZO: 2166 case CHIP_STONEY: 2167 case CHIP_POLARIS10:
··· 2158 switch (asic_type) { 2159 #if defined(CONFIG_DRM_AMD_DC) 2160 case CHIP_BONAIRE: 2161 case CHIP_KAVERI: 2162 case CHIP_KABINI: 2163 case CHIP_MULLINS: 2164 + /* 2165 + * We have systems in the wild with these ASICs that require 2166 + * LVDS and VGA support which is not supported with DC. 2167 + * 2168 + * Fallback to the non-DC driver here by default so as not to 2169 + * cause regressions. 2170 + */ 2171 + return amdgpu_dc > 0; 2172 + case CHIP_HAWAII: 2173 case CHIP_CARRIZO: 2174 case CHIP_STONEY: 2175 case CHIP_POLARIS10:
+10 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
··· 762 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 763 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 764 adev->vram_pin_size += amdgpu_bo_size(bo); 765 - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) 766 - adev->invisible_pin_size += amdgpu_bo_size(bo); 767 } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { 768 adev->gart_pin_size += amdgpu_bo_size(bo); 769 } ··· 789 bo->pin_count--; 790 if (bo->pin_count) 791 return 0; 792 for (i = 0; i < bo->placement.num_placement; i++) { 793 bo->placements[i].lpfn = 0; 794 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 795 } 796 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 797 - if (unlikely(r)) { 798 dev_err(adev->dev, "%p validate failed for unpin\n", bo); 799 - goto error; 800 - } 801 802 - if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { 803 - adev->vram_pin_size -= amdgpu_bo_size(bo); 804 - if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) 805 - adev->invisible_pin_size -= amdgpu_bo_size(bo); 806 - } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { 807 - adev->gart_pin_size -= amdgpu_bo_size(bo); 808 - } 809 - 810 - error: 811 return r; 812 } 813
··· 762 domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); 763 if (domain == AMDGPU_GEM_DOMAIN_VRAM) { 764 adev->vram_pin_size += amdgpu_bo_size(bo); 765 + adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo); 766 } else if (domain == AMDGPU_GEM_DOMAIN_GTT) { 767 adev->gart_pin_size += amdgpu_bo_size(bo); 768 } ··· 790 bo->pin_count--; 791 if (bo->pin_count) 792 return 0; 793 + 794 + if (bo->tbo.mem.mem_type == TTM_PL_VRAM) { 795 + adev->vram_pin_size -= amdgpu_bo_size(bo); 796 + adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo); 797 + } else if (bo->tbo.mem.mem_type == TTM_PL_TT) { 798 + adev->gart_pin_size -= amdgpu_bo_size(bo); 799 + } 800 + 801 for (i = 0; i < bo->placement.num_placement; i++) { 802 bo->placements[i].lpfn = 0; 803 bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; 804 } 805 r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); 806 + if (unlikely(r)) 807 dev_err(adev->dev, "%p validate failed for unpin\n", bo); 808 809 return r; 810 } 811
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
··· 73 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); 74 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); 75 76 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); 77 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); 78
··· 73 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man); 74 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man); 75 76 + u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo); 77 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man); 78 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man); 79
+7 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
··· 130 unsigned version_major, version_minor, family_id; 131 int i, j, r; 132 133 - INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler); 134 135 switch (adev->asic_type) { 136 #ifdef CONFIG_DRM_AMDGPU_CIK ··· 314 void *ptr; 315 int i, j; 316 317 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { 318 if (adev->uvd.inst[j].vcpu_bo == NULL) 319 continue; 320 - 321 - cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work); 322 323 /* only valid for physical mode */ 324 if (adev->asic_type < CHIP_POLARIS10) { ··· 1145 static void amdgpu_uvd_idle_work_handler(struct work_struct *work) 1146 { 1147 struct amdgpu_device *adev = 1148 - container_of(work, struct amdgpu_device, uvd.inst->idle_work.work); 1149 unsigned fences = 0, i, j; 1150 1151 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { ··· 1167 AMD_CG_STATE_GATE); 1168 } 1169 } else { 1170 - schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); 1171 } 1172 } 1173 ··· 1179 if (amdgpu_sriov_vf(adev)) 1180 return; 1181 1182 - set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work); 1183 if (set_clocks) { 1184 if (adev->pm.dpm_enabled) { 1185 amdgpu_dpm_enable_uvd(adev, true); ··· 1196 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) 1197 { 1198 if (!amdgpu_sriov_vf(ring->adev)) 1199 - schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT); 1200 } 1201 1202 /**
··· 130 unsigned version_major, version_minor, family_id; 131 int i, j, r; 132 133 + INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler); 134 135 switch (adev->asic_type) { 136 #ifdef CONFIG_DRM_AMDGPU_CIK ··· 314 void *ptr; 315 int i, j; 316 317 + cancel_delayed_work_sync(&adev->uvd.idle_work); 318 + 319 for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { 320 if (adev->uvd.inst[j].vcpu_bo == NULL) 321 continue; 322 323 /* only valid for physical mode */ 324 if (adev->asic_type < CHIP_POLARIS10) { ··· 1145 static void amdgpu_uvd_idle_work_handler(struct work_struct *work) 1146 { 1147 struct amdgpu_device *adev = 1148 + container_of(work, struct amdgpu_device, uvd.idle_work.work); 1149 unsigned fences = 0, i, j; 1150 1151 for (i = 0; i < adev->uvd.num_uvd_inst; ++i) { ··· 1167 AMD_CG_STATE_GATE); 1168 } 1169 } else { 1170 + schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT); 1171 } 1172 } 1173 ··· 1179 if (amdgpu_sriov_vf(adev)) 1180 return; 1181 1182 + set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work); 1183 if (set_clocks) { 1184 if (adev->pm.dpm_enabled) { 1185 amdgpu_dpm_enable_uvd(adev, true); ··· 1196 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring) 1197 { 1198 if (!amdgpu_sriov_vf(ring->adev)) 1199 + schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT); 1200 } 1201 1202 /**
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
··· 44 void *saved_bo; 45 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 46 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; 47 - struct delayed_work idle_work; 48 struct amdgpu_ring ring; 49 struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; 50 struct amdgpu_irq_src irq; ··· 61 bool address_64_bit; 62 bool use_ctx_buf; 63 struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; 64 }; 65 66 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
··· 44 void *saved_bo; 45 atomic_t handles[AMDGPU_MAX_UVD_HANDLES]; 46 struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES]; 47 struct amdgpu_ring ring; 48 struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS]; 49 struct amdgpu_irq_src irq; ··· 62 bool address_64_bit; 63 bool use_ctx_buf; 64 struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES]; 65 + struct delayed_work idle_work; 66 }; 67 68 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
+36 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
··· 97 } 98 99 /** 100 * amdgpu_vram_mgr_new - allocate new ranges 101 * 102 * @man: TTM memory type manager ··· 167 num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); 168 } 169 170 - nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL); 171 if (!nodes) 172 return -ENOMEM; 173 ··· 223 drm_mm_remove_node(&nodes[i]); 224 spin_unlock(&mgr->lock); 225 226 - kfree(nodes); 227 return r == -ENOSPC ? 0 : r; 228 } 229 ··· 262 atomic64_sub(usage, &mgr->usage); 263 atomic64_sub(vis_usage, &mgr->vis_usage); 264 265 - kfree(mem->mm_node); 266 mem->mm_node = NULL; 267 } 268
··· 97 } 98 99 /** 100 + * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size 101 + * 102 + * @bo: &amdgpu_bo buffer object (must be in VRAM) 103 + * 104 + * Returns: 105 + * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM. 106 + */ 107 + u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo) 108 + { 109 + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); 110 + struct ttm_mem_reg *mem = &bo->tbo.mem; 111 + struct drm_mm_node *nodes = mem->mm_node; 112 + unsigned pages = mem->num_pages; 113 + u64 usage = 0; 114 + 115 + if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size) 116 + return 0; 117 + 118 + if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT) 119 + return amdgpu_bo_size(bo); 120 + 121 + while (nodes && pages) { 122 + usage += nodes->size << PAGE_SHIFT; 123 + usage -= amdgpu_vram_mgr_vis_size(adev, nodes); 124 + pages -= nodes->size; 125 + ++nodes; 126 + } 127 + 128 + return usage; 129 + } 130 + 131 + /** 132 * amdgpu_vram_mgr_new - allocate new ranges 133 * 134 * @man: TTM memory type manager ··· 135 num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node); 136 } 137 138 + nodes = kvmalloc_array(num_nodes, sizeof(*nodes), 139 + GFP_KERNEL | __GFP_ZERO); 140 if (!nodes) 141 return -ENOMEM; 142 ··· 190 drm_mm_remove_node(&nodes[i]); 191 spin_unlock(&mgr->lock); 192 193 + kvfree(nodes); 194 return r == -ENOSPC ? 0 : r; 195 } 196 ··· 229 atomic64_sub(usage, &mgr->usage); 230 atomic64_sub(vis_usage, &mgr->vis_usage); 231 232 + kvfree(mem->mm_node); 233 mem->mm_node = NULL; 234 } 235
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
··· 1090 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) 1091 { 1092 struct amdgpu_device *adev = hwmgr->adev; 1093 - int result; 1094 uint32_t num_se = 0; 1095 uint32_t count, data; 1096
··· 1090 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr) 1091 { 1092 struct amdgpu_device *adev = hwmgr->adev; 1093 + int result = 0; 1094 uint32_t num_se = 0; 1095 uint32_t count, data; 1096
+1 -1
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
··· 839 return ret; 840 } 841 842 - if (desc->layout.xstride && desc->layout.pstride) { 843 int ret; 844 845 ret = drm_plane_create_rotation_property(&plane->base,
··· 839 return ret; 840 } 841 842 + if (desc->layout.xstride[0] && desc->layout.pstride[0]) { 843 int ret; 844 845 ret = drm_plane_create_rotation_property(&plane->base,
+116 -191
drivers/gpu/drm/bridge/sil-sii8620.c
··· 36 37 #define SII8620_BURST_BUF_LEN 288 38 #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) 39 - #define MHL1_MAX_LCLK 225000 40 - #define MHL3_MAX_LCLK 600000 41 42 enum sii8620_mode { 43 CM_DISCONNECTED, ··· 83 u8 devcap[MHL_DCAP_SIZE]; 84 u8 xdevcap[MHL_XDC_SIZE]; 85 u8 avif[HDMI_INFOFRAME_SIZE(AVI)]; 86 struct edid *edid; 87 unsigned int gen2_write_burst:1; 88 enum sii8620_mt_state mt_state; ··· 482 } 483 } 484 485 - static void sii8620_sink_detected(struct sii8620 *ctx, int ret) 486 { 487 static const char * const sink_str[] = { 488 [SINK_NONE] = "NONE", ··· 493 char sink_name[20]; 494 struct device *dev = ctx->dev; 495 496 - if (ret < 0) 497 return; 498 499 sii8620_fetch_edid(ctx); ··· 502 sii8620_mhl_disconnected(ctx); 503 return; 504 } 505 506 if (drm_detect_hdmi_monitor(ctx->edid)) 507 ctx->sink_type = SINK_HDMI; ··· 513 514 dev_info(dev, "detected sink(type: %s): %s\n", 515 sink_str[ctx->sink_type], sink_name); 516 - } 517 - 518 - static void sii8620_hsic_init(struct sii8620 *ctx) 519 - { 520 - if (!sii8620_is_mhl3(ctx)) 521 - return; 522 - 523 - sii8620_write(ctx, REG_FCGC, 524 - BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE); 525 - sii8620_setbits(ctx, REG_HRXCTRL3, 526 - BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0); 527 - sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4); 528 - sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0); 529 - sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0); 530 - sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST); 531 - sii8620_write_seq_static(ctx, 532 - REG_TDMLLCTL, 0, 533 - REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST | 534 - BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST, 535 - REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST, 536 - REG_HRXINTL, 0xff, 537 - REG_HRXINTH, 0xff, 538 - REG_TTXINTL, 0xff, 539 - REG_TTXINTH, 0xff, 540 - REG_TRXINTL, 0xff, 541 - REG_TRXINTH, 0xff, 542 - REG_HTXINTL, 0xff, 543 - REG_HTXINTH, 0xff, 544 - REG_FCINTR0, 0xff, 545 - REG_FCINTR1, 0xff, 546 - REG_FCINTR2, 0xff, 547 - REG_FCINTR3, 0xff, 548 - REG_FCINTR4, 0xff, 549 - REG_FCINTR5, 0xff, 550 - REG_FCINTR6, 0xff, 551 - REG_FCINTR7, 0xff 552 - ); 553 - } 554 - 555 - static void sii8620_edid_read(struct sii8620 *ctx, int ret) 556 - { 557 - if (ret < 0) 558 - return; 559 - 560 - sii8620_set_upstream_edid(ctx); 561 - sii8620_hsic_init(ctx); 562 - sii8620_enable_hpd(ctx); 563 } 564 565 static void sii8620_mr_devcap(struct sii8620 *ctx) ··· 530 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L], 531 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]); 532 sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); 533 } 534 535 static void sii8620_mr_xdevcap(struct sii8620 *ctx) ··· 769 static void sii8620_fetch_edid(struct sii8620 *ctx) 770 { 771 u8 lm_ddc, ddc_cmd, int3, cbus; 772 int fetched, i; 773 int edid_len = EDID_LENGTH; 774 u8 *edid; ··· 819 REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 820 ); 821 822 - do { 823 - int3 = sii8620_readb(ctx, REG_INTR3); 824 cbus = sii8620_readb(ctx, REG_CBUS_STATUS); 825 - 826 - if (int3 & BIT_DDC_CMD_DONE) 827 - break; 828 - 829 - if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) { 830 kfree(edid); 831 edid = NULL; 832 goto end; 833 } 834 - } while (1); 835 - 836 - sii8620_readb(ctx, REG_DDC_STATUS); 837 - while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE) 838 usleep_range(10, 20); 839 840 sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); 841 if (fetched + FETCH_SIZE == EDID_LENGTH) { ··· 942 
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); 943 if (ret) 944 return ret; 945 usleep_range(10000, 20000); 946 - return clk_prepare_enable(ctx->clk_xtal); 947 } 948 949 static int sii8620_hw_off(struct sii8620 *ctx) ··· 960 clk_disable_unprepare(ctx->clk_xtal); 961 gpiod_set_value(ctx->gpio_reset, 1); 962 return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); 963 - } 964 - 965 - static void sii8620_hw_reset(struct sii8620 *ctx) 966 - { 967 - usleep_range(10000, 20000); 968 - gpiod_set_value(ctx->gpio_reset, 0); 969 - usleep_range(5000, 20000); 970 - gpiod_set_value(ctx->gpio_reset, 1); 971 - usleep_range(10000, 20000); 972 - gpiod_set_value(ctx->gpio_reset, 0); 973 - msleep(300); 974 } 975 976 static void sii8620_cbus_reset(struct sii8620 *ctx) ··· 1017 1018 static void sii8620_set_format(struct sii8620 *ctx) 1019 { 1020 - u8 out_fmt; 1021 - 1022 if (sii8620_is_mhl3(ctx)) { 1023 sii8620_setbits(ctx, REG_M3_P0CTRL, 1024 BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, 1025 ctx->use_packed_pixel ? ~0 : 0); 1026 } else { 1027 - if (ctx->use_packed_pixel) 1028 - sii8620_write_seq_static(ctx, 1029 - REG_VID_MODE, BIT_VID_MODE_M1080P, 1030 - REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1, 1031 - REG_MHLTX_CTL6, 0x60 1032 - ); 1033 - else 1034 sii8620_write_seq_static(ctx, 1035 REG_VID_MODE, 0, 1036 REG_MHL_TOP_CTL, 1, ··· 1029 ); 1030 } 1031 1032 - if (ctx->use_packed_pixel) 1033 - out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) | 1034 - BIT_TPI_OUTPUT_CSCMODE709; 1035 - else 1036 - out_fmt = VAL_TPI_FORMAT(RGB, FULL); 1037 - 1038 sii8620_write_seq(ctx, 1039 REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), 1040 - REG_TPI_OUTPUT, out_fmt, 1041 ); 1042 } 1043 ··· 1170 int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3); 1171 int i; 1172 1173 - for (i = 0; i < ARRAY_SIZE(clk_spec); ++i) 1174 if (clk < clk_spec[i].max_clk) 1175 break; 1176 ··· 1488 ); 1489 } 1490 1491 static void sii8620_disconnect(struct sii8620 *ctx) 1492 { 1493 sii8620_disable_gen2_write_burst(ctx); ··· 1525 REG_MHL_DP_CTL6, 0x2A, 1526 REG_MHL_DP_CTL7, 0x03 1527 ); 1528 - sii8620_disable_hpd(ctx); 1529 sii8620_write_seq_static(ctx, 1530 REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, 1531 REG_MHL_COC_CTL1, 0x07, ··· 1573 memset(ctx->xstat, 0, sizeof(ctx->xstat)); 1574 memset(ctx->devcap, 0, sizeof(ctx->devcap)); 1575 memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); 1576 ctx->cbus_status = 0; 1577 - ctx->sink_type = SINK_NONE; 1578 - kfree(ctx->edid); 1579 - ctx->edid = NULL; 1580 sii8620_mt_cleanup(ctx); 1581 } 1582 ··· 1665 sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), 1666 MHL_DST_LM_CLK_MODE_NORMAL 1667 | MHL_DST_LM_PATH_ENABLED); 1668 - if (!sii8620_is_mhl3(ctx)) 1669 - sii8620_mt_read_devcap(ctx, false); 1670 - sii8620_mt_set_cont(ctx, sii8620_sink_detected); 1671 } else { 1672 sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), 1673 MHL_DST_LM_CLK_MODE_NORMAL); ··· 1681 sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); 1682 sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); 1683 1684 - if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY) 1685 sii8620_status_dcap_ready(ctx); 1686 1687 if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) 1688 sii8620_status_changed_path(ctx); ··· 1772 } 1773 if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ) 1774 sii8620_send_features(ctx); 1775 - if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) 1776 - sii8620_edid_read(ctx, 0); 1777 } 1778 1779 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) ··· 1851 if (stat & 
BIT_CBUS_MSC_MR_WRITE_STAT) 1852 sii8620_msc_mr_write_stat(ctx); 1853 1854 if (stat & BIT_CBUS_MSC_MR_SET_INT) 1855 sii8620_msc_mr_set_int(ctx); 1856 ··· 1907 ctx->mt_state = MT_STATE_DONE; 1908 } 1909 1910 - static void sii8620_scdt_high(struct sii8620 *ctx) 1911 - { 1912 - sii8620_write_seq_static(ctx, 1913 - REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI, 1914 - REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI, 1915 - ); 1916 - } 1917 - 1918 static void sii8620_irq_scdt(struct sii8620 *ctx) 1919 { 1920 u8 stat = sii8620_readb(ctx, REG_INTR5); ··· 1914 if (stat & BIT_INTR_SCDT_CHANGE) { 1915 u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); 1916 1917 - if (cstat & BIT_TMDS_CSTAT_P3_SCDT) { 1918 - if (ctx->sink_type == SINK_HDMI) 1919 - /* enable infoframe interrupt */ 1920 - sii8620_scdt_high(ctx); 1921 - else 1922 - sii8620_start_video(ctx); 1923 - } 1924 } 1925 1926 sii8620_write(ctx, REG_INTR5, stat); 1927 - } 1928 - 1929 - static void sii8620_new_vsi(struct sii8620 *ctx) 1930 - { 1931 - u8 vsif[11]; 1932 - 1933 - sii8620_write(ctx, REG_RX_HDMI_CTRL2, 1934 - VAL_RX_HDMI_CTRL2_DEFVAL | 1935 - BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI); 1936 - sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif, 1937 - ARRAY_SIZE(vsif)); 1938 - } 1939 - 1940 - static void sii8620_new_avi(struct sii8620 *ctx) 1941 - { 1942 - sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL); 1943 - sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif, 1944 - ARRAY_SIZE(ctx->avif)); 1945 - } 1946 - 1947 - static void sii8620_irq_infr(struct sii8620 *ctx) 1948 - { 1949 - u8 stat = sii8620_readb(ctx, REG_INTR8) 1950 - & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI); 1951 - 1952 - sii8620_write(ctx, REG_INTR8, stat); 1953 - 1954 - if (stat & BIT_CEA_NEW_VSI) 1955 - sii8620_new_vsi(ctx); 1956 - 1957 - if (stat & BIT_CEA_NEW_AVI) 1958 - sii8620_new_avi(ctx); 1959 - 1960 - if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI)) 1961 - sii8620_start_video(ctx); 1962 } 1963 1964 static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret) ··· 1971 1972 if (stat & BIT_DDC_CMD_DONE) { 1973 sii8620_write(ctx, REG_INTR3_MASK, 0); 1974 - if (sii8620_is_mhl3(ctx)) 1975 sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), 1976 MHL_INT_RC_FEAT_REQ); 1977 else 1978 - sii8620_edid_read(ctx, 0); 1979 } 1980 sii8620_write(ctx, REG_INTR3, stat); 1981 } ··· 2002 { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, 2003 { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc }, 2004 { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, 2005 - { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr }, 2006 }; 2007 struct sii8620 *ctx = data; 2008 u8 stats[LEN_FAST_INTR_STAT]; ··· 2039 dev_err(dev, "Error powering on, %d.\n", ret); 2040 return; 2041 } 2042 - sii8620_hw_reset(ctx); 2043 2044 sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); 2045 ret = sii8620_clear_error(ctx); ··· 2194 rc_unregister_device(ctx->rc_dev); 2195 } 2196 2197 static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge, 2198 const struct drm_display_mode *mode) 2199 { 2200 struct sii8620 *ctx = bridge_to_sii8620(bridge); 2201 bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] & 2202 MHL_DCAP_VID_LINK_PPIXEL; 2203 - unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : 2204 - MHL1_MAX_LCLK; 2205 - max_pclk /= can_pack ? 2 : 3; 2206 2207 - return (mode->clock > max_pclk) ? 
MODE_CLOCK_HIGH : MODE_OK; 2208 } 2209 2210 static bool sii8620_mode_fixup(struct drm_bridge *bridge, ··· 2238 struct drm_display_mode *adjusted_mode) 2239 { 2240 struct sii8620 *ctx = bridge_to_sii8620(bridge); 2241 - int max_lclk; 2242 - bool ret = true; 2243 2244 mutex_lock(&ctx->lock); 2245 2246 - max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK; 2247 - if (max_lclk > 3 * adjusted_mode->clock) { 2248 - ctx->use_packed_pixel = 0; 2249 - goto end; 2250 - } 2251 - if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) && 2252 - max_lclk > 2 * adjusted_mode->clock) { 2253 - ctx->use_packed_pixel = 1; 2254 - goto end; 2255 - } 2256 - ret = false; 2257 - end: 2258 - if (ret) { 2259 - u8 vic = drm_match_cea_mode(adjusted_mode); 2260 2261 - if (!vic) { 2262 - union hdmi_infoframe frm; 2263 - u8 mhl_vic[] = { 0, 95, 94, 93, 98 }; 2264 - 2265 - /* FIXME: We need the connector here */ 2266 - drm_hdmi_vendor_infoframe_from_display_mode( 2267 - &frm.vendor.hdmi, NULL, adjusted_mode); 2268 - vic = frm.vendor.hdmi.vic; 2269 - if (vic >= ARRAY_SIZE(mhl_vic)) 2270 - vic = 0; 2271 - vic = mhl_vic[vic]; 2272 - } 2273 - ctx->video_code = vic; 2274 - ctx->pixel_clock = adjusted_mode->clock; 2275 - } 2276 mutex_unlock(&ctx->lock); 2277 - return ret; 2278 } 2279 2280 static const struct drm_bridge_funcs sii8620_bridge_funcs = {
··· 36 37 #define SII8620_BURST_BUF_LEN 288 38 #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3) 39 + 40 + #define MHL1_MAX_PCLK 75000 41 + #define MHL1_MAX_PCLK_PP_MODE 150000 42 + #define MHL3_MAX_PCLK 200000 43 + #define MHL3_MAX_PCLK_PP_MODE 300000 44 45 enum sii8620_mode { 46 CM_DISCONNECTED, ··· 80 u8 devcap[MHL_DCAP_SIZE]; 81 u8 xdevcap[MHL_XDC_SIZE]; 82 u8 avif[HDMI_INFOFRAME_SIZE(AVI)]; 83 + bool feature_complete; 84 + bool devcap_read; 85 + bool sink_detected; 86 struct edid *edid; 87 unsigned int gen2_write_burst:1; 88 enum sii8620_mt_state mt_state; ··· 476 } 477 } 478 479 + static void sii8620_identify_sink(struct sii8620 *ctx) 480 { 481 static const char * const sink_str[] = { 482 [SINK_NONE] = "NONE", ··· 487 char sink_name[20]; 488 struct device *dev = ctx->dev; 489 490 + if (!ctx->sink_detected || !ctx->devcap_read) 491 return; 492 493 sii8620_fetch_edid(ctx); ··· 496 sii8620_mhl_disconnected(ctx); 497 return; 498 } 499 + sii8620_set_upstream_edid(ctx); 500 501 if (drm_detect_hdmi_monitor(ctx->edid)) 502 ctx->sink_type = SINK_HDMI; ··· 506 507 dev_info(dev, "detected sink(type: %s): %s\n", 508 sink_str[ctx->sink_type], sink_name); 509 } 510 511 static void sii8620_mr_devcap(struct sii8620 *ctx) ··· 570 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L], 571 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]); 572 sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE); 573 + ctx->devcap_read = true; 574 + sii8620_identify_sink(ctx); 575 } 576 577 static void sii8620_mr_xdevcap(struct sii8620 *ctx) ··· 807 static void sii8620_fetch_edid(struct sii8620 *ctx) 808 { 809 u8 lm_ddc, ddc_cmd, int3, cbus; 810 + unsigned long timeout; 811 int fetched, i; 812 int edid_len = EDID_LENGTH; 813 u8 *edid; ··· 856 REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK 857 ); 858 859 + int3 = 0; 860 + timeout = jiffies + msecs_to_jiffies(200); 861 + for (;;) { 862 cbus = sii8620_readb(ctx, REG_CBUS_STATUS); 863 + if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) { 864 kfree(edid); 865 edid = NULL; 866 goto end; 867 } 868 + if (int3 & BIT_DDC_CMD_DONE) { 869 + if (sii8620_readb(ctx, REG_DDC_DOUT_CNT) 870 + >= FETCH_SIZE) 871 + break; 872 + } else { 873 + int3 = sii8620_readb(ctx, REG_INTR3); 874 + } 875 + if (time_is_before_jiffies(timeout)) { 876 + ctx->error = -ETIMEDOUT; 877 + dev_err(ctx->dev, "timeout during EDID read\n"); 878 + kfree(edid); 879 + edid = NULL; 880 + goto end; 881 + } 882 usleep_range(10, 20); 883 + } 884 885 sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE); 886 if (fetched + FETCH_SIZE == EDID_LENGTH) { ··· 971 ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); 972 if (ret) 973 return ret; 974 + 975 usleep_range(10000, 20000); 976 + ret = clk_prepare_enable(ctx->clk_xtal); 977 + if (ret) 978 + return ret; 979 + 980 + msleep(100); 981 + gpiod_set_value(ctx->gpio_reset, 0); 982 + msleep(100); 983 + 984 + return 0; 985 } 986 987 static int sii8620_hw_off(struct sii8620 *ctx) ··· 980 clk_disable_unprepare(ctx->clk_xtal); 981 gpiod_set_value(ctx->gpio_reset, 1); 982 return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); 983 } 984 985 static void sii8620_cbus_reset(struct sii8620 *ctx) ··· 1048 1049 static void sii8620_set_format(struct sii8620 *ctx) 1050 { 1051 if (sii8620_is_mhl3(ctx)) { 1052 sii8620_setbits(ctx, REG_M3_P0CTRL, 1053 BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED, 1054 ctx->use_packed_pixel ? 
~0 : 0); 1055 } else { 1056 sii8620_write_seq_static(ctx, 1057 REG_VID_MODE, 0, 1058 REG_MHL_TOP_CTL, 1, ··· 1069 ); 1070 } 1071 1072 sii8620_write_seq(ctx, 1073 REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL), 1074 + REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL), 1075 ); 1076 } 1077 ··· 1216 int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3); 1217 int i; 1218 1219 + for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i) 1220 if (clk < clk_spec[i].max_clk) 1221 break; 1222 ··· 1534 ); 1535 } 1536 1537 + static void sii8620_hpd_unplugged(struct sii8620 *ctx) 1538 + { 1539 + sii8620_disable_hpd(ctx); 1540 + ctx->sink_type = SINK_NONE; 1541 + ctx->sink_detected = false; 1542 + ctx->feature_complete = false; 1543 + kfree(ctx->edid); 1544 + ctx->edid = NULL; 1545 + } 1546 + 1547 static void sii8620_disconnect(struct sii8620 *ctx) 1548 { 1549 sii8620_disable_gen2_write_burst(ctx); ··· 1561 REG_MHL_DP_CTL6, 0x2A, 1562 REG_MHL_DP_CTL7, 0x03 1563 ); 1564 + sii8620_hpd_unplugged(ctx); 1565 sii8620_write_seq_static(ctx, 1566 REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE, 1567 REG_MHL_COC_CTL1, 0x07, ··· 1609 memset(ctx->xstat, 0, sizeof(ctx->xstat)); 1610 memset(ctx->devcap, 0, sizeof(ctx->devcap)); 1611 memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap)); 1612 + ctx->devcap_read = false; 1613 ctx->cbus_status = 0; 1614 sii8620_mt_cleanup(ctx); 1615 } 1616 ··· 1703 sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), 1704 MHL_DST_LM_CLK_MODE_NORMAL 1705 | MHL_DST_LM_PATH_ENABLED); 1706 } else { 1707 sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), 1708 MHL_DST_LM_CLK_MODE_NORMAL); ··· 1722 sii8620_update_array(ctx->stat, st, MHL_DST_SIZE); 1723 sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE); 1724 1725 + if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] & 1726 + MHL_DST_CONN_DCAP_RDY) { 1727 sii8620_status_dcap_ready(ctx); 1728 + 1729 + if (!sii8620_is_mhl3(ctx)) 1730 + sii8620_mt_read_devcap(ctx, false); 1731 + } 1732 1733 if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) 1734 sii8620_status_changed_path(ctx); ··· 1808 } 1809 if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ) 1810 sii8620_send_features(ctx); 1811 + if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) { 1812 + ctx->feature_complete = true; 1813 + if (ctx->edid) 1814 + sii8620_enable_hpd(ctx); 1815 + } 1816 } 1817 1818 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx) ··· 1884 if (stat & BIT_CBUS_MSC_MR_WRITE_STAT) 1885 sii8620_msc_mr_write_stat(ctx); 1886 1887 + if (stat & BIT_CBUS_HPD_CHG) { 1888 + if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) { 1889 + ctx->sink_detected = true; 1890 + sii8620_identify_sink(ctx); 1891 + } else { 1892 + sii8620_hpd_unplugged(ctx); 1893 + } 1894 + } 1895 + 1896 if (stat & BIT_CBUS_MSC_MR_SET_INT) 1897 sii8620_msc_mr_set_int(ctx); 1898 ··· 1931 ctx->mt_state = MT_STATE_DONE; 1932 } 1933 1934 static void sii8620_irq_scdt(struct sii8620 *ctx) 1935 { 1936 u8 stat = sii8620_readb(ctx, REG_INTR5); ··· 1946 if (stat & BIT_INTR_SCDT_CHANGE) { 1947 u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3); 1948 1949 + if (cstat & BIT_TMDS_CSTAT_P3_SCDT) 1950 + sii8620_start_video(ctx); 1951 } 1952 1953 sii8620_write(ctx, REG_INTR5, stat); 1954 } 1955 1956 static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret) ··· 2043 2044 if (stat & BIT_DDC_CMD_DONE) { 2045 sii8620_write(ctx, REG_INTR3_MASK, 0); 2046 + if (sii8620_is_mhl3(ctx) && !ctx->feature_complete) 2047 sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE), 2048 MHL_INT_RC_FEAT_REQ); 2049 else 2050 + sii8620_enable_hpd(ctx); 2051 } 2052 
sii8620_write(ctx, REG_INTR3, stat); 2053 } ··· 2074 { BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid }, 2075 { BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc }, 2076 { BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt }, 2077 }; 2078 struct sii8620 *ctx = data; 2079 u8 stats[LEN_FAST_INTR_STAT]; ··· 2112 dev_err(dev, "Error powering on, %d.\n", ret); 2113 return; 2114 } 2115 2116 sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver)); 2117 ret = sii8620_clear_error(ctx); ··· 2268 rc_unregister_device(ctx->rc_dev); 2269 } 2270 2271 + static int sii8620_is_packing_required(struct sii8620 *ctx, 2272 + const struct drm_display_mode *mode) 2273 + { 2274 + int max_pclk, max_pclk_pp_mode; 2275 + 2276 + if (sii8620_is_mhl3(ctx)) { 2277 + max_pclk = MHL3_MAX_PCLK; 2278 + max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE; 2279 + } else { 2280 + max_pclk = MHL1_MAX_PCLK; 2281 + max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE; 2282 + } 2283 + 2284 + if (mode->clock < max_pclk) 2285 + return 0; 2286 + else if (mode->clock < max_pclk_pp_mode) 2287 + return 1; 2288 + else 2289 + return -1; 2290 + } 2291 + 2292 static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge, 2293 const struct drm_display_mode *mode) 2294 { 2295 struct sii8620 *ctx = bridge_to_sii8620(bridge); 2296 + int pack_required = sii8620_is_packing_required(ctx, mode); 2297 bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] & 2298 MHL_DCAP_VID_LINK_PPIXEL; 2299 2300 + switch (pack_required) { 2301 + case 0: 2302 + return MODE_OK; 2303 + case 1: 2304 + return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH; 2305 + default: 2306 + return MODE_CLOCK_HIGH; 2307 + } 2308 } 2309 2310 static bool sii8620_mode_fixup(struct drm_bridge *bridge, ··· 2286 struct drm_display_mode *adjusted_mode) 2287 { 2288 struct sii8620 *ctx = bridge_to_sii8620(bridge); 2289 2290 mutex_lock(&ctx->lock); 2291 2292 + ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode); 2293 + ctx->video_code = drm_match_cea_mode(adjusted_mode); 2294 + ctx->pixel_clock = adjusted_mode->clock; 2295 2296 mutex_unlock(&ctx->lock); 2297 + 2298 + return true; 2299 } 2300 2301 static const struct drm_bridge_funcs sii8620_bridge_funcs = {
+7 -7
drivers/gpu/drm/drm_drv.c
··· 369 */ 370 void drm_dev_unplug(struct drm_device *dev) 371 { 372 - drm_dev_unregister(dev); 373 - 374 - mutex_lock(&drm_global_mutex); 375 - if (dev->open_count == 0) 376 - drm_dev_put(dev); 377 - mutex_unlock(&drm_global_mutex); 378 - 379 /* 380 * After synchronizing any critical read section is guaranteed to see 381 * the new value of ->unplugged, and any critical section which might ··· 377 */ 378 dev->unplugged = true; 379 synchronize_srcu(&drm_unplug_srcu); 380 } 381 EXPORT_SYMBOL(drm_dev_unplug); 382
··· 369 */ 370 void drm_dev_unplug(struct drm_device *dev) 371 { 372 /* 373 * After synchronizing any critical read section is guaranteed to see 374 * the new value of ->unplugged, and any critical section which might ··· 384 */ 385 dev->unplugged = true; 386 synchronize_srcu(&drm_unplug_srcu); 387 + 388 + drm_dev_unregister(dev); 389 + 390 + mutex_lock(&drm_global_mutex); 391 + if (dev->open_count == 0) 392 + drm_dev_put(dev); 393 + mutex_unlock(&drm_global_mutex); 394 } 395 EXPORT_SYMBOL(drm_dev_unplug); 396
+14 -7
drivers/gpu/drm/i915/i915_drv.h
··· 340 341 unsigned int bsd_engine; 342 343 - /* Client can have a maximum of 3 contexts banned before 344 - * it is denied of creating new contexts. As one context 345 - * ban needs 4 consecutive hangs, and more if there is 346 - * progress in between, this is a last resort stop gap measure 347 - * to limit the badly behaving clients access to gpu. 348 */ 349 - #define I915_MAX_CLIENT_CONTEXT_BANS 3 350 - atomic_t context_bans; 351 }; 352 353 /* Interface history:
··· 340 341 unsigned int bsd_engine; 342 343 + /* 344 + * Every context ban increments per client ban score. Also 345 + * hangs in short succession increments ban score. If ban threshold 346 + * is reached, client is considered banned and submitting more work 347 + * will fail. This is a stop gap measure to limit the badly behaving 348 + * clients access to gpu. Note that unbannable contexts never increment 349 + * the client ban score. 350 */ 351 + #define I915_CLIENT_SCORE_HANG_FAST 1 352 + #define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ) 353 + #define I915_CLIENT_SCORE_CONTEXT_BAN 3 354 + #define I915_CLIENT_SCORE_BANNED 9 355 + /** ban_score: Accumulated score of all ctx bans and fast hangs. */ 356 + atomic_t ban_score; 357 + unsigned long hang_timestamp; 358 }; 359 360 /* Interface history:
+40 -17
drivers/gpu/drm/i915/i915_gem.c
··· 2933 return 0; 2934 } 2935 2936 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) 2937 { 2938 - bool banned; 2939 2940 atomic_inc(&ctx->guilty_count); 2941 2942 - banned = false; 2943 - if (i915_gem_context_is_bannable(ctx)) { 2944 - unsigned int score; 2945 2946 - score = atomic_add_return(CONTEXT_SCORE_GUILTY, 2947 - &ctx->ban_score); 2948 - banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; 2949 2950 - DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n", 2951 - ctx->name, score, yesno(banned)); 2952 - } 2953 - if (!banned) 2954 return; 2955 2956 - i915_gem_context_set_banned(ctx); 2957 - if (!IS_ERR_OR_NULL(ctx->file_priv)) { 2958 - atomic_inc(&ctx->file_priv->context_bans); 2959 - DRM_DEBUG_DRIVER("client %s has had %d context banned\n", 2960 - ctx->name, atomic_read(&ctx->file_priv->context_bans)); 2961 - } 2962 } 2963 2964 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) ··· 5758 INIT_LIST_HEAD(&file_priv->mm.request_list); 5759 5760 file_priv->bsd_engine = -1; 5761 5762 ret = i915_gem_context_open(i915, file); 5763 if (ret)
··· 2933 return 0; 2934 } 2935 2936 + static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv, 2937 + const struct i915_gem_context *ctx) 2938 + { 2939 + unsigned int score; 2940 + unsigned long prev_hang; 2941 + 2942 + if (i915_gem_context_is_banned(ctx)) 2943 + score = I915_CLIENT_SCORE_CONTEXT_BAN; 2944 + else 2945 + score = 0; 2946 + 2947 + prev_hang = xchg(&file_priv->hang_timestamp, jiffies); 2948 + if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES)) 2949 + score += I915_CLIENT_SCORE_HANG_FAST; 2950 + 2951 + if (score) { 2952 + atomic_add(score, &file_priv->ban_score); 2953 + 2954 + DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n", 2955 + ctx->name, score, 2956 + atomic_read(&file_priv->ban_score)); 2957 + } 2958 + } 2959 + 2960 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx) 2961 { 2962 + unsigned int score; 2963 + bool banned, bannable; 2964 2965 atomic_inc(&ctx->guilty_count); 2966 2967 + bannable = i915_gem_context_is_bannable(ctx); 2968 + score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score); 2969 + banned = score >= CONTEXT_SCORE_BAN_THRESHOLD; 2970 2971 + DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n", 2972 + ctx->name, atomic_read(&ctx->guilty_count), 2973 + score, yesno(banned && bannable)); 2974 2975 + /* Cool contexts don't accumulate client ban score */ 2976 + if (!bannable) 2977 return; 2978 2979 + if (banned) 2980 + i915_gem_context_set_banned(ctx); 2981 + 2982 + if (!IS_ERR_OR_NULL(ctx->file_priv)) 2983 + i915_gem_client_mark_guilty(ctx->file_priv, ctx); 2984 } 2985 2986 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx) ··· 5736 INIT_LIST_HEAD(&file_priv->mm.request_list); 5737 5738 file_priv->bsd_engine = -1; 5739 + file_priv->hang_timestamp = jiffies; 5740 5741 ret = i915_gem_context_open(i915, file); 5742 if (ret)
+1 -1
drivers/gpu/drm/i915/i915_gem_context.c
··· 652 653 static bool client_is_banned(struct drm_i915_file_private *file_priv) 654 { 655 - return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS; 656 } 657 658 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
··· 652 653 static bool client_is_banned(struct drm_i915_file_private *file_priv) 654 { 655 + return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED; 656 } 657 658 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+27 -22
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 489 } 490 491 static int 492 - eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma) 493 { 494 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 495 int err; ··· 523 eb->vma[i] = vma; 524 eb->flags[i] = entry->flags; 525 vma->exec_flags = &eb->flags[i]; 526 527 err = 0; 528 if (eb_pin_vma(eb, entry, vma)) { ··· 736 { 737 struct radix_tree_root *handles_vma = &eb->ctx->handles_vma; 738 struct drm_i915_gem_object *obj; 739 - unsigned int i; 740 int err; 741 742 if (unlikely(i915_gem_context_is_closed(eb->ctx))) ··· 747 748 INIT_LIST_HEAD(&eb->relocs); 749 INIT_LIST_HEAD(&eb->unbound); 750 751 for (i = 0; i < eb->buffer_count; i++) { 752 u32 handle = eb->exec[i].handle; ··· 792 lut->handle = handle; 793 794 add_vma: 795 - err = eb_add_vma(eb, i, vma); 796 if (unlikely(err)) 797 goto err_vma; 798 799 GEM_BUG_ON(vma != eb->vma[i]); 800 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); 801 } 802 - 803 - /* take note of the batch buffer before we might reorder the lists */ 804 - i = eb_batch_index(eb); 805 - eb->batch = eb->vma[i]; 806 - GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]); 807 - 808 - /* 809 - * SNA is doing fancy tricks with compressing batch buffers, which leads 810 - * to negative relocation deltas. Usually that works out ok since the 811 - * relocate address is still positive, except when the batch is placed 812 - * very low in the GTT. Ensure this doesn't happen. 813 - * 814 - * Note that actual hangs have only been observed on gen7, but for 815 - * paranoia do it everywhere. 816 - */ 817 - if (!(eb->flags[i] & EXEC_OBJECT_PINNED)) 818 - eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; 819 - if (eb->reloc_cache.has_fence) 820 - eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; 821 822 eb->args->flags |= __EXEC_VALIDATED; 823 return eb_reserve(eb);
··· 489 } 490 491 static int 492 + eb_add_vma(struct i915_execbuffer *eb, 493 + unsigned int i, unsigned batch_idx, 494 + struct i915_vma *vma) 495 { 496 struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 497 int err; ··· 521 eb->vma[i] = vma; 522 eb->flags[i] = entry->flags; 523 vma->exec_flags = &eb->flags[i]; 524 + 525 + /* 526 + * SNA is doing fancy tricks with compressing batch buffers, which leads 527 + * to negative relocation deltas. Usually that works out ok since the 528 + * relocate address is still positive, except when the batch is placed 529 + * very low in the GTT. Ensure this doesn't happen. 530 + * 531 + * Note that actual hangs have only been observed on gen7, but for 532 + * paranoia do it everywhere. 533 + */ 534 + if (i == batch_idx) { 535 + if (!(eb->flags[i] & EXEC_OBJECT_PINNED)) 536 + eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS; 537 + if (eb->reloc_cache.has_fence) 538 + eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE; 539 + 540 + eb->batch = vma; 541 + } 542 543 err = 0; 544 if (eb_pin_vma(eb, entry, vma)) { ··· 716 { 717 struct radix_tree_root *handles_vma = &eb->ctx->handles_vma; 718 struct drm_i915_gem_object *obj; 719 + unsigned int i, batch; 720 int err; 721 722 if (unlikely(i915_gem_context_is_closed(eb->ctx))) ··· 727 728 INIT_LIST_HEAD(&eb->relocs); 729 INIT_LIST_HEAD(&eb->unbound); 730 + 731 + batch = eb_batch_index(eb); 732 733 for (i = 0; i < eb->buffer_count; i++) { 734 u32 handle = eb->exec[i].handle; ··· 770 lut->handle = handle; 771 772 add_vma: 773 + err = eb_add_vma(eb, i, batch, vma); 774 if (unlikely(err)) 775 goto err_vma; 776 777 GEM_BUG_ON(vma != eb->vma[i]); 778 GEM_BUG_ON(vma->exec_flags != &eb->flags[i]); 779 + GEM_BUG_ON(drm_mm_node_allocated(&vma->node) && 780 + eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i])); 781 } 782 783 eb->args->flags |= __EXEC_VALIDATED; 784 return eb_reserve(eb);
+10 -2
drivers/gpu/drm/i915/i915_irq.c
··· 1893 1894 /* 1895 * Clear the PIPE*STAT regs before the IIR 1896 */ 1897 - if (pipe_stats[pipe]) 1898 - I915_WRITE(reg, enable_mask | pipe_stats[pipe]); 1899 } 1900 spin_unlock(&dev_priv->irq_lock); 1901 }
··· 1893 1894 /* 1895 * Clear the PIPE*STAT regs before the IIR 1896 + * 1897 + * Toggle the enable bits to make sure we get an 1898 + * edge in the ISR pipe event bit if we don't clear 1899 + * all the enabled status bits. Otherwise the edge 1900 + * triggered IIR on i965/g4x wouldn't notice that 1901 + * an interrupt is still pending. 1902 */ 1903 + if (pipe_stats[pipe]) { 1904 + I915_WRITE(reg, pipe_stats[pipe]); 1905 + I915_WRITE(reg, enable_mask); 1906 + } 1907 } 1908 spin_unlock(&dev_priv->irq_lock); 1909 }
+5
drivers/gpu/drm/i915/i915_reg.h
··· 2425 #define _3D_CHICKEN _MMIO(0x2084) 2426 #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) 2427 #define _3D_CHICKEN2 _MMIO(0x208c) 2428 /* Disables pipelining of read flushes past the SF-WIZ interface. 2429 * Required on all Ironlake steppings according to the B-Spec, but the 2430 * particular danger of not doing so is not specified. 2431 */ 2432 # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) 2433 #define _3D_CHICKEN3 _MMIO(0x2090) 2434 #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) 2435 #define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) 2436 #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
··· 2425 #define _3D_CHICKEN _MMIO(0x2084) 2426 #define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10) 2427 #define _3D_CHICKEN2 _MMIO(0x208c) 2428 + 2429 + #define FF_SLICE_CHICKEN _MMIO(0x2088) 2430 + #define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1) 2431 + 2432 /* Disables pipelining of read flushes past the SF-WIZ interface. 2433 * Required on all Ironlake steppings according to the B-Spec, but the 2434 * particular danger of not doing so is not specified. 2435 */ 2436 # define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14) 2437 #define _3D_CHICKEN3 _MMIO(0x2090) 2438 + #define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12) 2439 #define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10) 2440 #define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5) 2441 #define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
+20
drivers/gpu/drm/i915/intel_crt.c
··· 304 int max_dotclk = dev_priv->max_dotclk_freq; 305 int max_clock; 306 307 if (mode->clock < 25000) 308 return MODE_CLOCK_LOW; 309 ··· 340 struct intel_crtc_state *pipe_config, 341 struct drm_connector_state *conn_state) 342 { 343 return true; 344 } 345 ··· 353 struct intel_crtc_state *pipe_config, 354 struct drm_connector_state *conn_state) 355 { 356 pipe_config->has_pch_encoder = true; 357 358 return true; ··· 369 struct drm_connector_state *conn_state) 370 { 371 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 372 373 pipe_config->has_pch_encoder = true; 374
··· 304 int max_dotclk = dev_priv->max_dotclk_freq; 305 int max_clock; 306 307 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 308 + return MODE_NO_DBLESCAN; 309 + 310 if (mode->clock < 25000) 311 return MODE_CLOCK_LOW; 312 ··· 337 struct intel_crtc_state *pipe_config, 338 struct drm_connector_state *conn_state) 339 { 340 + struct drm_display_mode *adjusted_mode = 341 + &pipe_config->base.adjusted_mode; 342 + 343 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 344 + return false; 345 + 346 return true; 347 } 348 ··· 344 struct intel_crtc_state *pipe_config, 345 struct drm_connector_state *conn_state) 346 { 347 + struct drm_display_mode *adjusted_mode = 348 + &pipe_config->base.adjusted_mode; 349 + 350 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 351 + return false; 352 + 353 pipe_config->has_pch_encoder = true; 354 355 return true; ··· 354 struct drm_connector_state *conn_state) 355 { 356 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); 357 + struct drm_display_mode *adjusted_mode = 358 + &pipe_config->base.adjusted_mode; 359 + 360 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 361 + return false; 362 363 pipe_config->has_pch_encoder = true; 364
+13 -3
drivers/gpu/drm/i915/intel_display.c
··· 14469 intel_mode_valid(struct drm_device *dev, 14470 const struct drm_display_mode *mode) 14471 { 14472 if (mode->vscan > 1) 14473 return MODE_NO_VSCAN; 14474 - 14475 - if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 14476 - return MODE_NO_DBLESCAN; 14477 14478 if (mode->flags & DRM_MODE_FLAG_HSKEW) 14479 return MODE_H_ILLEGAL;
··· 14469 intel_mode_valid(struct drm_device *dev, 14470 const struct drm_display_mode *mode) 14471 { 14472 + /* 14473 + * Can't reject DBLSCAN here because Xorg ddxen can add piles 14474 + * of DBLSCAN modes to the output's mode list when they detect 14475 + * the scaling mode property on the connector. And they don't 14476 + * ask the kernel to validate those modes in any way until 14477 + * modeset time at which point the client gets a protocol error. 14478 + * So in order to not upset those clients we silently ignore the 14479 + * DBLSCAN flag on such connectors. For other connectors we will 14480 + * reject modes with the DBLSCAN flag in encoder->compute_config(). 14481 + * And we always reject DBLSCAN modes in connector->mode_valid() 14482 + * as we never want such modes on the connector's mode list. 14483 + */ 14484 + 14485 if (mode->vscan > 1) 14486 return MODE_NO_VSCAN; 14487 14488 if (mode->flags & DRM_MODE_FLAG_HSKEW) 14489 return MODE_H_ILLEGAL;
+16 -18
drivers/gpu/drm/i915/intel_dp.c
··· 420 int max_rate, mode_rate, max_lanes, max_link_clock; 421 int max_dotclk; 422 423 max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); 424 425 if (intel_dp_is_edp(intel_dp) && fixed_mode) { ··· 1865 conn_state->scaling_mode); 1866 } 1867 1868 - if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && 1869 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 1870 return false; 1871 ··· 2790 const struct drm_connector_state *old_conn_state) 2791 { 2792 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2793 - 2794 - /* disable the port before the pipe on g4x */ 2795 - intel_dp_link_down(encoder, old_crtc_state); 2796 - } 2797 - 2798 - static void ilk_disable_dp(struct intel_encoder *encoder, 2799 - const struct intel_crtc_state *old_crtc_state, 2800 - const struct drm_connector_state *old_conn_state) 2801 - { 2802 - intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2803 } 2804 2805 static void vlv_disable_dp(struct intel_encoder *encoder, ··· 2803 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2804 } 2805 2806 - static void ilk_post_disable_dp(struct intel_encoder *encoder, 2807 const struct intel_crtc_state *old_crtc_state, 2808 const struct drm_connector_state *old_conn_state) 2809 { 2810 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2811 enum port port = encoder->port; 2812 2813 intel_dp_link_down(encoder, old_crtc_state); 2814 2815 /* Only ilk+ has port A */ ··· 6339 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 6340 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 6341 6342 - if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) 6343 connector->interlace_allowed = true; 6344 connector->doublescan_allowed = 0; 6345 ··· 6438 intel_encoder->enable = vlv_enable_dp; 6439 intel_encoder->disable = vlv_disable_dp; 6440 intel_encoder->post_disable = vlv_post_disable_dp; 6441 - } else if (INTEL_GEN(dev_priv) >= 5) { 6442 - intel_encoder->pre_enable = g4x_pre_enable_dp; 6443 - intel_encoder->enable = g4x_enable_dp; 6444 - intel_encoder->disable = ilk_disable_dp; 6445 - intel_encoder->post_disable = ilk_post_disable_dp; 6446 } else { 6447 intel_encoder->pre_enable = g4x_pre_enable_dp; 6448 intel_encoder->enable = g4x_enable_dp; 6449 intel_encoder->disable = g4x_disable_dp; 6450 } 6451 6452 intel_dig_port->dp.output_reg = output_reg;
··· 420 int max_rate, mode_rate, max_lanes, max_link_clock; 421 int max_dotclk; 422 423 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 424 + return MODE_NO_DBLESCAN; 425 + 426 max_dotclk = intel_dp_downstream_max_dotclock(intel_dp); 427 428 if (intel_dp_is_edp(intel_dp) && fixed_mode) { ··· 1862 conn_state->scaling_mode); 1863 } 1864 1865 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 1866 + return false; 1867 + 1868 + if (HAS_GMCH_DISPLAY(dev_priv) && 1869 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 1870 return false; 1871 ··· 2784 const struct drm_connector_state *old_conn_state) 2785 { 2786 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2787 } 2788 2789 static void vlv_disable_dp(struct intel_encoder *encoder, ··· 2807 intel_disable_dp(encoder, old_crtc_state, old_conn_state); 2808 } 2809 2810 + static void g4x_post_disable_dp(struct intel_encoder *encoder, 2811 const struct intel_crtc_state *old_crtc_state, 2812 const struct drm_connector_state *old_conn_state) 2813 { 2814 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2815 enum port port = encoder->port; 2816 2817 + /* 2818 + * Bspec does not list a specific disable sequence for g4x DP. 2819 + * Follow the ilk+ sequence (disable pipe before the port) for 2820 + * g4x DP as it does not suffer from underruns like the normal 2821 + * g4x modeset sequence (disable pipe after the port). 2822 + */ 2823 intel_dp_link_down(encoder, old_crtc_state); 2824 2825 /* Only ilk+ has port A */ ··· 6337 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 6338 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 6339 6340 + if (!HAS_GMCH_DISPLAY(dev_priv)) 6341 connector->interlace_allowed = true; 6342 connector->doublescan_allowed = 0; 6343 ··· 6436 intel_encoder->enable = vlv_enable_dp; 6437 intel_encoder->disable = vlv_disable_dp; 6438 intel_encoder->post_disable = vlv_post_disable_dp; 6439 } else { 6440 intel_encoder->pre_enable = g4x_pre_enable_dp; 6441 intel_encoder->enable = g4x_enable_dp; 6442 intel_encoder->disable = g4x_disable_dp; 6443 + intel_encoder->post_disable = g4x_post_disable_dp; 6444 } 6445 6446 intel_dig_port->dp.output_reg = output_reg;
+6
drivers/gpu/drm/i915/intel_dp_mst.c
··· 48 bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, 49 DP_DPCD_QUIRK_LIMITED_M_N); 50 51 pipe_config->has_pch_encoder = false; 52 bpp = 24; 53 if (intel_dp->compliance.test_data.bpc) { ··· 368 369 if (!intel_dp) 370 return MODE_ERROR; 371 372 max_link_clock = intel_dp_max_link_rate(intel_dp); 373 max_lanes = intel_dp_max_lane_count(intel_dp);
··· 48 bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc, 49 DP_DPCD_QUIRK_LIMITED_M_N); 50 51 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 52 + return false; 53 + 54 pipe_config->has_pch_encoder = false; 55 bpp = 24; 56 if (intel_dp->compliance.test_data.bpc) { ··· 365 366 if (!intel_dp) 367 return MODE_ERROR; 368 + 369 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 370 + return MODE_NO_DBLESCAN; 371 372 max_link_clock = intel_dp_max_link_rate(intel_dp); 373 max_lanes = intel_dp_max_lane_count(intel_dp);
+6
drivers/gpu/drm/i915/intel_dsi.c
··· 326 conn_state->scaling_mode); 327 } 328 329 /* DSI uses short packets for sync events, so clear mode flags for DSI */ 330 adjusted_mode->flags = 0; 331 ··· 1268 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 1269 1270 DRM_DEBUG_KMS("\n"); 1271 1272 if (fixed_mode) { 1273 if (mode->hdisplay > fixed_mode->hdisplay)
··· 326 conn_state->scaling_mode); 327 } 328 329 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 330 + return false; 331 + 332 /* DSI uses short packets for sync events, so clear mode flags for DSI */ 333 adjusted_mode->flags = 0; 334 ··· 1265 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 1266 1267 DRM_DEBUG_KMS("\n"); 1268 + 1269 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1270 + return MODE_NO_DBLESCAN; 1271 1272 if (fixed_mode) { 1273 if (mode->hdisplay > fixed_mode->hdisplay)
+6
drivers/gpu/drm/i915/intel_dvo.c
··· 219 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 220 int target_clock = mode->clock; 221 222 /* XXX: Validate clock range */ 223 224 if (fixed_mode) { ··· 256 */ 257 if (fixed_mode) 258 intel_fixed_panel_mode(fixed_mode, adjusted_mode); 259 260 return true; 261 }
··· 219 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 220 int target_clock = mode->clock; 221 222 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 223 + return MODE_NO_DBLESCAN; 224 + 225 /* XXX: Validate clock range */ 226 227 if (fixed_mode) { ··· 253 */ 254 if (fixed_mode) 255 intel_fixed_panel_mode(fixed_mode, adjusted_mode); 256 + 257 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 258 + return false; 259 260 return true; 261 }
+6
drivers/gpu/drm/i915/intel_hdmi.c
··· 1557 bool force_dvi = 1558 READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI; 1559 1560 clock = mode->clock; 1561 1562 if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) ··· 1679 int clock_12bpc = clock_8bpc * 3 / 2; 1680 int desired_bpp; 1681 bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; 1682 1683 pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; 1684
··· 1557 bool force_dvi = 1558 READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI; 1559 1560 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1561 + return MODE_NO_DBLESCAN; 1562 + 1563 clock = mode->clock; 1564 1565 if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING) ··· 1676 int clock_12bpc = clock_8bpc * 3 / 2; 1677 int desired_bpp; 1678 bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; 1679 + 1680 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 1681 + return false; 1682 1683 pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink; 1684
+13 -5
drivers/gpu/drm/i915/intel_lrc.c
··· 1545 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ 1546 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 1547 1548 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ 1549 - *batch++ = MI_LOAD_REGISTER_IMM(1); 1550 *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2); 1551 *batch++ = _MASKED_BIT_DISABLE( 1552 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE); 1553 *batch++ = MI_NOOP; 1554 1555 /* WaClearSlmSpaceAtContextSwitch:kbl */ ··· 2651 context_size += LRC_HEADER_PAGES * PAGE_SIZE; 2652 2653 ctx_obj = i915_gem_object_create(ctx->i915, context_size); 2654 - if (IS_ERR(ctx_obj)) { 2655 - ret = PTR_ERR(ctx_obj); 2656 - goto error_deref_obj; 2657 - } 2658 2659 vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL); 2660 if (IS_ERR(vma)) {
··· 1545 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */ 1546 batch = gen8_emit_flush_coherentl3_wa(engine, batch); 1547 1548 + *batch++ = MI_LOAD_REGISTER_IMM(3); 1549 + 1550 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */ 1551 *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2); 1552 *batch++ = _MASKED_BIT_DISABLE( 1553 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE); 1554 + 1555 + /* BSpec: 11391 */ 1556 + *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN); 1557 + *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX); 1558 + 1559 + /* BSpec: 11299 */ 1560 + *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3); 1561 + *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX); 1562 + 1563 *batch++ = MI_NOOP; 1564 1565 /* WaClearSlmSpaceAtContextSwitch:kbl */ ··· 2641 context_size += LRC_HEADER_PAGES * PAGE_SIZE; 2642 2643 ctx_obj = i915_gem_object_create(ctx->i915, context_size); 2644 + if (IS_ERR(ctx_obj)) 2645 + return PTR_ERR(ctx_obj); 2646 2647 vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL); 2648 if (IS_ERR(vma)) {
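Two separate fixes meet in this file. In the Gen9 workaround batch, the single chicken-register write grows to three: the existing COMMON_SLICE_CHICKEN2 write is joined by FF_SLICE_CHICKEN and _3D_CHICKEN3 writes for the provoking-vertex fix, so the MI_LOAD_REGISTER_IMM header count is bumped from 1 to 3 to match the number of offset/value pairs that follow it. Separately, the context-image allocation error path now returns PTR_ERR(ctx_obj) directly; the old goto would have handed the error pointer to the cleanup label. Below is a standalone sketch of the "header count must match the pairs emitted" rule; the header layout and register offsets are placeholders, not the real MI encoding or register list.

    /* Standalone sketch, stub encoding only: the count in the header must
     * equal the number of offset/value pairs written after it. */
    #include <stdint.h>
    #include <stdio.h>

    #define LRI_HEADER(count)  (0x22u << 23 | (2u * (count) - 1))  /* placeholder layout */

    static uint32_t *emit_lri(uint32_t *batch, const uint32_t *regs,
                              const uint32_t *vals, unsigned int count)
    {
        unsigned int i;

        *batch++ = LRI_HEADER(count);
        for (i = 0; i < count; i++) {
            *batch++ = regs[i];   /* register offset */
            *batch++ = vals[i];   /* masked-bit value */
        }
        return batch;
    }

    int main(void)
    {
        uint32_t batch[16], *end;
        const uint32_t regs[] = { 0x1000, 0x1004, 0x1008 };   /* placeholder offsets */
        const uint32_t vals[] = { 0x1, 0x2, 0x4 };            /* placeholder values */

        end = emit_lri(batch, regs, vals, 3);
        printf("emitted %td dwords\n", end - batch);
        return 0;
    }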
+5
drivers/gpu/drm/i915/intel_lvds.c
··· 380 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 381 int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; 382 383 if (mode->hdisplay > fixed_mode->hdisplay) 384 return MODE_PANEL; 385 if (mode->vdisplay > fixed_mode->vdisplay) ··· 430 */ 431 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 432 adjusted_mode); 433 434 if (HAS_PCH_SPLIT(dev_priv)) { 435 pipe_config->has_pch_encoder = true;
··· 380 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; 381 int max_pixclk = to_i915(connector->dev)->max_dotclk_freq; 382 383 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 384 + return MODE_NO_DBLESCAN; 385 if (mode->hdisplay > fixed_mode->hdisplay) 386 return MODE_PANEL; 387 if (mode->vdisplay > fixed_mode->vdisplay) ··· 428 */ 429 intel_fixed_panel_mode(intel_connector->panel.fixed_mode, 430 adjusted_mode); 431 + 432 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 433 + return false; 434 435 if (HAS_PCH_SPLIT(dev_priv)) { 436 pipe_config->has_pch_encoder = true;
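LVDS follows the same pattern. Note where the compute_config check lands: after intel_fixed_panel_mode() has replaced the user timings, so a user mode that only carries the doublescan flag still resolves to the panel's native mode, while genuinely doublescanned output stays rejected.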
+6
drivers/gpu/drm/i915/intel_sdvo.c
··· 1160 adjusted_mode); 1161 } 1162 1163 /* 1164 * Make the CRTC code factor in the SDVO pixel multiplier. The 1165 * SDVO device will factor out the multiplier during mode_set. ··· 1623 { 1624 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1625 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 1626 1627 if (intel_sdvo->pixel_clock_min > mode->clock) 1628 return MODE_CLOCK_LOW;
··· 1160 adjusted_mode); 1161 } 1162 1163 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 1164 + return false; 1165 + 1166 /* 1167 * Make the CRTC code factor in the SDVO pixel multiplier. The 1168 * SDVO device will factor out the multiplier during mode_set. ··· 1620 { 1621 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1622 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 1623 + 1624 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 1625 + return MODE_NO_DBLESCAN; 1626 1627 if (intel_sdvo->pixel_clock_min > mode->clock) 1628 return MODE_CLOCK_LOW;
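SDVO: the same pair of checks, with mode_valid reporting MODE_NO_DBLESCAN for probed modes and the compute_config test placed after the fixed panel mode handling used by SDVO LVDS outputs.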
+10 -2
drivers/gpu/drm/i915/intel_tv.c
··· 850 const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); 851 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 852 853 if (mode->clock > max_dotclk) 854 return MODE_CLOCK_HIGH; 855 ··· 880 struct drm_connector_state *conn_state) 881 { 882 const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); 883 884 if (!tv_mode) 885 return false; 886 887 - pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock; 888 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 889 pipe_config->pipe_bpp = 8*3; 890 891 /* TV has it's own notion of sync and other mode flags, so clear them. */ 892 - pipe_config->base.adjusted_mode.flags = 0; 893 894 /* 895 * FIXME: We don't check whether the input mode is actually what we want
··· 850 const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state); 851 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 852 853 + if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 854 + return MODE_NO_DBLESCAN; 855 + 856 if (mode->clock > max_dotclk) 857 return MODE_CLOCK_HIGH; 858 ··· 877 struct drm_connector_state *conn_state) 878 { 879 const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state); 880 + struct drm_display_mode *adjusted_mode = 881 + &pipe_config->base.adjusted_mode; 882 883 if (!tv_mode) 884 return false; 885 886 + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) 887 + return false; 888 + 889 + adjusted_mode->crtc_clock = tv_mode->clock; 890 DRM_DEBUG_KMS("forcing bpc to 8 for TV\n"); 891 pipe_config->pipe_bpp = 8*3; 892 893 /* TV has it's own notion of sync and other mode flags, so clear them. */ 894 + adjusted_mode->flags = 0; 895 896 /* 897 * FIXME: We don't check whether the input mode is actually what we want
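The TV encoder gets the same two checks, and the hunk also introduces a local adjusted_mode pointer so the existing crtc_clock and flags assignments no longer spell out pipe_config->base.adjusted_mode each time. The doublescan test runs before the encoder clears the mode flags for its own sync handling, so the flag is still visible at that point.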
+1 -1
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
··· 132 133 nvif_object_map(&wndw->wimm.base.user, NULL, 0); 134 wndw->immd = func; 135 - wndw->ctxdma.parent = &disp->core->chan.base.user; 136 return 0; 137 } 138
··· 132 133 nvif_object_map(&wndw->wimm.base.user, NULL, 0); 134 wndw->immd = func; 135 + wndw->ctxdma.parent = NULL; 136 return 0; 137 } 138
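Part one of the nouveau cursor fix: the cursor window's ctxdma parent is cleared rather than pointed at the core channel object. By itself this leaves nothing for the per-framebuffer ctxdma lookup to attach to, which is what the guard in the following wndw.c hunk accounts for; together the two hunks make cursor windows rely on the core channel's VRAM ctxdma instead of allocating their own.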
+8 -5
drivers/gpu/drm/nouveau/dispnv50/wndw.c
··· 444 if (ret) 445 return ret; 446 447 - ctxdma = nv50_wndw_ctxdma_new(wndw, fb); 448 - if (IS_ERR(ctxdma)) { 449 - nouveau_bo_unpin(fb->nvbo); 450 - return PTR_ERR(ctxdma); 451 } 452 453 asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv); 454 - asyw->image.handle[0] = ctxdma->object.handle; 455 asyw->image.offset[0] = fb->nvbo->bo.offset; 456 457 if (wndw->func->prepare) {
··· 444 if (ret) 445 return ret; 446 447 + if (wndw->ctxdma.parent) { 448 + ctxdma = nv50_wndw_ctxdma_new(wndw, fb); 449 + if (IS_ERR(ctxdma)) { 450 + nouveau_bo_unpin(fb->nvbo); 451 + return PTR_ERR(ctxdma); 452 + } 453 + 454 + asyw->image.handle[0] = ctxdma->object.handle; 455 } 456 457 asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv); 458 asyw->image.offset[0] = fb->nvbo->bo.offset; 459 460 if (wndw->func->prepare) {
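Part two: prepare_fb only creates and records a ctxdma handle when the window actually has a ctxdma parent. For cursor windows, whose parent is now NULL, asyw->image.handle[0] is no longer populated here; the cursor path is expected to go through the core channel's VRAM ctxdma instead, while the fence and offset bookkeeping below still runs for every window type.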
+5 -2
drivers/gpu/drm/qxl/qxl_display.c
··· 623 struct qxl_cursor_cmd *cmd; 624 struct qxl_cursor *cursor; 625 struct drm_gem_object *obj; 626 - struct qxl_bo *cursor_bo = NULL, *user_bo = NULL; 627 int ret; 628 void *user_ptr; 629 int size = 64*64*4; ··· 677 cursor_bo, 0); 678 cmd->type = QXL_CURSOR_SET; 679 680 - qxl_bo_unref(&qcrtc->cursor_bo); 681 qcrtc->cursor_bo = cursor_bo; 682 cursor_bo = NULL; 683 } else { ··· 696 qxl_release_unmap(qdev, release, &cmd->release_info); 697 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 698 qxl_release_fence_buffer_objects(release); 699 700 qxl_bo_unref(&cursor_bo); 701
··· 623 struct qxl_cursor_cmd *cmd; 624 struct qxl_cursor *cursor; 625 struct drm_gem_object *obj; 626 + struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL; 627 int ret; 628 void *user_ptr; 629 int size = 64*64*4; ··· 677 cursor_bo, 0); 678 cmd->type = QXL_CURSOR_SET; 679 680 + old_cursor_bo = qcrtc->cursor_bo; 681 qcrtc->cursor_bo = cursor_bo; 682 cursor_bo = NULL; 683 } else { ··· 696 qxl_release_unmap(qdev, release, &cmd->release_info); 697 qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); 698 qxl_release_fence_buffer_objects(release); 699 + 700 + if (old_cursor_bo) 701 + qxl_bo_unref(&old_cursor_bo); 702 703 qxl_bo_unref(&cursor_bo); 704
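The qxl cursor update no longer drops the previous cursor BO while the new cursor command is still being assembled. The old reference is parked in old_cursor_bo when the new BO is installed and only released after the command has been pushed to the ring and the release fenced; dropping the last reference to a BO can involve work that is unsafe at the earlier point in the function, and the apparent intent is to move that release out of the sensitive section. A standalone sketch of the swap-now, release-later pattern follows; it uses a generic refcounted buffer, not the qxl types.

    /* Standalone sketch: install the new object first, drop the old
     * reference only after the critical work has completed. */
    #include <stdio.h>
    #include <stdlib.h>

    struct buf { int refs; };

    static void buf_unref(struct buf **p)
    {
        if (*p && --(*p)->refs == 0)
            free(*p);   /* heavyweight in the real driver */
        *p = NULL;
    }

    static void update_cursor(struct buf **slot, struct buf *newbuf)
    {
        struct buf *old = *slot;  /* stash the old reference... */

        *slot = newbuf;           /* ...and install the new one immediately */

        /* submit/fence the command that uses newbuf here */

        buf_unref(&old);          /* drop the old reference only afterwards */
    }

    int main(void)
    {
        struct buf *slot = calloc(1, sizeof(*slot));
        struct buf *next = calloc(1, sizeof(*next));

        if (!slot || !next)
            return 1;
        slot->refs = 1;
        next->refs = 1;

        update_cursor(&slot, next);
        printf("slot now holds the new buffer: %p\n", (void *)slot);

        buf_unref(&slot);
        return 0;
    }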
-25
drivers/gpu/drm/sun4i/sun4i_tcon.c
··· 17 #include <drm/drm_encoder.h> 18 #include <drm/drm_modes.h> 19 #include <drm/drm_of.h> 20 - #include <drm/drm_panel.h> 21 22 #include <uapi/drm/drm_mode.h> 23 ··· 417 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, 418 const struct drm_display_mode *mode) 419 { 420 - struct drm_panel *panel = tcon->panel; 421 - struct drm_connector *connector = panel->connector; 422 - struct drm_display_info display_info = connector->display_info; 423 unsigned int bp, hsync, vsync; 424 u8 clk_delay; 425 u32 val = 0; ··· 473 474 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 475 val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; 476 - 477 - /* 478 - * On A20 and similar SoCs, the only way to achieve Positive Edge 479 - * (Rising Edge), is setting dclk clock phase to 2/3(240°). 480 - * By default TCON works in Negative Edge(Falling Edge), 481 - * this is why phase is set to 0 in that case. 482 - * Unfortunately there's no way to logically invert dclk through 483 - * IO_POL register. 484 - * The only acceptable way to work, triple checked with scope, 485 - * is using clock phase set to 0° for Negative Edge and set to 240° 486 - * for Positive Edge. 487 - * On A33 and similar SoCs there would be a 90° phase option, 488 - * but it divides also dclk by 2. 489 - * Following code is a way to avoid quirks all around TCON 490 - * and DOTCLOCK drivers. 491 - */ 492 - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE) 493 - clk_set_phase(tcon->dclk, 240); 494 - 495 - if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE) 496 - clk_set_phase(tcon->dclk, 0); 497 498 regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, 499 SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
··· 17 #include <drm/drm_encoder.h> 18 #include <drm/drm_modes.h> 19 #include <drm/drm_of.h> 20 21 #include <uapi/drm/drm_mode.h> 22 ··· 418 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon, 419 const struct drm_display_mode *mode) 420 { 421 unsigned int bp, hsync, vsync; 422 u8 clk_delay; 423 u32 val = 0; ··· 477 478 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 479 val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; 480 481 regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG, 482 SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
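The sun4i change removes the dclk phase adjustment that was keyed off the panel's DRM_BUS_FLAG_PIXDATA_*EDGE bus flags, along with the panel/connector lookup and the drm_panel.h include that existed only to read those flags; TCON IO polarity programming goes back to handling the sync flags alone.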