Merge tag 'drm-fixes-2018-06-22' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Just run of the mill fixes,

core:
- regression fix in device unplug

qxl:
- regression fix for might sleep in cursor handling

nouveau:
- regression fix in multi-screen cursor handling

amdgpu:
- switch off DC by default on Kaveri and older
- some minor fixes

i915:
- some GEM regression fixes
- doublescan mode fixes

sun4i:
- revert fix for a regression

sii8620 bridge:
- misc fixes"

* tag 'drm-fixes-2018-06-22' of git://anongit.freedesktop.org/drm/drm: (28 commits)
drm/bridge/sii8620: fix display of packed pixel modes in MHL2
drm/amdgpu: Make amdgpu_vram_mgr_bo_invisible_size always accurate
drm/amdgpu: Refactor amdgpu_vram_mgr_bo_invisible_size helper
drm/amdgpu: Update pin_size values before unpinning BO
drm/amdgpu:All UVD instances share one idle_work handle
drm/amdgpu: Don't default to DC support for Kaveri and older
drm/amdgpu: Use kvmalloc_array for allocating VRAM manager nodes array
drm/amd/pp: Fix uninitialized variable
drm/i915: Enable provoking vertex fix on Gen9 systems.
drm/i915: Fix context ban and hang accounting for client
drm/i915: Turn off g4x DP port in .post_disable()
drm/i915: Disallow interlaced modes on g4x DP outputs
drm/i915: Fix PIPESTAT irq ack on i965/g4x
drm/i915: Allow DBLSCAN user modes with eDP/LVDS/DSI
drm/i915/execlists: Avoid putting the error pointer
drm/i915: Apply batch location restrictions before pinning
drm/nouveau/kms/nv50-: cursors always use core channel vram ctxdma
Revert "drm/sun4i: Handle DRM_BUS_FLAG_PIXDATA_*EDGE"
drm/atmel-hlcdc: check stride values in the first plane
drm/bridge/sii8620: fix HDMI cable connection to dongle
...

+407 -336
+9 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
···
 	switch (asic_type) {
 #if defined(CONFIG_DRM_AMD_DC)
 	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+		return amdgpu_dc > 0;
+	case CHIP_HAWAII:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 	case CHIP_POLARIS10:
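
For context, this check works against the tri-state amdgpu.dc module parameter. A minimal sketch of the resulting decision (the parameter semantics are taken from amdgpu_drv.c; the trailing return for the newer ASICs is assumed from the rest of this function and is not shown in the hunk above):

	/* amdgpu.dc: 1 = enable, 0 = disable, -1 = auto (default) */
	int amdgpu_dc = -1;

	/* Kaveri and older: DC is now strictly opt-in */
	return amdgpu_dc > 0;	/* true only for an explicit amdgpu.dc=1 */

	/* newer ASICs keep DC on unless it is explicitly disabled */
	return amdgpu_dc != 0;	/* true for both auto (-1) and 1 */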
+10 -14
drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
···
 	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		adev->vram_pin_size += amdgpu_bo_size(bo);
-		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			adev->invisible_pin_size += amdgpu_bo_size(bo);
+		adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
 	} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
 		adev->gart_pin_size += amdgpu_bo_size(bo);
 	}
···
 	bo->pin_count--;
 	if (bo->pin_count)
 		return 0;
+
+	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+		adev->vram_pin_size -= amdgpu_bo_size(bo);
+		adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
+	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+		adev->gart_pin_size -= amdgpu_bo_size(bo);
+	}
+
 	for (i = 0; i < bo->placement.num_placement; i++) {
 		bo->placements[i].lpfn = 0;
 		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
 	}
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
-	if (unlikely(r)) {
+	if (unlikely(r))
 		dev_err(adev->dev, "%p validate failed for unpin\n", bo);
-		goto error;
-	}
 
-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
-		adev->vram_pin_size -= amdgpu_bo_size(bo);
-		if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
-			adev->invisible_pin_size -= amdgpu_bo_size(bo);
-	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
-		adev->gart_pin_size -= amdgpu_bo_size(bo);
-	}
-
-error:
 	return r;
 }
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
···
 uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
 int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
 
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
 uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
 uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
+7 -7
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
···
 	unsigned version_major, version_minor, family_id;
 	int i, j, r;
 
-	INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
 
 	switch (adev->asic_type) {
 #ifdef CONFIG_DRM_AMDGPU_CIK
···
 	void *ptr;
 	int i, j;
 
+	cancel_delayed_work_sync(&adev->uvd.idle_work);
+
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		if (adev->uvd.inst[j].vcpu_bo == NULL)
 			continue;
-
-		cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
 
 		/* only valid for physical mode */
 		if (adev->asic_type < CHIP_POLARIS10) {
···
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
 {
 	struct amdgpu_device *adev =
-		container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+		container_of(work, struct amdgpu_device, uvd.idle_work.work);
 	unsigned fences = 0, i, j;
 
 	for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
···
 					       AMD_CG_STATE_GATE);
 		}
 	} else {
-		schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 	}
 }
···
 	if (amdgpu_sriov_vf(adev))
 		return;
 
-	set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+	set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
 	if (set_clocks) {
 		if (adev->pm.dpm_enabled) {
 			amdgpu_dpm_enable_uvd(adev, true);
···
 void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
 {
 	if (!amdgpu_sriov_vf(ring->adev))
-		schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+		schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
 }
 
 /**
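
Sharing one device-level handle works because queueing an already-pending delayed work item is a no-op, so the rings can no longer race over mismatched per-instance timers. A sketch of the resulting pattern (abridged from the hunks above):

	/* every ring's end-of-use just (re)arms the one common timer;
	 * schedule_delayed_work() does nothing if it is already queued */
	schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);

	/* and the single handler polls all instances before gating */
	for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
		fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);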
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
···
 	void *saved_bo;
 	atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
 	struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
-	struct delayed_work idle_work;
 	struct amdgpu_ring ring;
 	struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
 	struct amdgpu_irq_src irq;
···
 	bool address_64_bit;
 	bool use_ctx_buf;
 	struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+	struct delayed_work idle_work;
 };
 
 int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
+36 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
···
 }
 
 /**
+ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+{
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	struct ttm_mem_reg *mem = &bo->tbo.mem;
+	struct drm_mm_node *nodes = mem->mm_node;
+	unsigned pages = mem->num_pages;
+	u64 usage = 0;
+
+	if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+		return 0;
+
+	if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+		return amdgpu_bo_size(bo);
+
+	while (nodes && pages) {
+		usage += nodes->size << PAGE_SHIFT;
+		usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+		pages -= nodes->size;
+		++nodes;
+	}
+
+	return usage;
+}
+
+/**
  * amdgpu_vram_mgr_new - allocate new ranges
  *
  * @man: TTM memory type manager
···
 		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
 	}
 
-	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+	nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+			       GFP_KERNEL | __GFP_ZERO);
 	if (!nodes)
 		return -ENOMEM;
···
 		drm_mm_remove_node(&nodes[i]);
 	spin_unlock(&mgr->lock);
 
-	kfree(nodes);
+	kvfree(nodes);
 	return r == -ENOSPC ? 0 : r;
 }
···
 	atomic64_sub(usage, &mgr->usage);
 	atomic64_sub(vis_usage, &mgr->vis_usage);
 
-	kfree(mem->mm_node);
+	kvfree(mem->mm_node);
 	mem->mm_node = NULL;
 }
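
A worked example of the new helper, with hypothetical numbers:

	/*
	 * Assume visible_vram_size = 256 MiB, real_vram_size = 8 GiB.
	 *
	 *   64 MiB BO, single node starting at 224 MiB:
	 *     usage     = 64 MiB
	 *     visible   = 256 - 224 = 32 MiB   (amdgpu_vram_mgr_vis_size())
	 *     invisible = 64 - 32   = 32 MiB   (returned)
	 *
	 *   BO starting at or above 256 MiB: amdgpu_bo_size(bo), all invisible.
	 *   Board with visible == real VRAM: 0, nothing is invisible.
	 */

The allocator change pairs with this: kvmalloc_array() transparently falls back to vmalloc() when a heavily fragmented BO needs a nodes array too large for kmalloc, and kvfree() releases either kind.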
+1 -1
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
···
 static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
 {
 	struct amdgpu_device *adev = hwmgr->adev;
-	int result;
+	int result = 0;
 	uint32_t num_se = 0;
 	uint32_t count, data;
+1 -1
drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
···
 		return ret;
 	}
 
-	if (desc->layout.xstride && desc->layout.pstride) {
+	if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
 		int ret;
 
 		ret = drm_plane_create_rotation_property(&plane->base,
+116 -191
drivers/gpu/drm/bridge/sil-sii8620.c
···
 
 #define SII8620_BURST_BUF_LEN 288
 #define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
-#define MHL1_MAX_LCLK 225000
-#define MHL3_MAX_LCLK 600000
+
+#define MHL1_MAX_PCLK 75000
+#define MHL1_MAX_PCLK_PP_MODE 150000
+#define MHL3_MAX_PCLK 200000
+#define MHL3_MAX_PCLK_PP_MODE 300000
 
 enum sii8620_mode {
 	CM_DISCONNECTED,
···
 	u8 devcap[MHL_DCAP_SIZE];
 	u8 xdevcap[MHL_XDC_SIZE];
 	u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
+	bool feature_complete;
+	bool devcap_read;
+	bool sink_detected;
 	struct edid *edid;
 	unsigned int gen2_write_burst:1;
 	enum sii8620_mt_state mt_state;
···
 	}
 }
 
-static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
+static void sii8620_identify_sink(struct sii8620 *ctx)
 {
 	static const char * const sink_str[] = {
 		[SINK_NONE] = "NONE",
···
 	char sink_name[20];
 	struct device *dev = ctx->dev;
 
-	if (ret < 0)
+	if (!ctx->sink_detected || !ctx->devcap_read)
 		return;
 
 	sii8620_fetch_edid(ctx);
···
 		sii8620_mhl_disconnected(ctx);
 		return;
 	}
+	sii8620_set_upstream_edid(ctx);
 
 	if (drm_detect_hdmi_monitor(ctx->edid))
 		ctx->sink_type = SINK_HDMI;
···
 	dev_info(dev, "detected sink(type: %s): %s\n",
 		 sink_str[ctx->sink_type], sink_name);
-}
-
-static void sii8620_hsic_init(struct sii8620 *ctx)
-{
-	if (!sii8620_is_mhl3(ctx))
-		return;
-
-	sii8620_write(ctx, REG_FCGC,
-		BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
-	sii8620_setbits(ctx, REG_HRXCTRL3,
-		BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
-	sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
-	sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
-	sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
-	sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
-	sii8620_write_seq_static(ctx,
-		REG_TDMLLCTL, 0,
-		REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
-			BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
-		REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
-		REG_HRXINTL, 0xff,
-		REG_HRXINTH, 0xff,
-		REG_TTXINTL, 0xff,
-		REG_TTXINTH, 0xff,
-		REG_TRXINTL, 0xff,
-		REG_TRXINTH, 0xff,
-		REG_HTXINTL, 0xff,
-		REG_HTXINTH, 0xff,
-		REG_FCINTR0, 0xff,
-		REG_FCINTR1, 0xff,
-		REG_FCINTR2, 0xff,
-		REG_FCINTR3, 0xff,
-		REG_FCINTR4, 0xff,
-		REG_FCINTR5, 0xff,
-		REG_FCINTR6, 0xff,
-		REG_FCINTR7, 0xff
-	);
-}
-
-static void sii8620_edid_read(struct sii8620 *ctx, int ret)
-{
-	if (ret < 0)
-		return;
-
-	sii8620_set_upstream_edid(ctx);
-	sii8620_hsic_init(ctx);
-	sii8620_enable_hpd(ctx);
 }
 
 static void sii8620_mr_devcap(struct sii8620 *ctx)
···
 		 dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
 		 dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
 	sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+	ctx->devcap_read = true;
+	sii8620_identify_sink(ctx);
 }
 
 static void sii8620_mr_xdevcap(struct sii8620 *ctx)
···
 static void sii8620_fetch_edid(struct sii8620 *ctx)
 {
 	u8 lm_ddc, ddc_cmd, int3, cbus;
+	unsigned long timeout;
 	int fetched, i;
 	int edid_len = EDID_LENGTH;
 	u8 *edid;
···
 		REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
 	);
 
-	do {
-		int3 = sii8620_readb(ctx, REG_INTR3);
+	int3 = 0;
+	timeout = jiffies + msecs_to_jiffies(200);
+	for (;;) {
 		cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
-
-		if (int3 & BIT_DDC_CMD_DONE)
-			break;
-
-		if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+		if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
 			kfree(edid);
 			edid = NULL;
 			goto end;
 		}
-	} while (1);
-
-	sii8620_readb(ctx, REG_DDC_STATUS);
-	while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+		if (int3 & BIT_DDC_CMD_DONE) {
+			if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
+			    >= FETCH_SIZE)
+				break;
+		} else {
+			int3 = sii8620_readb(ctx, REG_INTR3);
+		}
+		if (time_is_before_jiffies(timeout)) {
+			ctx->error = -ETIMEDOUT;
+			dev_err(ctx->dev, "timeout during EDID read\n");
+			kfree(edid);
+			edid = NULL;
+			goto end;
+		}
 		usleep_range(10, 20);
+	}
 
 	sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
 	if (fetched + FETCH_SIZE == EDID_LENGTH) {
···
 	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
 	if (ret)
 		return ret;
+
 	usleep_range(10000, 20000);
-	return clk_prepare_enable(ctx->clk_xtal);
+	ret = clk_prepare_enable(ctx->clk_xtal);
+	if (ret)
+		return ret;
+
+	msleep(100);
+	gpiod_set_value(ctx->gpio_reset, 0);
+	msleep(100);
+
+	return 0;
 }
 
 static int sii8620_hw_off(struct sii8620 *ctx)
···
 	clk_disable_unprepare(ctx->clk_xtal);
 	gpiod_set_value(ctx->gpio_reset, 1);
 	return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
-}
-
-static void sii8620_hw_reset(struct sii8620 *ctx)
-{
-	usleep_range(10000, 20000);
-	gpiod_set_value(ctx->gpio_reset, 0);
-	usleep_range(5000, 20000);
-	gpiod_set_value(ctx->gpio_reset, 1);
-	usleep_range(10000, 20000);
-	gpiod_set_value(ctx->gpio_reset, 0);
-	msleep(300);
 }
 
 static void sii8620_cbus_reset(struct sii8620 *ctx)
···
 
 static void sii8620_set_format(struct sii8620 *ctx)
 {
-	u8 out_fmt;
-
 	if (sii8620_is_mhl3(ctx)) {
 		sii8620_setbits(ctx, REG_M3_P0CTRL,
 			BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
 			ctx->use_packed_pixel ? ~0 : 0);
 	} else {
-		if (ctx->use_packed_pixel)
-			sii8620_write_seq_static(ctx,
-				REG_VID_MODE, BIT_VID_MODE_M1080P,
-				REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
-				REG_MHLTX_CTL6, 0x60
-			);
-		else
 		sii8620_write_seq_static(ctx,
 			REG_VID_MODE, 0,
 			REG_MHL_TOP_CTL, 1,
···
 		);
 	}
 
-	if (ctx->use_packed_pixel)
-		out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
-			BIT_TPI_OUTPUT_CSCMODE709;
-	else
-		out_fmt = VAL_TPI_FORMAT(RGB, FULL);
-
 	sii8620_write_seq(ctx,
 		REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
-		REG_TPI_OUTPUT, out_fmt,
+		REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
 	);
 }
···
 	int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+	for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
 		if (clk < clk_spec[i].max_clk)
 			break;
···
 	);
 }
 
+static void sii8620_hpd_unplugged(struct sii8620 *ctx)
+{
+	sii8620_disable_hpd(ctx);
+	ctx->sink_type = SINK_NONE;
+	ctx->sink_detected = false;
+	ctx->feature_complete = false;
+	kfree(ctx->edid);
+	ctx->edid = NULL;
+}
+
 static void sii8620_disconnect(struct sii8620 *ctx)
 {
 	sii8620_disable_gen2_write_burst(ctx);
···
 		REG_MHL_DP_CTL6, 0x2A,
 		REG_MHL_DP_CTL7, 0x03
 	);
-	sii8620_disable_hpd(ctx);
+	sii8620_hpd_unplugged(ctx);
 	sii8620_write_seq_static(ctx,
 		REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
 		REG_MHL_COC_CTL1, 0x07,
···
 	memset(ctx->xstat, 0, sizeof(ctx->xstat));
 	memset(ctx->devcap, 0, sizeof(ctx->devcap));
 	memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+	ctx->devcap_read = false;
 	ctx->cbus_status = 0;
-	ctx->sink_type = SINK_NONE;
-	kfree(ctx->edid);
-	ctx->edid = NULL;
 	sii8620_mt_cleanup(ctx);
 }
···
 		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
 				      MHL_DST_LM_CLK_MODE_NORMAL
 				      | MHL_DST_LM_PATH_ENABLED);
-		if (!sii8620_is_mhl3(ctx))
-			sii8620_mt_read_devcap(ctx, false);
-		sii8620_mt_set_cont(ctx, sii8620_sink_detected);
 	} else {
 		sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
 				      MHL_DST_LM_CLK_MODE_NORMAL);
···
 	sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
 	sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
 
-	if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+	if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
+	    MHL_DST_CONN_DCAP_RDY) {
 		sii8620_status_dcap_ready(ctx);
+
+		if (!sii8620_is_mhl3(ctx))
+			sii8620_mt_read_devcap(ctx, false);
+	}
 
 	if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
 		sii8620_status_changed_path(ctx);
···
 	}
 	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
 		sii8620_send_features(ctx);
-	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
-		sii8620_edid_read(ctx, 0);
+	if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
+		ctx->feature_complete = true;
+		if (ctx->edid)
+			sii8620_enable_hpd(ctx);
+	}
 }
 
 static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
···
 	if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
 		sii8620_msc_mr_write_stat(ctx);
 
+	if (stat & BIT_CBUS_HPD_CHG) {
+		if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
+			ctx->sink_detected = true;
+			sii8620_identify_sink(ctx);
+		} else {
+			sii8620_hpd_unplugged(ctx);
+		}
+	}
+
 	if (stat & BIT_CBUS_MSC_MR_SET_INT)
 		sii8620_msc_mr_set_int(ctx);
···
 	ctx->mt_state = MT_STATE_DONE;
 }
 
-static void sii8620_scdt_high(struct sii8620 *ctx)
-{
-	sii8620_write_seq_static(ctx,
-		REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
-		REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
-	);
-}
-
 static void sii8620_irq_scdt(struct sii8620 *ctx)
 {
 	u8 stat = sii8620_readb(ctx, REG_INTR5);
···
 	if (stat & BIT_INTR_SCDT_CHANGE) {
 		u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
 
-		if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
-			if (ctx->sink_type == SINK_HDMI)
-				/* enable infoframe interrupt */
-				sii8620_scdt_high(ctx);
-			else
-				sii8620_start_video(ctx);
-		}
+		if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+			sii8620_start_video(ctx);
 	}
 
 	sii8620_write(ctx, REG_INTR5, stat);
-}
-
-static void sii8620_new_vsi(struct sii8620 *ctx)
-{
-	u8 vsif[11];
-
-	sii8620_write(ctx, REG_RX_HDMI_CTRL2,
-		      VAL_RX_HDMI_CTRL2_DEFVAL |
-		      BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
-	sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
-			 ARRAY_SIZE(vsif));
-}
-
-static void sii8620_new_avi(struct sii8620 *ctx)
-{
-	sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
-	sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
-			 ARRAY_SIZE(ctx->avif));
-}
-
-static void sii8620_irq_infr(struct sii8620 *ctx)
-{
-	u8 stat = sii8620_readb(ctx, REG_INTR8)
-		& (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
-
-	sii8620_write(ctx, REG_INTR8, stat);
-
-	if (stat & BIT_CEA_NEW_VSI)
-		sii8620_new_vsi(ctx);
-
-	if (stat & BIT_CEA_NEW_AVI)
-		sii8620_new_avi(ctx);
-
-	if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
-		sii8620_start_video(ctx);
 }
 
 static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
···
 
 	if (stat & BIT_DDC_CMD_DONE) {
 		sii8620_write(ctx, REG_INTR3_MASK, 0);
-		if (sii8620_is_mhl3(ctx))
+		if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
 			sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
 					   MHL_INT_RC_FEAT_REQ);
 		else
-			sii8620_edid_read(ctx, 0);
+			sii8620_enable_hpd(ctx);
 	}
 	sii8620_write(ctx, REG_INTR3, stat);
 }
···
 		{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
 		{ BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
 		{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
-		{ BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
 	};
 	struct sii8620 *ctx = data;
 	u8 stats[LEN_FAST_INTR_STAT];
···
 		dev_err(dev, "Error powering on, %d.\n", ret);
 		return;
 	}
-	sii8620_hw_reset(ctx);
 
 	sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
 	ret = sii8620_clear_error(ctx);
···
 	rc_unregister_device(ctx->rc_dev);
 }
 
+static int sii8620_is_packing_required(struct sii8620 *ctx,
+				       const struct drm_display_mode *mode)
+{
+	int max_pclk, max_pclk_pp_mode;
+
+	if (sii8620_is_mhl3(ctx)) {
+		max_pclk = MHL3_MAX_PCLK;
+		max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
+	} else {
+		max_pclk = MHL1_MAX_PCLK;
+		max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
+	}
+
+	if (mode->clock < max_pclk)
+		return 0;
+	else if (mode->clock < max_pclk_pp_mode)
+		return 1;
+	else
+		return -1;
+}
+
 static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
 					       const struct drm_display_mode *mode)
 {
 	struct sii8620 *ctx = bridge_to_sii8620(bridge);
+	int pack_required = sii8620_is_packing_required(ctx, mode);
 	bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
 			MHL_DCAP_VID_LINK_PPIXEL;
-	unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
-						       MHL1_MAX_LCLK;
-	max_pclk /= can_pack ? 2 : 3;
 
-	return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
+	switch (pack_required) {
+	case 0:
+		return MODE_OK;
+	case 1:
+		return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
+	default:
+		return MODE_CLOCK_HIGH;
+	}
 }
 
 static bool sii8620_mode_fixup(struct drm_bridge *bridge,
···
 				struct drm_display_mode *adjusted_mode)
 {
 	struct sii8620 *ctx = bridge_to_sii8620(bridge);
-	int max_lclk;
-	bool ret = true;
 
 	mutex_lock(&ctx->lock);
 
-	max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
-	if (max_lclk > 3 * adjusted_mode->clock) {
-		ctx->use_packed_pixel = 0;
-		goto end;
-	}
-	if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
-	    max_lclk > 2 * adjusted_mode->clock) {
-		ctx->use_packed_pixel = 1;
-		goto end;
-	}
-	ret = false;
-end:
-	if (ret) {
-		u8 vic = drm_match_cea_mode(adjusted_mode);
+	ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
+	ctx->video_code = drm_match_cea_mode(adjusted_mode);
+	ctx->pixel_clock = adjusted_mode->clock;
 
-		if (!vic) {
-			union hdmi_infoframe frm;
-			u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
-
-			/* FIXME: We need the connector here */
-			drm_hdmi_vendor_infoframe_from_display_mode(
-				&frm.vendor.hdmi, NULL, adjusted_mode);
-			vic = frm.vendor.hdmi.vic;
-			if (vic >= ARRAY_SIZE(mhl_vic))
-				vic = 0;
-			vic = mhl_vic[vic];
-		}
-		ctx->video_code = vic;
-		ctx->pixel_clock = adjusted_mode->clock;
-	}
 	mutex_unlock(&ctx->lock);
-	return ret;
+
+	return true;
 }
 
 static const struct drm_bridge_funcs sii8620_bridge_funcs = {
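
To make the new tri-state helper concrete, a worked example against the limits defined at the top of this diff (the mode clocks are standard CEA timings):

	/*
	 * 1080p60, mode->clock = 148500:
	 *   MHL1: >= MHL1_MAX_PCLK (75000), < MHL1_MAX_PCLK_PP_MODE (150000)
	 *         -> 1: usable only if the sink sets MHL_DCAP_VID_LINK_PPIXEL
	 *   MHL3: <  MHL3_MAX_PCLK (200000) -> 0: no packing needed
	 *
	 * 2160p30, mode->clock = 297000:
	 *   MHL1: >= MHL1_MAX_PCLK_PP_MODE -> -1: MODE_CLOCK_HIGH
	 *   MHL3: >= MHL3_MAX_PCLK, < MHL3_MAX_PCLK_PP_MODE (300000) -> 1
	 */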
+7 -7
drivers/gpu/drm/drm_drv.c
···
  */
 void drm_dev_unplug(struct drm_device *dev)
 {
-	drm_dev_unregister(dev);
-
-	mutex_lock(&drm_global_mutex);
-	if (dev->open_count == 0)
-		drm_dev_put(dev);
-	mutex_unlock(&drm_global_mutex);
-
 	/*
 	 * After synchronizing any critical read section is guaranteed to see
 	 * the new value of ->unplugged, and any critical section which might
···
 	 */
 	dev->unplugged = true;
 	synchronize_srcu(&drm_unplug_srcu);
+
+	drm_dev_unregister(dev);
+
+	mutex_lock(&drm_global_mutex);
+	if (dev->open_count == 0)
+		drm_dev_put(dev);
+	mutex_unlock(&drm_global_mutex);
 }
 EXPORT_SYMBOL(drm_dev_unplug);
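
The reordering matters because the SRCU barrier is what the lock-free readers rely on; a minimal sketch of the (pre-existing) reader side this pairs with, using the drm_dev_enter()/drm_dev_exit() helpers:

	int idx;

	if (!drm_dev_enter(dev, &idx))	/* false once ->unplugged is set */
		return -ENODEV;
	/* device resources may be used safely here; drm_dev_unplug()
	 * cannot pass synchronize_srcu() until drm_dev_exit() runs */
	drm_dev_exit(idx);

Setting ->unplugged and synchronizing before drm_dev_unregister() guarantees no such critical section can still be touching the device while it is torn down.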
+14 -7
drivers/gpu/drm/i915/i915_drv.h
···
 
 	unsigned int bsd_engine;
 
-	/* Client can have a maximum of 3 contexts banned before
-	 * it is denied of creating new contexts. As one context
-	 * ban needs 4 consecutive hangs, and more if there is
-	 * progress in between, this is a last resort stop gap measure
-	 * to limit the badly behaving clients access to gpu.
+	/*
+	 * Every context ban increments per client ban score. Also
+	 * hangs in short succession increments ban score. If ban threshold
+	 * is reached, client is considered banned and submitting more work
+	 * will fail. This is a stop gap measure to limit the badly behaving
+	 * clients access to gpu. Note that unbannable contexts never increment
+	 * the client ban score.
 	 */
-#define I915_MAX_CLIENT_CONTEXT_BANS 3
-	atomic_t context_bans;
+#define I915_CLIENT_SCORE_HANG_FAST	1
+#define I915_CLIENT_FAST_HANG_JIFFIES	(60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN	3
+#define I915_CLIENT_SCORE_BANNED	9
+	/** ban_score: Accumulated score of all ctx bans and fast hangs. */
+	atomic_t ban_score;
+	unsigned long hang_timestamp;
 };
 
 /* Interface history:
+40 -17
drivers/gpu/drm/i915/i915_gem.c
···
 	return 0;
 }
 
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+					const struct i915_gem_context *ctx)
+{
+	unsigned int score;
+	unsigned long prev_hang;
+
+	if (i915_gem_context_is_banned(ctx))
+		score = I915_CLIENT_SCORE_CONTEXT_BAN;
+	else
+		score = 0;
+
+	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+		score += I915_CLIENT_SCORE_HANG_FAST;
+
+	if (score) {
+		atomic_add(score, &file_priv->ban_score);
+
+		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+				 ctx->name, score,
+				 atomic_read(&file_priv->ban_score));
+	}
+}
+
 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
 {
-	bool banned;
+	unsigned int score;
+	bool banned, bannable;
 
 	atomic_inc(&ctx->guilty_count);
 
-	banned = false;
-	if (i915_gem_context_is_bannable(ctx)) {
-		unsigned int score;
+	bannable = i915_gem_context_is_bannable(ctx);
+	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+	banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
 
-		score = atomic_add_return(CONTEXT_SCORE_GUILTY,
-					  &ctx->ban_score);
-		banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+	DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
+			 ctx->name, atomic_read(&ctx->guilty_count),
+			 score, yesno(banned && bannable));
 
-		DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
-				 ctx->name, score, yesno(banned));
-	}
-	if (!banned)
+	/* Cool contexts don't accumulate client ban score */
+	if (!bannable)
 		return;
 
-	i915_gem_context_set_banned(ctx);
-	if (!IS_ERR_OR_NULL(ctx->file_priv)) {
-		atomic_inc(&ctx->file_priv->context_bans);
-		DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
-				 ctx->name, atomic_read(&ctx->file_priv->context_bans));
-	}
+	if (banned)
+		i915_gem_context_set_banned(ctx);
+
+	if (!IS_ERR_OR_NULL(ctx->file_priv))
+		i915_gem_client_mark_guilty(ctx->file_priv, ctx);
 }
 
 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
···
 	INIT_LIST_HEAD(&file_priv->mm.request_list);
 
 	file_priv->bsd_engine = -1;
+	file_priv->hang_timestamp = jiffies;
 
 	ret = i915_gem_context_open(i915, file);
 	if (ret)
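
A worked example of the new accounting, using the thresholds from the i915_drv.h hunk above:

	/*
	 *   3 context bans          -> 3 * I915_CLIENT_SCORE_CONTEXT_BAN = 9
	 *   9 hangs < 60s apart     -> 9 * I915_CLIENT_SCORE_HANG_FAST   = 9
	 *   2 bans + 3 fast hangs   -> 6 + 3                             = 9
	 *
	 * each reaches I915_CLIENT_SCORE_BANNED (9), at which point
	 * client_is_banned() rejects further context creation.
	 */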
+1 -1
drivers/gpu/drm/i915/i915_gem_context.c
···
 
 static bool client_is_banned(struct drm_i915_file_private *file_priv)
 {
-	return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
+	return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
 }
 
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
+27 -22
drivers/gpu/drm/i915/i915_gem_execbuffer.c
···
 }
 
 static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+	   unsigned int i, unsigned batch_idx,
+	   struct i915_vma *vma)
 {
 	struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
 	int err;
···
 	eb->vma[i] = vma;
 	eb->flags[i] = entry->flags;
 	vma->exec_flags = &eb->flags[i];
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	if (i == batch_idx) {
+		if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+			eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+		if (eb->reloc_cache.has_fence)
+			eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+		eb->batch = vma;
+	}
 
 	err = 0;
 	if (eb_pin_vma(eb, entry, vma)) {
···
 {
 	struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
 	struct drm_i915_gem_object *obj;
-	unsigned int i;
+	unsigned int i, batch;
 	int err;
 
 	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
···
 
 	INIT_LIST_HEAD(&eb->relocs);
 	INIT_LIST_HEAD(&eb->unbound);
+
+	batch = eb_batch_index(eb);
 
 	for (i = 0; i < eb->buffer_count; i++) {
 		u32 handle = eb->exec[i].handle;
···
 		lut->handle = handle;
 
 add_vma:
-		err = eb_add_vma(eb, i, vma);
+		err = eb_add_vma(eb, i, batch, vma);
 		if (unlikely(err))
 			goto err_vma;
 
 		GEM_BUG_ON(vma != eb->vma[i]);
 		GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+		GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+			   eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
 	}
-
-	/* take note of the batch buffer before we might reorder the lists */
-	i = eb_batch_index(eb);
-	eb->batch = eb->vma[i];
-	GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
-	/*
-	 * SNA is doing fancy tricks with compressing batch buffers, which leads
-	 * to negative relocation deltas. Usually that works out ok since the
-	 * relocate address is still positive, except when the batch is placed
-	 * very low in the GTT. Ensure this doesn't happen.
-	 *
-	 * Note that actual hangs have only been observed on gen7, but for
-	 * paranoia do it everywhere.
-	 */
-	if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
-		eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
-	if (eb->reloc_cache.has_fence)
-		eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
 
 	eb->args->flags |= __EXEC_VALIDATED;
 	return eb_reserve(eb);
+10 -2
drivers/gpu/drm/i915/i915_irq.c
···
 
 		/*
 		 * Clear the PIPE*STAT regs before the IIR
+		 *
+		 * Toggle the enable bits to make sure we get an
+		 * edge in the ISR pipe event bit if we don't clear
+		 * all the enabled status bits. Otherwise the edge
+		 * triggered IIR on i965/g4x wouldn't notice that
+		 * an interrupt is still pending.
 		 */
-		if (pipe_stats[pipe])
-			I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
+		if (pipe_stats[pipe]) {
+			I915_WRITE(reg, pipe_stats[pipe]);
+			I915_WRITE(reg, enable_mask);
+		}
 	}
 	spin_unlock(&dev_priv->irq_lock);
 }
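
Why two writes instead of one, spelled out (behaviour per the comment above; PIPESTAT keeps the enable bits in the high half and the write-1-to-clear status bits in the low half):

	I915_WRITE(reg, pipe_stats[pipe]);	/* ack the sampled status bits
						 * with all enables deasserted */
	I915_WRITE(reg, enable_mask);		/* re-assert the enables: any
						 * enabled status bit still set
						 * now produces a fresh 0->1
						 * edge for the edge-triggered
						 * IIR */

The old single write (enable_mask | status) left the enables asserted throughout, so a still-pending status bit never produced a new edge and the interrupt could be lost.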
+5
drivers/gpu/drm/i915/i915_reg.h
···
 #define _3D_CHICKEN	_MMIO(0x2084)
 #define  _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB	(1 << 10)
 #define _3D_CHICKEN2	_MMIO(0x208c)
+
+#define FF_SLICE_CHICKEN	_MMIO(0x2088)
+#define  FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX	(1 << 1)
+
 /* Disables pipelining of read flushes past the SF-WIZ interface.
  * Required on all Ironlake steppings according to the B-Spec, but the
  * particular danger of not doing so is not specified.
  */
 # define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
 #define _3D_CHICKEN3	_MMIO(0x2090)
+#define  _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX		(1 << 12)
 #define  _3D_CHICKEN_SF_DISABLE_OBJEND_CULL		(1 << 10)
 #define  _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE	(1 << 5)
 #define  _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
+20
drivers/gpu/drm/i915/intel_crt.c
···
 	int max_dotclk = dev_priv->max_dotclk_freq;
 	int max_clock;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	if (mode->clock < 25000)
 		return MODE_CLOCK_LOW;
···
 				     struct intel_crtc_state *pipe_config,
 				     struct drm_connector_state *conn_state)
 {
+	struct drm_display_mode *adjusted_mode =
+		&pipe_config->base.adjusted_mode;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	return true;
 }
···
 				     struct intel_crtc_state *pipe_config,
 				     struct drm_connector_state *conn_state)
 {
+	struct drm_display_mode *adjusted_mode =
+		&pipe_config->base.adjusted_mode;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	pipe_config->has_pch_encoder = true;
 
 	return true;
···
 				      struct drm_connector_state *conn_state)
 {
 	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+	struct drm_display_mode *adjusted_mode =
+		&pipe_config->base.adjusted_mode;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
 
 	pipe_config->has_pch_encoder = true;
+13 -3
drivers/gpu/drm/i915/intel_display.c
···
 intel_mode_valid(struct drm_device *dev,
 		 const struct drm_display_mode *mode)
 {
+	/*
+	 * Can't reject DBLSCAN here because Xorg ddxen can add piles
+	 * of DBLSCAN modes to the output's mode list when they detect
+	 * the scaling mode property on the connector. And they don't
+	 * ask the kernel to validate those modes in any way until
+	 * modeset time at which point the client gets a protocol error.
+	 * So in order to not upset those clients we silently ignore the
+	 * DBLSCAN flag on such connectors. For other connectors we will
+	 * reject modes with the DBLSCAN flag in encoder->compute_config().
+	 * And we always reject DBLSCAN modes in connector->mode_valid()
+	 * as we never want such modes on the connector's mode list.
+	 */
+
 	if (mode->vscan > 1)
 		return MODE_NO_VSCAN;
 
-	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
-		return MODE_NO_DBLESCAN;
-
 	if (mode->flags & DRM_MODE_FLAG_HSKEW)
 		return MODE_H_ILLEGAL;
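
Taken together with the connector and encoder hunks in the rest of this series, the resulting DBLSCAN policy can be summarised as (sketch; see the individual diffs for exact placement):

	/*
	 * connector->mode_valid()    -> MODE_NO_DBLESCAN
	 *   keeps DBLSCAN modes off every connector's mode list
	 * encoder->compute_config()  -> return false on DBLSCAN
	 *   rejects such modes at modeset time; on eDP/LVDS/DSI the check
	 *   runs after the fixed panel mode has replaced adjusted_mode, so
	 *   scaled DBLSCAN user modes keep working there
	 * intel_mode_valid() (core)  -> silently accepted,
	 *   for Xorg ddx compatibility as the comment above explains
	 */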
+16 -18
drivers/gpu/drm/i915/intel_dp.c
···
 	int max_rate, mode_rate, max_lanes, max_link_clock;
 	int max_dotclk;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
 
 	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
···
 					   conn_state->scaling_mode);
 	}
 
-	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
+	if (HAS_GMCH_DISPLAY(dev_priv) &&
 	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
 		return false;
···
 			     const struct drm_connector_state *old_conn_state)
 {
 	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
-
-	/* disable the port before the pipe on g4x */
-	intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void ilk_disable_dp(struct intel_encoder *encoder,
-			   const struct intel_crtc_state *old_crtc_state,
-			   const struct drm_connector_state *old_conn_state)
-{
-	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
 
 static void vlv_disable_dp(struct intel_encoder *encoder,
···
 	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
 }
 
-static void ilk_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_encoder *encoder,
 			   const struct intel_crtc_state *old_crtc_state,
 			   const struct drm_connector_state *old_conn_state)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
 	enum port port = encoder->port;
 
+	/*
+	 * Bspec does not list a specific disable sequence for g4x DP.
+	 * Follow the ilk+ sequence (disable pipe before the port) for
+	 * g4x DP as it does not suffer from underruns like the normal
+	 * g4x modeset sequence (disable pipe after the port).
+	 */
 	intel_dp_link_down(encoder, old_crtc_state);
 
 	/* Only ilk+ has port A */
···
 	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
 	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
 
-	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+	if (!HAS_GMCH_DISPLAY(dev_priv))
 		connector->interlace_allowed = true;
 	connector->doublescan_allowed = 0;
···
 		intel_encoder->enable = vlv_enable_dp;
 		intel_encoder->disable = vlv_disable_dp;
 		intel_encoder->post_disable = vlv_post_disable_dp;
-	} else if (INTEL_GEN(dev_priv) >= 5) {
-		intel_encoder->pre_enable = g4x_pre_enable_dp;
-		intel_encoder->enable = g4x_enable_dp;
-		intel_encoder->disable = ilk_disable_dp;
-		intel_encoder->post_disable = ilk_post_disable_dp;
 	} else {
 		intel_encoder->pre_enable = g4x_pre_enable_dp;
 		intel_encoder->enable = g4x_enable_dp;
 		intel_encoder->disable = g4x_disable_dp;
+		intel_encoder->post_disable = g4x_post_disable_dp;
 	}
 
 	intel_dig_port->dp.output_reg = output_reg;
+6
drivers/gpu/drm/i915/intel_dp_mst.c
···
 	bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
 					   DP_DPCD_QUIRK_LIMITED_M_N);
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	pipe_config->has_pch_encoder = false;
 	bpp = 24;
 	if (intel_dp->compliance.test_data.bpc) {
···
 
 	if (!intel_dp)
 		return MODE_ERROR;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
 
 	max_link_clock = intel_dp_max_link_rate(intel_dp);
 	max_lanes = intel_dp_max_lane_count(intel_dp);
+6
drivers/gpu/drm/i915/intel_dsi.c
···
 					   conn_state->scaling_mode);
 	}
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	/* DSI uses short packets for sync events, so clear mode flags for DSI */
 	adjusted_mode->flags = 0;
···
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
 	DRM_DEBUG_KMS("\n");
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
 
 	if (fixed_mode) {
 		if (mode->hdisplay > fixed_mode->hdisplay)
+6
drivers/gpu/drm/i915/intel_dvo.c
···
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 	int target_clock = mode->clock;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	/* XXX: Validate clock range */
 
 	if (fixed_mode) {
···
 	 */
 	if (fixed_mode)
 		intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
 
 	return true;
 }
+6
drivers/gpu/drm/i915/intel_hdmi.c
···
 	bool force_dvi =
 		READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	clock = mode->clock;
 
 	if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
···
 	int clock_12bpc = clock_8bpc * 3 / 2;
 	int desired_bpp;
 	bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
 
 	pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
+13 -5
drivers/gpu/drm/i915/intel_lrc.c
···
 	/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
 	batch = gen8_emit_flush_coherentl3_wa(engine, batch);
 
+	*batch++ = MI_LOAD_REGISTER_IMM(3);
+
 	/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
-	*batch++ = MI_LOAD_REGISTER_IMM(1);
 	*batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
 	*batch++ = _MASKED_BIT_DISABLE(
 			GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+
+	/* BSpec: 11391 */
+	*batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
+	*batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+
+	/* BSpec: 11299 */
+	*batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
+	*batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+
 	*batch++ = MI_NOOP;
 
 	/* WaClearSlmSpaceAtContextSwitch:kbl */
···
 	context_size += LRC_HEADER_PAGES * PAGE_SIZE;
 
 	ctx_obj = i915_gem_object_create(ctx->i915, context_size);
-	if (IS_ERR(ctx_obj)) {
-		ret = PTR_ERR(ctx_obj);
-		goto error_deref_obj;
-	}
+	if (IS_ERR(ctx_obj))
+		return PTR_ERR(ctx_obj);
 
 	vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
 	if (IS_ERR(vma)) {
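
For readers unfamiliar with the encoding in the workaround batch, a brief sketch (the masked-register convention applies to these chicken registers):

	/* MI_LOAD_REGISTER_IMM(n) is followed by n (offset, value) pairs,
	 * which is why a single LRI(3) now carries all three writes.
	 * The registers are "masked": the upper 16 bits of the value
	 * select which of the lower 16 bits take effect, so
	 *   _MASKED_BIT_ENABLE(bit)  == (bit << 16) | bit
	 *   _MASKED_BIT_DISABLE(bit) == (bit << 16)
	 */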
+5
drivers/gpu/drm/i915/intel_lvds.c
···
 	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
 	int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
 	if (mode->hdisplay > fixed_mode->hdisplay)
 		return MODE_PANEL;
 	if (mode->vdisplay > fixed_mode->vdisplay)
···
 	 */
 	intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
 			       adjusted_mode);
+
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
 
 	if (HAS_PCH_SPLIT(dev_priv)) {
 		pipe_config->has_pch_encoder = true;
+6
drivers/gpu/drm/i915/intel_sdvo.c
···
 					     adjusted_mode);
 	}
 
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
 	/*
 	 * Make the CRTC code factor in the SDVO pixel multiplier. The
 	 * SDVO device will factor out the multiplier during mode_set.
···
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
 
 	if (intel_sdvo->pixel_clock_min > mode->clock)
 		return MODE_CLOCK_LOW;
+10 -2
drivers/gpu/drm/i915/intel_tv.c
···
 	const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
 	int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
 
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return MODE_NO_DBLESCAN;
+
 	if (mode->clock > max_dotclk)
 		return MODE_CLOCK_HIGH;
···
 			struct drm_connector_state *conn_state)
 {
 	const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+	struct drm_display_mode *adjusted_mode =
+		&pipe_config->base.adjusted_mode;
 
 	if (!tv_mode)
 		return false;
 
-	pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		return false;
+
+	adjusted_mode->crtc_clock = tv_mode->clock;
 	DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
 	pipe_config->pipe_bpp = 8*3;
 
 	/* TV has it's own notion of sync and other mode flags, so clear them. */
-	pipe_config->base.adjusted_mode.flags = 0;
+	adjusted_mode->flags = 0;
 
 	/*
 	 * FIXME: We don't check whether the input mode is actually what we want
+1 -1
drivers/gpu/drm/nouveau/dispnv50/curs507a.c
···
 
 	nvif_object_map(&wndw->wimm.base.user, NULL, 0);
 	wndw->immd = func;
-	wndw->ctxdma.parent = &disp->core->chan.base.user;
+	wndw->ctxdma.parent = NULL;
 	return 0;
 }
+8 -5
drivers/gpu/drm/nouveau/dispnv50/wndw.c
···
 	if (ret)
 		return ret;
 
-	ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
-	if (IS_ERR(ctxdma)) {
-		nouveau_bo_unpin(fb->nvbo);
-		return PTR_ERR(ctxdma);
+	if (wndw->ctxdma.parent) {
+		ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+		if (IS_ERR(ctxdma)) {
+			nouveau_bo_unpin(fb->nvbo);
+			return PTR_ERR(ctxdma);
+		}
+
+		asyw->image.handle[0] = ctxdma->object.handle;
 	}
 
 	asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
-	asyw->image.handle[0] = ctxdma->object.handle;
 	asyw->image.offset[0] = fb->nvbo->bo.offset;
 
 	if (wndw->func->prepare) {
+5 -2
drivers/gpu/drm/qxl/qxl_display.c
···
 	struct qxl_cursor_cmd *cmd;
 	struct qxl_cursor *cursor;
 	struct drm_gem_object *obj;
-	struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
+	struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
 	int ret;
 	void *user_ptr;
 	int size = 64*64*4;
···
 				   cursor_bo, 0);
 		cmd->type = QXL_CURSOR_SET;
 
-		qxl_bo_unref(&qcrtc->cursor_bo);
+		old_cursor_bo = qcrtc->cursor_bo;
 		qcrtc->cursor_bo = cursor_bo;
 		cursor_bo = NULL;
 	} else {
···
 	qxl_release_unmap(qdev, release, &cmd->release_info);
 	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
 	qxl_release_fence_buffer_objects(release);
+
+	if (old_cursor_bo)
+		qxl_bo_unref(&old_cursor_bo);
 
 	qxl_bo_unref(&cursor_bo);
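
The shape of the fix is a swap-now, free-later pattern; a sketch of the ordering it establishes (the might-sleep reasoning is per the pull summary above):

	old_cursor_bo = qcrtc->cursor_bo;	/* pointer swap only */
	qcrtc->cursor_bo = cursor_bo;
	...
	qxl_release_fence_buffer_objects(release);

	if (old_cursor_bo)
		qxl_bo_unref(&old_cursor_bo);	/* the unref, which may
						 * sleep, now runs only after
						 * the cursor command has been
						 * fenced */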
-25
drivers/gpu/drm/sun4i/sun4i_tcon.c
···
 #include <drm/drm_encoder.h>
 #include <drm/drm_modes.h>
 #include <drm/drm_of.h>
-#include <drm/drm_panel.h>
 
 #include <uapi/drm/drm_mode.h>
···
 static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
 				     const struct drm_display_mode *mode)
 {
-	struct drm_panel *panel = tcon->panel;
-	struct drm_connector *connector = panel->connector;
-	struct drm_display_info display_info = connector->display_info;
 	unsigned int bp, hsync, vsync;
 	u8 clk_delay;
 	u32 val = 0;
···
 
 	if (mode->flags & DRM_MODE_FLAG_PVSYNC)
 		val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
-
-	/*
-	 * On A20 and similar SoCs, the only way to achieve Positive Edge
-	 * (Rising Edge), is setting dclk clock phase to 2/3(240°).
-	 * By default TCON works in Negative Edge(Falling Edge),
-	 * this is why phase is set to 0 in that case.
-	 * Unfortunately there's no way to logically invert dclk through
-	 * IO_POL register.
-	 * The only acceptable way to work, triple checked with scope,
-	 * is using clock phase set to 0° for Negative Edge and set to 240°
-	 * for Positive Edge.
-	 * On A33 and similar SoCs there would be a 90° phase option,
-	 * but it divides also dclk by 2.
-	 * Following code is a way to avoid quirks all around TCON
-	 * and DOTCLOCK drivers.
-	 */
-	if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
-		clk_set_phase(tcon->dclk, 240);
-
-	if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
-		clk_set_phase(tcon->dclk, 0);
 
 	regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
 			   SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,