Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-fixes-2016-05-25' of git://anongit.freedesktop.org/drm-intel into drm-next

I see the main drm pull got merged, here's the first batch of fixes for
4.7 already. Fixes all around, a large portion cc: stable stuff.

[airlied: the DP++ stuff is a regression fix].
* tag 'drm-intel-next-fixes-2016-05-25' of git://anongit.freedesktop.org/drm-intel:
drm/i915: Stop automatically retiring requests after a GPU hang
drm/i915: Unify intel_ring_begin()
drm/i915: Ignore stale wm register values on resume on ilk-bdw (v2)
drm/i915/psr: Try to program link training times correctly
drm/i915/bxt: Adjusting the error in horizontal timings retrieval
drm/i915: Don't leave old junk in ilk active watermarks on readout
drm/i915: s/DPPL/DPLL/ for SKL DPLLs
drm/i915: Fix gen8 semaphores id for legacy mode
drm/i915: Set crtc_state->lane_count for HDMI
drm/i915/BXT: Retrieving the horizontal timing for DSI
drm/i915: Protect gen7 irq_seqno_barrier with uncore lock
drm/i915: Re-enable GGTT earlier during resume on pre-gen6 platforms
drm/i915: Determine DP++ type 1 DVI adaptor presence based on VBT
drm/i915: Enable/disable TMDS output buffers in DP++ adaptor as needed
drm/i915: Respect DP++ adaptor TMDS clock limit
drm: Add helper for DP++ adaptors

+951 -311
+6
Documentation/DocBook/gpu.tmpl
··· 1628 1628 !Edrivers/gpu/drm/drm_dp_helper.c 1629 1629 </sect2> 1630 1630 <sect2> 1631 + <title>Display Port Dual Mode Adaptor Helper Functions Reference</title> 1632 + !Pdrivers/gpu/drm/drm_dp_dual_mode_helper.c dp dual mode helpers 1633 + !Iinclude/drm/drm_dp_dual_mode_helper.h 1634 + !Edrivers/gpu/drm/drm_dp_dual_mode_helper.c 1635 + </sect2> 1636 + <sect2> 1631 1637 <title>Display Port MST Helper Functions Reference</title> 1632 1638 !Pdrivers/gpu/drm/drm_dp_mst_topology.c dp mst helper 1633 1639 !Iinclude/drm/drm_dp_mst_helper.h
+1 -1
drivers/gpu/drm/Makefile
··· 23 23 24 24 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_probe_helper.o \ 25 25 drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \ 26 - drm_kms_helper_common.o 26 + drm_kms_helper_common.o drm_dp_dual_mode_helper.o 27 27 28 28 drm_kms_helper-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o 29 29 drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
+366
drivers/gpu/drm/drm_dp_dual_mode_helper.c
··· 1 + /* 2 + * Copyright © 2016 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + */ 22 + 23 + #include <linux/errno.h> 24 + #include <linux/export.h> 25 + #include <linux/i2c.h> 26 + #include <linux/slab.h> 27 + #include <linux/string.h> 28 + #include <drm/drm_dp_dual_mode_helper.h> 29 + #include <drm/drmP.h> 30 + 31 + /** 32 + * DOC: dp dual mode helpers 33 + * 34 + * Helper functions to deal with DP dual mode (aka. DP++) adaptors. 35 + * 36 + * Type 1: 37 + * Adaptor registers (if any) and the sink DDC bus may be accessed via I2C. 38 + * 39 + * Type 2: 40 + * Adaptor registers and sink DDC bus can be accessed either via I2C or 41 + * I2C-over-AUX. Source devices may choose to implement either of these 42 + * access methods. 
43 + */ 44 + 45 + #define DP_DUAL_MODE_SLAVE_ADDRESS 0x40 46 + 47 + /** 48 + * drm_dp_dual_mode_read - Read from the DP dual mode adaptor register(s) 49 + * @adapter: I2C adapter for the DDC bus 50 + * @offset: register offset 51 + * @buffer: buffer for return data 52 + * @size: size of the buffer 53 + * 54 + * Reads @size bytes from the DP dual mode adaptor registers 55 + * starting at @offset. 56 + * 57 + * Returns: 58 + * 0 on success, negative error code on failure 59 + */ 60 + ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, 61 + u8 offset, void *buffer, size_t size) 62 + { 63 + struct i2c_msg msgs[] = { 64 + { 65 + .addr = DP_DUAL_MODE_SLAVE_ADDRESS, 66 + .flags = 0, 67 + .len = 1, 68 + .buf = &offset, 69 + }, 70 + { 71 + .addr = DP_DUAL_MODE_SLAVE_ADDRESS, 72 + .flags = I2C_M_RD, 73 + .len = size, 74 + .buf = buffer, 75 + }, 76 + }; 77 + int ret; 78 + 79 + ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); 80 + if (ret < 0) 81 + return ret; 82 + if (ret != ARRAY_SIZE(msgs)) 83 + return -EPROTO; 84 + 85 + return 0; 86 + } 87 + EXPORT_SYMBOL(drm_dp_dual_mode_read); 88 + 89 + /** 90 + * drm_dp_dual_mode_write - Write to the DP dual mode adaptor register(s) 91 + * @adapter: I2C adapter for the DDC bus 92 + * @offset: register offset 93 + * @buffer: buffer for write data 94 + * @size: size of the buffer 95 + * 96 + * Writes @size bytes to the DP dual mode adaptor registers 97 + * starting at @offset. 
98 + * 99 + * Returns: 100 + * 0 on success, negative error code on failure 101 + */ 102 + ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, 103 + u8 offset, const void *buffer, size_t size) 104 + { 105 + struct i2c_msg msg = { 106 + .addr = DP_DUAL_MODE_SLAVE_ADDRESS, 107 + .flags = 0, 108 + .len = 1 + size, 109 + .buf = NULL, 110 + }; 111 + void *data; 112 + int ret; 113 + 114 + data = kmalloc(msg.len, GFP_TEMPORARY); 115 + if (!data) 116 + return -ENOMEM; 117 + 118 + msg.buf = data; 119 + 120 + memcpy(data, &offset, 1); 121 + memcpy(data + 1, buffer, size); 122 + 123 + ret = i2c_transfer(adapter, &msg, 1); 124 + 125 + kfree(data); 126 + 127 + if (ret < 0) 128 + return ret; 129 + if (ret != 1) 130 + return -EPROTO; 131 + 132 + return 0; 133 + } 134 + EXPORT_SYMBOL(drm_dp_dual_mode_write); 135 + 136 + static bool is_hdmi_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN]) 137 + { 138 + static const char dp_dual_mode_hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = 139 + "DP-HDMI ADAPTOR\x04"; 140 + 141 + return memcmp(hdmi_id, dp_dual_mode_hdmi_id, 142 + sizeof(dp_dual_mode_hdmi_id)) == 0; 143 + } 144 + 145 + static bool is_type2_adaptor(uint8_t adaptor_id) 146 + { 147 + return adaptor_id == (DP_DUAL_MODE_TYPE_TYPE2 | 148 + DP_DUAL_MODE_REV_TYPE2); 149 + } 150 + 151 + /** 152 + * drm_dp_dual_mode_detect - Identify the DP dual mode adaptor 153 + * @adapter: I2C adapter for the DDC bus 154 + * 155 + * Attempt to identify the type of the DP dual mode adaptor used. 156 + * 157 + * Note that when the answer is @DRM_DP_DUAL_MODE_UNKNOWN it's not 158 + * certain whether we're dealing with a native HDMI port or 159 + * a type 1 DVI dual mode adaptor. The driver will have to use 160 + * some other hardware/driver specific mechanism to make that 161 + * distinction. 
162 + * 163 + * Returns: 164 + * The type of the DP dual mode adaptor used 165 + */ 166 + enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter) 167 + { 168 + char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {}; 169 + uint8_t adaptor_id = 0x00; 170 + ssize_t ret; 171 + 172 + /* 173 + * Let's see if the adaptor is there by reading the 174 + * HDMI ID registers. 175 + * 176 + * Note that type 1 DVI adaptors are not required to implement 177 + * any registers, and that presents a problem for detection. 178 + * If the i2c transfer is nacked, we may or may not be dealing 179 + * with a type 1 DVI adaptor. Some other mechanism of detecting 180 + * the presence of the adaptor is required. One way would be 181 + * to check the state of the CONFIG1 pin. Another method would 182 + * simply require the driver to know whether the port is a DP++ 183 + * port or a native HDMI port. Both of these methods are entirely 184 + * hardware/driver specific so we can't deal with them here. 185 + */ 186 + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID, 187 + hdmi_id, sizeof(hdmi_id)); 188 + if (ret) 189 + return DRM_DP_DUAL_MODE_UNKNOWN; 190 + 191 + /* 192 + * Sigh. Some (maybe all?) type 1 adaptors are broken and ack 193 + * the offset but ignore it, and instead they just always return 194 + * data from the start of the HDMI ID buffer. So for a broken 195 + * type 1 HDMI adaptor a single byte read will always give us 196 + * 0x44, and for a type 1 DVI adaptor it should give 0x00 197 + * (assuming it implements any registers). Fortunately neither 198 + * of those values will match the type 2 signature of the 199 + * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with 200 + * the type 2 adaptor detection safely even in the presence 201 + * of broken type 1 adaptors. 
202 + */ 203 + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, 204 + &adaptor_id, sizeof(adaptor_id)); 205 + if (ret == 0) { 206 + if (is_type2_adaptor(adaptor_id)) { 207 + if (is_hdmi_adaptor(hdmi_id)) 208 + return DRM_DP_DUAL_MODE_TYPE2_HDMI; 209 + else 210 + return DRM_DP_DUAL_MODE_TYPE2_DVI; 211 + } 212 + } 213 + 214 + if (is_hdmi_adaptor(hdmi_id)) 215 + return DRM_DP_DUAL_MODE_TYPE1_HDMI; 216 + else 217 + return DRM_DP_DUAL_MODE_TYPE1_DVI; 218 + } 219 + EXPORT_SYMBOL(drm_dp_dual_mode_detect); 220 + 221 + /** 222 + * drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor 223 + * @type: DP dual mode adaptor type 224 + * @adapter: I2C adapter for the DDC bus 225 + * 226 + * Determine the max TMDS clock the adaptor supports based on the 227 + * type of the dual mode adaptor and the DP_DUAL_MODE_MAX_TMDS_CLOCK 228 + * register (on type2 adaptors). As some type 1 adaptors have 229 + * problems with registers (see comments in drm_dp_dual_mode_detect()) 230 + * we don't read the register on those, instead we simply assume 231 + * a 165 MHz limit based on the specification. 232 + * 233 + * Returns: 234 + * Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz. 
235 + */ 236 + int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, 237 + struct i2c_adapter *adapter) 238 + { 239 + uint8_t max_tmds_clock; 240 + ssize_t ret; 241 + 242 + /* native HDMI so no limit */ 243 + if (type == DRM_DP_DUAL_MODE_NONE) 244 + return 0; 245 + 246 + /* 247 + * Type 1 adaptors are limited to 165MHz 248 + * Type 2 adaptors can tell us their limit 249 + */ 250 + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) 251 + return 165000; 252 + 253 + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK, 254 + &max_tmds_clock, sizeof(max_tmds_clock)); 255 + if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) { 256 + DRM_DEBUG_KMS("Failed to query max TMDS clock\n"); 257 + return 165000; 258 + } 259 + 260 + return max_tmds_clock * 5000 / 2; 261 + } 262 + EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock); 263 + 264 + /** 265 + * drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor 266 + * @type: DP dual mode adaptor type 267 + * @adapter: I2C adapter for the DDC bus 268 + * @enabled: current state of the TMDS output buffers 269 + * 270 + * Get the state of the TMDS output buffers in the adaptor. For 271 + * type2 adaptors this is queried from the DP_DUAL_MODE_TMDS_OEN 272 + * register. As some type 1 adaptors have problems with registers 273 + * (see comments in drm_dp_dual_mode_detect()) we don't read the 274 + * register on those, instead we simply assume that the buffers 275 + * are always enabled. 
276 + * 277 + * Returns: 278 + * 0 on success, negative error code on failure 279 + */ 280 + int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, 281 + struct i2c_adapter *adapter, 282 + bool *enabled) 283 + { 284 + uint8_t tmds_oen; 285 + ssize_t ret; 286 + 287 + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) { 288 + *enabled = true; 289 + return 0; 290 + } 291 + 292 + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, 293 + &tmds_oen, sizeof(tmds_oen)); 294 + if (ret) { 295 + DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n"); 296 + return ret; 297 + } 298 + 299 + *enabled = !(tmds_oen & DP_DUAL_MODE_TMDS_DISABLE); 300 + 301 + return 0; 302 + } 303 + EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output); 304 + 305 + /** 306 + * drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor 307 + * @type: DP dual mode adaptor type 308 + * @adapter: I2C adapter for the DDC bus 309 + * @enable: enable (as opposed to disable) the TMDS output buffers 310 + * 311 + * Set the state of the TMDS output buffers in the adaptor. For 312 + * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As 313 + * some type 1 adaptors have problems with registers (see comments 314 + * in drm_dp_dual_mode_detect()) we avoid touching the register, 315 + * making this function a no-op on type 1 adaptors. 316 + * 317 + * Returns: 318 + * 0 on success, negative error code on failure 319 + */ 320 + int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, 321 + struct i2c_adapter *adapter, bool enable) 322 + { 323 + uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; 324 + ssize_t ret; 325 + 326 + if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) 327 + return 0; 328 + 329 + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, 330 + &tmds_oen, sizeof(tmds_oen)); 331 + if (ret) { 332 + DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", 333 + enable ? 
"enable" : "disable"); 334 + return ret; 335 + } 336 + 337 + return 0; 338 + } 339 + EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); 340 + 341 + /** 342 + * drm_dp_get_dual_mode_type_name - Get the name of the DP dual mode adaptor type as a string 343 + * @type: DP dual mode adaptor type 344 + * 345 + * Returns: 346 + * String representation of the DP dual mode adaptor type 347 + */ 348 + const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type) 349 + { 350 + switch (type) { 351 + case DRM_DP_DUAL_MODE_NONE: 352 + return "none"; 353 + case DRM_DP_DUAL_MODE_TYPE1_DVI: 354 + return "type 1 DVI"; 355 + case DRM_DP_DUAL_MODE_TYPE1_HDMI: 356 + return "type 1 HDMI"; 357 + case DRM_DP_DUAL_MODE_TYPE2_DVI: 358 + return "type 2 DVI"; 359 + case DRM_DP_DUAL_MODE_TYPE2_HDMI: 360 + return "type 2 HDMI"; 361 + default: 362 + WARN_ON(type != DRM_DP_DUAL_MODE_UNKNOWN); 363 + return "unknown"; 364 + } 365 + } 366 + EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
+6
drivers/gpu/drm/i915/i915_dma.c
··· 1183 1183 if (ret) 1184 1184 return ret; 1185 1185 1186 + ret = i915_ggtt_enable_hw(dev); 1187 + if (ret) { 1188 + DRM_ERROR("failed to enable GGTT\n"); 1189 + goto out_ggtt; 1190 + } 1191 + 1186 1192 /* WARNING: Apparently we must kick fbdev drivers before vgacon, 1187 1193 * otherwise the vga fbdev driver falls over. */ 1188 1194 ret = i915_kick_out_firmware_fb(dev_priv);
+5
drivers/gpu/drm/i915/i915_drv.c
··· 734 734 static int i915_drm_resume(struct drm_device *dev) 735 735 { 736 736 struct drm_i915_private *dev_priv = dev->dev_private; 737 + int ret; 737 738 738 739 disable_rpm_wakeref_asserts(dev_priv); 740 + 741 + ret = i915_ggtt_enable_hw(dev); 742 + if (ret) 743 + DRM_ERROR("failed to re-enable GGTT\n"); 739 744 740 745 intel_csr_ucode_resume(dev_priv); 741 746
+1
drivers/gpu/drm/i915/i915_drv.h
··· 3482 3482 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3483 3483 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin); 3484 3484 bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port); 3485 + bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port); 3485 3486 bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port); 3486 3487 bool intel_bios_is_port_hpd_inverted(struct drm_i915_private *dev_priv, 3487 3488 enum port port);
+6 -5
drivers/gpu/drm/i915/i915_gem.c
··· 1456 1456 if (ret) 1457 1457 return ret; 1458 1458 1459 - __i915_gem_request_retire__upto(req); 1459 + /* If the GPU hung, we want to keep the requests to find the guilty. */ 1460 + if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error)) 1461 + __i915_gem_request_retire__upto(req); 1462 + 1460 1463 return 0; 1461 1464 } 1462 1465 ··· 1516 1513 else if (obj->last_write_req == req) 1517 1514 i915_gem_object_retire__write(obj); 1518 1515 1519 - __i915_gem_request_retire__upto(req); 1516 + if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error)) 1517 + __i915_gem_request_retire__upto(req); 1520 1518 } 1521 1519 1522 1520 /* A nonblocking variant of the above wait. This is a highly dangerous routine ··· 4863 4859 struct drm_i915_private *dev_priv = dev->dev_private; 4864 4860 struct intel_engine_cs *engine; 4865 4861 int ret, j; 4866 - 4867 - if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 4868 - return -EIO; 4869 4862 4870 4863 /* Double layer security blanket, see i915_gem_init() */ 4871 4864 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+8
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 3236 3236 return ret; 3237 3237 } 3238 3238 3239 + int i915_ggtt_enable_hw(struct drm_device *dev) 3240 + { 3241 + if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) 3242 + return -EIO; 3243 + 3244 + return 0; 3245 + } 3246 + 3239 3247 void i915_gem_restore_gtt_mappings(struct drm_device *dev) 3240 3248 { 3241 3249 struct drm_i915_private *dev_priv = to_i915(dev);
+1
drivers/gpu/drm/i915/i915_gem_gtt.h
··· 514 514 } 515 515 516 516 int i915_ggtt_init_hw(struct drm_device *dev); 517 + int i915_ggtt_enable_hw(struct drm_device *dev); 517 518 void i915_gem_init_ggtt(struct drm_device *dev); 518 519 void i915_ggtt_cleanup_hw(struct drm_device *dev); 519 520
+36
drivers/gpu/drm/i915/intel_bios.c
··· 1578 1578 return false; 1579 1579 } 1580 1580 1581 + bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) 1582 + { 1583 + static const struct { 1584 + u16 dp, hdmi; 1585 + } port_mapping[] = { 1586 + /* 1587 + * Buggy VBTs may declare DP ports as having 1588 + * HDMI type dvo_port :( So let's check both. 1589 + */ 1590 + [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, 1591 + [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, 1592 + [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, 1593 + [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, 1594 + }; 1595 + int i; 1596 + 1597 + if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) 1598 + return false; 1599 + 1600 + if (!dev_priv->vbt.child_dev_num) 1601 + return false; 1602 + 1603 + for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { 1604 + const union child_device_config *p_child = 1605 + &dev_priv->vbt.child_dev[i]; 1606 + 1607 + if ((p_child->common.dvo_port == port_mapping[port].dp || 1608 + p_child->common.dvo_port == port_mapping[port].hdmi) && 1609 + (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) == 1610 + (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) 1611 + return true; 1612 + } 1613 + 1614 + return false; 1615 + } 1616 + 1581 1617 /** 1582 1618 * intel_bios_is_dsi_present - is DSI present in VBT 1583 1619 * @dev_priv: i915 device instance
+15 -1
drivers/gpu/drm/i915/intel_ddi.c
··· 1601 1601 enum port port = intel_ddi_get_encoder_port(intel_encoder); 1602 1602 int type = intel_encoder->type; 1603 1603 1604 + if (type == INTEL_OUTPUT_HDMI) { 1605 + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 1606 + 1607 + intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); 1608 + } 1609 + 1604 1610 intel_prepare_ddi_buffer(intel_encoder); 1605 1611 1606 1612 if (type == INTEL_OUTPUT_EDP) { ··· 1673 1667 DPLL_CTRL2_DDI_CLK_OFF(port))); 1674 1668 else if (INTEL_INFO(dev)->gen < 9) 1675 1669 I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE); 1670 + 1671 + if (type == INTEL_OUTPUT_HDMI) { 1672 + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 1673 + 1674 + intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); 1675 + } 1676 1676 } 1677 1677 1678 1678 static void intel_enable_ddi(struct intel_encoder *intel_encoder) ··· 2192 2180 2193 2181 if (intel_hdmi->infoframe_enabled(&encoder->base, pipe_config)) 2194 2182 pipe_config->has_infoframe = true; 2195 - break; 2183 + /* fall through */ 2196 2184 case TRANS_DDI_MODE_SELECT_DVI: 2185 + pipe_config->lane_count = 4; 2186 + break; 2197 2187 case TRANS_DDI_MODE_SELECT_FDI: 2198 2188 break; 2199 2189 case TRANS_DDI_MODE_SELECT_DP_SST:
+6
drivers/gpu/drm/i915/intel_display.c
··· 12005 12005 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); 12006 12006 return ret; 12007 12007 } 12008 + } else if (dev_priv->display.compute_intermediate_wm) { 12009 + if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9) 12010 + pipe_config->wm.intermediate = pipe_config->wm.optimal.ilk; 12008 12011 } 12009 12012 12010 12013 if (INTEL_INFO(dev)->gen >= 9) { ··· 15992 15989 int i; 15993 15990 15994 15991 state->acquire_ctx = &ctx; 15992 + 15993 + /* ignore any reset values/BIOS leftovers in the WM registers */ 15994 + to_intel_atomic_state(state)->skip_intermediate_wm = true; 15995 15995 15996 15996 for_each_crtc_in_state(state, crtc, crtc_state, i) { 15997 15997 /*
+3 -3
drivers/gpu/drm/i915/intel_dpll_mgr.c
··· 1702 1702 1703 1703 static const struct dpll_info skl_plls[] = { 1704 1704 { "DPLL 0", DPLL_ID_SKL_DPLL0, &skl_ddi_dpll0_funcs, INTEL_DPLL_ALWAYS_ON }, 1705 - { "DPPL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, 1706 - { "DPPL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, 1707 - { "DPPL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, 1705 + { "DPLL 1", DPLL_ID_SKL_DPLL1, &skl_ddi_pll_funcs, 0 }, 1706 + { "DPLL 2", DPLL_ID_SKL_DPLL2, &skl_ddi_pll_funcs, 0 }, 1707 + { "DPLL 3", DPLL_ID_SKL_DPLL3, &skl_ddi_pll_funcs, 0 }, 1708 1708 { NULL, -1, NULL, }, 1709 1709 }; 1710 1710
+6
drivers/gpu/drm/i915/intel_drv.h
··· 33 33 #include <drm/drm_crtc.h> 34 34 #include <drm/drm_crtc_helper.h> 35 35 #include <drm/drm_fb_helper.h> 36 + #include <drm/drm_dp_dual_mode_helper.h> 36 37 #include <drm/drm_dp_mst_helper.h> 37 38 #include <drm/drm_rect.h> 38 39 #include <drm/drm_atomic.h> ··· 754 753 struct intel_hdmi { 755 754 i915_reg_t hdmi_reg; 756 755 int ddc_bus; 756 + struct { 757 + enum drm_dp_dual_mode_type type; 758 + int max_tmds_clock; 759 + } dp_dual_mode; 757 760 bool limited_color_range; 758 761 bool color_range_auto; 759 762 bool has_hdmi_sink; ··· 1406 1401 struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder); 1407 1402 bool intel_hdmi_compute_config(struct intel_encoder *encoder, 1408 1403 struct intel_crtc_state *pipe_config); 1404 + void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable); 1409 1405 1410 1406 1411 1407 /* intel_lvds.c */
+125 -16
drivers/gpu/drm/i915/intel_dsi.c
··· 46 46 }, 47 47 }; 48 48 49 + /* return pixels in terms of txbyteclkhs */ 50 + static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, 51 + u16 burst_mode_ratio) 52 + { 53 + return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, 54 + 8 * 100), lane_count); 55 + } 56 + 57 + /* return pixels equvalent to txbyteclkhs */ 58 + static u16 pixels_from_txbyteclkhs(u16 clk_hs, int bpp, int lane_count, 59 + u16 burst_mode_ratio) 60 + { 61 + return DIV_ROUND_UP((clk_hs * lane_count * 8 * 100), 62 + (bpp * burst_mode_ratio)); 63 + } 64 + 49 65 enum mipi_dsi_pixel_format pixel_format_from_register_bits(u32 fmt) 50 66 { 51 67 /* It just so happens the VBT matches register contents. */ ··· 796 780 struct drm_i915_private *dev_priv = dev->dev_private; 797 781 struct drm_display_mode *adjusted_mode = 798 782 &pipe_config->base.adjusted_mode; 783 + struct drm_display_mode *adjusted_mode_sw; 784 + struct intel_crtc *intel_crtc; 799 785 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 786 + unsigned int lane_count = intel_dsi->lane_count; 800 787 unsigned int bpp, fmt; 801 788 enum port port; 802 - u16 vfp, vsync, vbp; 789 + u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; 790 + u16 hfp_sw, hsync_sw, hbp_sw; 791 + u16 crtc_htotal_sw, crtc_hsync_start_sw, crtc_hsync_end_sw, 792 + crtc_hblank_start_sw, crtc_hblank_end_sw; 793 + 794 + intel_crtc = to_intel_crtc(encoder->base.crtc); 795 + adjusted_mode_sw = &intel_crtc->config->base.adjusted_mode; 803 796 804 797 /* 805 798 * Atleast one port is active as encoder->get_config called only if ··· 833 808 adjusted_mode->crtc_vtotal = 834 809 I915_READ(BXT_MIPI_TRANS_VTOTAL(port)); 835 810 811 + hactive = adjusted_mode->crtc_hdisplay; 812 + hfp = I915_READ(MIPI_HFP_COUNT(port)); 813 + 836 814 /* 837 - * TODO: Retrieve hfp, hsync and hbp. 
Adjust them for dual link and 838 - * calculate hsync_start, hsync_end, htotal and hblank_end 815 + * Meaningful for video mode non-burst sync pulse mode only, 816 + * can be zero for non-burst sync events and burst modes 839 817 */ 818 + hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port)); 819 + hbp = I915_READ(MIPI_HBP_COUNT(port)); 820 + 821 + /* harizontal values are in terms of high speed byte clock */ 822 + hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count, 823 + intel_dsi->burst_mode_ratio); 824 + hsync = pixels_from_txbyteclkhs(hsync, bpp, lane_count, 825 + intel_dsi->burst_mode_ratio); 826 + hbp = pixels_from_txbyteclkhs(hbp, bpp, lane_count, 827 + intel_dsi->burst_mode_ratio); 828 + 829 + if (intel_dsi->dual_link) { 830 + hfp *= 2; 831 + hsync *= 2; 832 + hbp *= 2; 833 + } 840 834 841 835 /* vertical values are in terms of lines */ 842 836 vfp = I915_READ(MIPI_VFP_COUNT(port)); 843 837 vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port)); 844 838 vbp = I915_READ(MIPI_VBP_COUNT(port)); 845 839 840 + adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp; 841 + adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay; 842 + adjusted_mode->crtc_hsync_end = hsync + adjusted_mode->crtc_hsync_start; 846 843 adjusted_mode->crtc_hblank_start = adjusted_mode->crtc_hdisplay; 844 + adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_htotal; 847 845 848 - adjusted_mode->crtc_vsync_start = 849 - vfp + adjusted_mode->crtc_vdisplay; 850 - adjusted_mode->crtc_vsync_end = 851 - vsync + adjusted_mode->crtc_vsync_start; 846 + adjusted_mode->crtc_vsync_start = vfp + adjusted_mode->crtc_vdisplay; 847 + adjusted_mode->crtc_vsync_end = vsync + adjusted_mode->crtc_vsync_start; 852 848 adjusted_mode->crtc_vblank_start = adjusted_mode->crtc_vdisplay; 853 849 adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vtotal; 854 - } 855 850 851 + /* 852 + * In BXT DSI there is no regs programmed with few horizontal timings 853 + * in Pixels but txbyteclkhs.. 
So retrieval process adds some 854 + * ROUND_UP ERRORS in the process of PIXELS<==>txbyteclkhs. 855 + * Actually here for the given adjusted_mode, we are calculating the 856 + * value programmed to the port and then back to the horizontal timing 857 + * param in pixels. This is the expected value, including roundup errors 858 + * And if that is same as retrieved value from port, then 859 + * (HW state) adjusted_mode's horizontal timings are corrected to 860 + * match with SW state to nullify the errors. 861 + */ 862 + /* Calculating the value programmed to the Port register */ 863 + hfp_sw = adjusted_mode_sw->crtc_hsync_start - 864 + adjusted_mode_sw->crtc_hdisplay; 865 + hsync_sw = adjusted_mode_sw->crtc_hsync_end - 866 + adjusted_mode_sw->crtc_hsync_start; 867 + hbp_sw = adjusted_mode_sw->crtc_htotal - 868 + adjusted_mode_sw->crtc_hsync_end; 869 + 870 + if (intel_dsi->dual_link) { 871 + hfp_sw /= 2; 872 + hsync_sw /= 2; 873 + hbp_sw /= 2; 874 + } 875 + 876 + hfp_sw = txbyteclkhs(hfp_sw, bpp, lane_count, 877 + intel_dsi->burst_mode_ratio); 878 + hsync_sw = txbyteclkhs(hsync_sw, bpp, lane_count, 879 + intel_dsi->burst_mode_ratio); 880 + hbp_sw = txbyteclkhs(hbp_sw, bpp, lane_count, 881 + intel_dsi->burst_mode_ratio); 882 + 883 + /* Reverse calculating the adjusted mode parameters from port reg vals*/ 884 + hfp_sw = pixels_from_txbyteclkhs(hfp_sw, bpp, lane_count, 885 + intel_dsi->burst_mode_ratio); 886 + hsync_sw = pixels_from_txbyteclkhs(hsync_sw, bpp, lane_count, 887 + intel_dsi->burst_mode_ratio); 888 + hbp_sw = pixels_from_txbyteclkhs(hbp_sw, bpp, lane_count, 889 + intel_dsi->burst_mode_ratio); 890 + 891 + if (intel_dsi->dual_link) { 892 + hfp_sw *= 2; 893 + hsync_sw *= 2; 894 + hbp_sw *= 2; 895 + } 896 + 897 + crtc_htotal_sw = adjusted_mode_sw->crtc_hdisplay + hfp_sw + 898 + hsync_sw + hbp_sw; 899 + crtc_hsync_start_sw = hfp_sw + adjusted_mode_sw->crtc_hdisplay; 900 + crtc_hsync_end_sw = hsync_sw + crtc_hsync_start_sw; 901 + crtc_hblank_start_sw = 
adjusted_mode_sw->crtc_hdisplay; 902 + crtc_hblank_end_sw = crtc_htotal_sw; 903 + 904 + if (adjusted_mode->crtc_htotal == crtc_htotal_sw) 905 + adjusted_mode->crtc_htotal = adjusted_mode_sw->crtc_htotal; 906 + 907 + if (adjusted_mode->crtc_hsync_start == crtc_hsync_start_sw) 908 + adjusted_mode->crtc_hsync_start = 909 + adjusted_mode_sw->crtc_hsync_start; 910 + 911 + if (adjusted_mode->crtc_hsync_end == crtc_hsync_end_sw) 912 + adjusted_mode->crtc_hsync_end = 913 + adjusted_mode_sw->crtc_hsync_end; 914 + 915 + if (adjusted_mode->crtc_hblank_start == crtc_hblank_start_sw) 916 + adjusted_mode->crtc_hblank_start = 917 + adjusted_mode_sw->crtc_hblank_start; 918 + 919 + if (adjusted_mode->crtc_hblank_end == crtc_hblank_end_sw) 920 + adjusted_mode->crtc_hblank_end = 921 + adjusted_mode_sw->crtc_hblank_end; 922 + } 856 923 857 924 static void intel_dsi_get_config(struct intel_encoder *encoder, 858 925 struct intel_crtc_state *pipe_config) ··· 1006 889 case ESCAPE_CLOCK_DIVIDER_4: 1007 890 return 5 * us; 1008 891 } 1009 - } 1010 - 1011 - /* return pixels in terms of txbyteclkhs */ 1012 - static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count, 1013 - u16 burst_mode_ratio) 1014 - { 1015 - return DIV_ROUND_UP(DIV_ROUND_UP(pixels * bpp * burst_mode_ratio, 1016 - 8 * 100), lane_count); 1017 892 } 1018 893 1019 894 static void set_dsi_timings(struct drm_encoder *encoder,
+96 -8
drivers/gpu/drm/i915/intel_hdmi.c
··· 836 836 intel_hdmi_set_hdmi_infoframe(encoder, adjusted_mode); 837 837 } 838 838 839 + void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable) 840 + { 841 + struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi)); 842 + struct i2c_adapter *adapter = 843 + intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); 844 + 845 + if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI) 846 + return; 847 + 848 + DRM_DEBUG_KMS("%s DP dual mode adaptor TMDS output\n", 849 + enable ? "Enabling" : "Disabling"); 850 + 851 + drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type, 852 + adapter, enable); 853 + } 854 + 839 855 static void intel_hdmi_prepare(struct intel_encoder *encoder) 840 856 { 841 857 struct drm_device *dev = encoder->base.dev; ··· 860 844 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 861 845 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 862 846 u32 hdmi_val; 847 + 848 + intel_dp_dual_mode_set_tmds_output(intel_hdmi, true); 863 849 864 850 hdmi_val = SDVO_ENCODING_HDMI; 865 851 if (!HAS_PCH_SPLIT(dev) && crtc->config->limited_color_range) ··· 971 953 dotclock /= pipe_config->pixel_multiplier; 972 954 973 955 pipe_config->base.adjusted_mode.crtc_clock = dotclock; 956 + 957 + pipe_config->lane_count = 4; 974 958 } 975 959 976 960 static void intel_enable_hdmi_audio(struct intel_encoder *encoder) ··· 1160 1140 } 1161 1141 1162 1142 intel_hdmi->set_infoframes(&encoder->base, false, NULL); 1143 + 1144 + intel_dp_dual_mode_set_tmds_output(intel_hdmi, false); 1163 1145 } 1164 1146 1165 1147 static void g4x_disable_hdmi(struct intel_encoder *encoder) ··· 1187 1165 intel_disable_hdmi(encoder); 1188 1166 } 1189 1167 1190 - static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit) 1168 + static int intel_hdmi_source_max_tmds_clock(struct drm_i915_private *dev_priv) 1191 1169 { 1192 - struct drm_device *dev = intel_hdmi_to_dev(hdmi); 1193 - 1194 - if 
((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev)) 1170 + if (IS_G4X(dev_priv)) 1195 1171 return 165000; 1196 - else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) 1172 + else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) 1197 1173 return 300000; 1198 1174 else 1199 1175 return 225000; 1200 1176 } 1201 1177 1178 + static int hdmi_port_clock_limit(struct intel_hdmi *hdmi, 1179 + bool respect_downstream_limits) 1180 + { 1181 + struct drm_device *dev = intel_hdmi_to_dev(hdmi); 1182 + int max_tmds_clock = intel_hdmi_source_max_tmds_clock(to_i915(dev)); 1183 + 1184 + if (respect_downstream_limits) { 1185 + if (hdmi->dp_dual_mode.max_tmds_clock) 1186 + max_tmds_clock = min(max_tmds_clock, 1187 + hdmi->dp_dual_mode.max_tmds_clock); 1188 + if (!hdmi->has_hdmi_sink) 1189 + max_tmds_clock = min(max_tmds_clock, 165000); 1190 + } 1191 + 1192 + return max_tmds_clock; 1193 + } 1194 + 1202 1195 static enum drm_mode_status 1203 1196 hdmi_port_clock_valid(struct intel_hdmi *hdmi, 1204 - int clock, bool respect_dvi_limit) 1197 + int clock, bool respect_downstream_limits) 1205 1198 { 1206 1199 struct drm_device *dev = intel_hdmi_to_dev(hdmi); 1207 1200 1208 1201 if (clock < 25000) 1209 1202 return MODE_CLOCK_LOW; 1210 - if (clock > hdmi_port_clock_limit(hdmi, respect_dvi_limit)) 1203 + if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits)) 1211 1204 return MODE_CLOCK_HIGH; 1212 1205 1213 1206 /* BXT DPLL can't generate 223-240 MHz */ ··· 1346 1309 * within limits. 
1347 1310 */ 1348 1311 if (pipe_config->pipe_bpp > 8*3 && pipe_config->has_hdmi_sink && 1349 - hdmi_port_clock_valid(intel_hdmi, clock_12bpc, false) == MODE_OK && 1312 + hdmi_port_clock_valid(intel_hdmi, clock_12bpc, true) == MODE_OK && 1350 1313 hdmi_12bpc_possible(pipe_config)) { 1351 1314 DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); 1352 1315 desired_bpp = 12*3; ··· 1374 1337 /* Set user selected PAR to incoming mode's member */ 1375 1338 adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; 1376 1339 1340 + pipe_config->lane_count = 4; 1341 + 1377 1342 return true; 1378 1343 } 1379 1344 ··· 1388 1349 intel_hdmi->has_audio = false; 1389 1350 intel_hdmi->rgb_quant_range_selectable = false; 1390 1351 1352 + intel_hdmi->dp_dual_mode.type = DRM_DP_DUAL_MODE_NONE; 1353 + intel_hdmi->dp_dual_mode.max_tmds_clock = 0; 1354 + 1391 1355 kfree(to_intel_connector(connector)->detect_edid); 1392 1356 to_intel_connector(connector)->detect_edid = NULL; 1357 + } 1358 + 1359 + static void 1360 + intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) 1361 + { 1362 + struct drm_i915_private *dev_priv = to_i915(connector->dev); 1363 + struct intel_hdmi *hdmi = intel_attached_hdmi(connector); 1364 + enum port port = hdmi_to_dig_port(hdmi)->port; 1365 + struct i2c_adapter *adapter = 1366 + intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus); 1367 + enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter); 1368 + 1369 + /* 1370 + * Type 1 DVI adaptors are not required to implement any 1371 + * registers, so we can't always detect their presence. 1372 + * Ideally we should be able to check the state of the 1373 + * CONFIG1 pin, but no such luck on our hardware. 1374 + * 1375 + * The only method left to us is to check the VBT to see 1376 + * if the port is a dual mode capable DP port. 
But let's 1377 + * only do that when we sucesfully read the EDID, to avoid 1378 + * confusing log messages about DP dual mode adaptors when 1379 + * there's nothing connected to the port. 1380 + */ 1381 + if (type == DRM_DP_DUAL_MODE_UNKNOWN) { 1382 + if (has_edid && 1383 + intel_bios_is_port_dp_dual_mode(dev_priv, port)) { 1384 + DRM_DEBUG_KMS("Assuming DP dual mode adaptor presence based on VBT\n"); 1385 + type = DRM_DP_DUAL_MODE_TYPE1_DVI; 1386 + } else { 1387 + type = DRM_DP_DUAL_MODE_NONE; 1388 + } 1389 + } 1390 + 1391 + if (type == DRM_DP_DUAL_MODE_NONE) 1392 + return; 1393 + 1394 + hdmi->dp_dual_mode.type = type; 1395 + hdmi->dp_dual_mode.max_tmds_clock = 1396 + drm_dp_dual_mode_max_tmds_clock(type, adapter); 1397 + 1398 + DRM_DEBUG_KMS("DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", 1399 + drm_dp_get_dual_mode_type_name(type), 1400 + hdmi->dp_dual_mode.max_tmds_clock); 1393 1401 } 1394 1402 1395 1403 static bool ··· 1453 1367 edid = drm_get_edid(connector, 1454 1368 intel_gmbus_get_adapter(dev_priv, 1455 1369 intel_hdmi->ddc_bus)); 1370 + 1371 + intel_hdmi_dp_dual_mode_detect(connector, edid != NULL); 1456 1372 1457 1373 intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS); 1458 1374 }
+9 -137
drivers/gpu/drm/i915/intel_lrc.c
··· 721 721 return ret; 722 722 } 723 723 724 - static int logical_ring_wait_for_space(struct drm_i915_gem_request *req, 725 - int bytes) 726 - { 727 - struct intel_ringbuffer *ringbuf = req->ringbuf; 728 - struct intel_engine_cs *engine = req->engine; 729 - struct drm_i915_gem_request *target; 730 - unsigned space; 731 - int ret; 732 - 733 - if (intel_ring_space(ringbuf) >= bytes) 734 - return 0; 735 - 736 - /* The whole point of reserving space is to not wait! */ 737 - WARN_ON(ringbuf->reserved_in_use); 738 - 739 - list_for_each_entry(target, &engine->request_list, list) { 740 - /* 741 - * The request queue is per-engine, so can contain requests 742 - * from multiple ringbuffers. Here, we must ignore any that 743 - * aren't from the ringbuffer we're considering. 744 - */ 745 - if (target->ringbuf != ringbuf) 746 - continue; 747 - 748 - /* Would completion of this request free enough space? */ 749 - space = __intel_ring_space(target->postfix, ringbuf->tail, 750 - ringbuf->size); 751 - if (space >= bytes) 752 - break; 753 - } 754 - 755 - if (WARN_ON(&target->list == &engine->request_list)) 756 - return -ENOSPC; 757 - 758 - ret = i915_wait_request(target); 759 - if (ret) 760 - return ret; 761 - 762 - ringbuf->space = space; 763 - return 0; 764 - } 765 - 766 724 /* 767 725 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload 768 726 * @request: Request to advance the logical ringbuffer of. 
··· 772 814 return 0; 773 815 } 774 816 775 - static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) 776 - { 777 - uint32_t __iomem *virt; 778 - int rem = ringbuf->size - ringbuf->tail; 779 - 780 - virt = ringbuf->virtual_start + ringbuf->tail; 781 - rem /= 4; 782 - while (rem--) 783 - iowrite32(MI_NOOP, virt++); 784 - 785 - ringbuf->tail = 0; 786 - intel_ring_update_space(ringbuf); 787 - } 788 - 789 - static int logical_ring_prepare(struct drm_i915_gem_request *req, int bytes) 790 - { 791 - struct intel_ringbuffer *ringbuf = req->ringbuf; 792 - int remain_usable = ringbuf->effective_size - ringbuf->tail; 793 - int remain_actual = ringbuf->size - ringbuf->tail; 794 - int ret, total_bytes, wait_bytes = 0; 795 - bool need_wrap = false; 796 - 797 - if (ringbuf->reserved_in_use) 798 - total_bytes = bytes; 799 - else 800 - total_bytes = bytes + ringbuf->reserved_size; 801 - 802 - if (unlikely(bytes > remain_usable)) { 803 - /* 804 - * Not enough space for the basic request. So need to flush 805 - * out the remainder and then wait for base + reserved. 806 - */ 807 - wait_bytes = remain_actual + total_bytes; 808 - need_wrap = true; 809 - } else { 810 - if (unlikely(total_bytes > remain_usable)) { 811 - /* 812 - * The base request will fit but the reserved space 813 - * falls off the end. So don't need an immediate wrap 814 - * and only need to effectively wait for the reserved 815 - * size space from the start of ringbuffer. 816 - */ 817 - wait_bytes = remain_actual + ringbuf->reserved_size; 818 - } else if (total_bytes > ringbuf->space) { 819 - /* No wrapping required, just waiting. 
*/ 820 - wait_bytes = total_bytes; 821 - } 822 - } 823 - 824 - if (wait_bytes) { 825 - ret = logical_ring_wait_for_space(req, wait_bytes); 826 - if (unlikely(ret)) 827 - return ret; 828 - 829 - if (need_wrap) 830 - __wrap_ring_buffer(ringbuf); 831 - } 832 - 833 - return 0; 834 - } 835 - 836 - /** 837 - * intel_logical_ring_begin() - prepare the logical ringbuffer to accept some commands 838 - * 839 - * @req: The request to start some new work for 840 - * @num_dwords: number of DWORDs that we plan to write to the ringbuffer. 841 - * 842 - * The ringbuffer might not be ready to accept the commands right away (maybe it needs to 843 - * be wrapped, or wait a bit for the tail to be updated). This function takes care of that 844 - * and also preallocates a request (every workload submission is still mediated through 845 - * requests, same as it did with legacy ringbuffer submission). 846 - * 847 - * Return: non-zero if the ringbuffer is not ready to be written to. 848 - */ 849 - int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 850 - { 851 - int ret; 852 - 853 - ret = logical_ring_prepare(req, num_dwords * sizeof(uint32_t)); 854 - if (ret) 855 - return ret; 856 - 857 - req->ringbuf->space -= num_dwords * sizeof(uint32_t); 858 - return 0; 859 - } 860 - 861 817 int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request) 862 818 { 863 819 /* ··· 784 912 */ 785 913 intel_ring_reserved_space_reserve(request->ringbuf, MIN_SPACE_FOR_ADD_REQUEST); 786 914 787 - return intel_logical_ring_begin(request, 0); 915 + return intel_ring_begin(request, 0); 788 916 } 789 917 790 918 /** ··· 854 982 855 983 if (engine == &dev_priv->engine[RCS] && 856 984 instp_mode != dev_priv->relative_constants_mode) { 857 - ret = intel_logical_ring_begin(params->request, 4); 985 + ret = intel_ring_begin(params->request, 4); 858 986 if (ret) 859 987 return ret; 860 988 ··· 1050 1178 if (ret) 1051 1179 return ret; 1052 1180 1053 - ret = 
intel_logical_ring_begin(req, w->count * 2 + 2); 1181 + ret = intel_ring_begin(req, w->count * 2 + 2); 1054 1182 if (ret) 1055 1183 return ret; 1056 1184 ··· 1541 1669 const int num_lri_cmds = GEN8_LEGACY_PDPES * 2; 1542 1670 int i, ret; 1543 1671 1544 - ret = intel_logical_ring_begin(req, num_lri_cmds * 2 + 2); 1672 + ret = intel_ring_begin(req, num_lri_cmds * 2 + 2); 1545 1673 if (ret) 1546 1674 return ret; 1547 1675 ··· 1588 1716 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine); 1589 1717 } 1590 1718 1591 - ret = intel_logical_ring_begin(req, 4); 1719 + ret = intel_ring_begin(req, 4); 1592 1720 if (ret) 1593 1721 return ret; 1594 1722 ··· 1650 1778 uint32_t cmd; 1651 1779 int ret; 1652 1780 1653 - ret = intel_logical_ring_begin(request, 4); 1781 + ret = intel_ring_begin(request, 4); 1654 1782 if (ret) 1655 1783 return ret; 1656 1784 ··· 1718 1846 vf_flush_wa = true; 1719 1847 } 1720 1848 1721 - ret = intel_logical_ring_begin(request, vf_flush_wa ? 12 : 6); 1849 + ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6); 1722 1850 if (ret) 1723 1851 return ret; 1724 1852 ··· 1792 1920 struct intel_ringbuffer *ringbuf = request->ringbuf; 1793 1921 int ret; 1794 1922 1795 - ret = intel_logical_ring_begin(request, 6 + WA_TAIL_DWORDS); 1923 + ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS); 1796 1924 if (ret) 1797 1925 return ret; 1798 1926 ··· 1816 1944 struct intel_ringbuffer *ringbuf = request->ringbuf; 1817 1945 int ret; 1818 1946 1819 - ret = intel_logical_ring_begin(request, 8 + WA_TAIL_DWORDS); 1947 + ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS); 1820 1948 if (ret) 1821 1949 return ret; 1822 1950
-1
drivers/gpu/drm/i915/intel_lrc.h
··· 63 63 void intel_logical_ring_stop(struct intel_engine_cs *engine); 64 64 void intel_logical_ring_cleanup(struct intel_engine_cs *engine); 65 65 int intel_logical_rings_init(struct drm_device *dev); 66 - int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords); 67 66 68 67 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req); 69 68 /**
+4 -8
drivers/gpu/drm/i915/intel_mocs.c
··· 239 239 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) 240 240 return -ENODEV; 241 241 242 - ret = intel_logical_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); 243 - if (ret) { 244 - DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); 242 + ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES); 243 + if (ret) 245 244 return ret; 246 - } 247 245 248 246 intel_logical_ring_emit(ringbuf, 249 247 MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES)); ··· 303 305 if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES)) 304 306 return -ENODEV; 305 307 306 - ret = intel_logical_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); 307 - if (ret) { 308 - DRM_DEBUG("intel_logical_ring_begin failed %d\n", ret); 308 + ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES); 309 + if (ret) 309 310 return ret; 310 - } 311 311 312 312 intel_logical_ring_emit(ringbuf, 313 313 MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+2
drivers/gpu/drm/i915/intel_pm.c
··· 3904 3904 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 3905 3905 hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); 3906 3906 3907 + memset(active, 0, sizeof(*active)); 3908 + 3907 3909 active->pipe_enabled = intel_crtc->active; 3908 3910 3909 3911 if (active->pipe_enabled) {
+47 -8
drivers/gpu/drm/i915/intel_psr.c
··· 280 280 * with the 5 or 6 idle patterns. 281 281 */ 282 282 uint32_t idle_frames = max(6, dev_priv->vbt.psr.idle_frames); 283 - uint32_t val = 0x0; 283 + uint32_t val = EDP_PSR_ENABLE; 284 + 285 + val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT; 286 + val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT; 284 287 285 288 if (IS_HASWELL(dev)) 286 289 val |= EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES; ··· 291 288 if (dev_priv->psr.link_standby) 292 289 val |= EDP_PSR_LINK_STANDBY; 293 290 294 - I915_WRITE(EDP_PSR_CTL, val | 295 - max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT | 296 - idle_frames << EDP_PSR_IDLE_FRAME_SHIFT | 297 - EDP_PSR_ENABLE); 291 + if (dev_priv->vbt.psr.tp1_wakeup_time > 5) 292 + val |= EDP_PSR_TP1_TIME_2500us; 293 + else if (dev_priv->vbt.psr.tp1_wakeup_time > 1) 294 + val |= EDP_PSR_TP1_TIME_500us; 295 + else if (dev_priv->vbt.psr.tp1_wakeup_time > 0) 296 + val |= EDP_PSR_TP1_TIME_100us; 297 + else 298 + val |= EDP_PSR_TP1_TIME_0us; 298 299 299 - if (dev_priv->psr.psr2_support) 300 - I915_WRITE(EDP_PSR2_CTL, EDP_PSR2_ENABLE | 301 - EDP_SU_TRACK_ENABLE | EDP_PSR2_TP2_TIME_100); 300 + if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) 301 + val |= EDP_PSR_TP2_TP3_TIME_2500us; 302 + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) 303 + val |= EDP_PSR_TP2_TP3_TIME_500us; 304 + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) 305 + val |= EDP_PSR_TP2_TP3_TIME_100us; 306 + else 307 + val |= EDP_PSR_TP2_TP3_TIME_0us; 308 + 309 + if (intel_dp_source_supports_hbr2(intel_dp) && 310 + drm_dp_tps3_supported(intel_dp->dpcd)) 311 + val |= EDP_PSR_TP1_TP3_SEL; 312 + else 313 + val |= EDP_PSR_TP1_TP2_SEL; 314 + 315 + I915_WRITE(EDP_PSR_CTL, val); 316 + 317 + if (!dev_priv->psr.psr2_support) 318 + return; 319 + 320 + /* FIXME: selective update is probably totally broken because it doesn't 321 + * mesh at all with our frontbuffer tracking. And the hw alone isn't 322 + * good enough. 
*/ 323 + val = EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE; 324 + 325 + if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 5) 326 + val |= EDP_PSR2_TP2_TIME_2500; 327 + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 1) 328 + val |= EDP_PSR2_TP2_TIME_500; 329 + else if (dev_priv->vbt.psr.tp2_tp3_wakeup_time > 0) 330 + val |= EDP_PSR2_TP2_TIME_100; 331 + else 332 + val |= EDP_PSR2_TP2_TIME_50; 333 + 334 + I915_WRITE(EDP_PSR2_CTL, val); 302 335 } 303 336 304 337 static bool intel_psr_match_conditions(struct intel_dp *intel_dp)
+96 -119
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 53 53 ringbuf->tail, ringbuf->size); 54 54 } 55 55 56 - int intel_ring_space(struct intel_ringbuffer *ringbuf) 57 - { 58 - intel_ring_update_space(ringbuf); 59 - return ringbuf->space; 60 - } 61 - 62 56 bool intel_engine_stopped(struct intel_engine_cs *engine) 63 57 { 64 58 struct drm_i915_private *dev_priv = engine->dev->dev_private; ··· 1303 1309 intel_ring_emit(signaller, seqno); 1304 1310 intel_ring_emit(signaller, 0); 1305 1311 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1306 - MI_SEMAPHORE_TARGET(waiter->id)); 1312 + MI_SEMAPHORE_TARGET(waiter->hw_id)); 1307 1313 intel_ring_emit(signaller, 0); 1308 1314 } 1309 1315 ··· 1343 1349 intel_ring_emit(signaller, upper_32_bits(gtt_offset)); 1344 1350 intel_ring_emit(signaller, seqno); 1345 1351 intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL | 1346 - MI_SEMAPHORE_TARGET(waiter->id)); 1352 + MI_SEMAPHORE_TARGET(waiter->hw_id)); 1347 1353 intel_ring_emit(signaller, 0); 1348 1354 } 1349 1355 ··· 1567 1573 static void 1568 1574 gen6_seqno_barrier(struct intel_engine_cs *engine) 1569 1575 { 1576 + struct drm_i915_private *dev_priv = engine->dev->dev_private; 1577 + 1570 1578 /* Workaround to force correct ordering between irq and seqno writes on 1571 1579 * ivb (and maybe also on snb) by reading from a CS register (like 1572 1580 * ACTHD) before reading the status page. ··· 1580 1584 * the write time to land, but that would incur a delay after every 1581 1585 * batch i.e. much more frequent than a delay when waiting for the 1582 1586 * interrupt (with the same net latency). 1587 + * 1588 + * Also note that to prevent whole machine hangs on gen7, we have to 1589 + * take the spinlock to guard against concurrent cacheline access. 
1583 1590 */ 1584 - struct drm_i915_private *dev_priv = engine->dev->dev_private; 1591 + spin_lock_irq(&dev_priv->uncore.lock); 1585 1592 POSTING_READ_FW(RING_ACTHD(engine->mmio_base)); 1593 + spin_unlock_irq(&dev_priv->uncore.lock); 1586 1594 } 1587 1595 1588 1596 static u32 ··· 2312 2312 engine->dev = NULL; 2313 2313 } 2314 2314 2315 - static int ring_wait_for_space(struct intel_engine_cs *engine, int n) 2316 - { 2317 - struct intel_ringbuffer *ringbuf = engine->buffer; 2318 - struct drm_i915_gem_request *request; 2319 - unsigned space; 2320 - int ret; 2321 - 2322 - if (intel_ring_space(ringbuf) >= n) 2323 - return 0; 2324 - 2325 - /* The whole point of reserving space is to not wait! */ 2326 - WARN_ON(ringbuf->reserved_in_use); 2327 - 2328 - list_for_each_entry(request, &engine->request_list, list) { 2329 - space = __intel_ring_space(request->postfix, ringbuf->tail, 2330 - ringbuf->size); 2331 - if (space >= n) 2332 - break; 2333 - } 2334 - 2335 - if (WARN_ON(&request->list == &engine->request_list)) 2336 - return -ENOSPC; 2337 - 2338 - ret = i915_wait_request(request); 2339 - if (ret) 2340 - return ret; 2341 - 2342 - ringbuf->space = space; 2343 - return 0; 2344 - } 2345 - 2346 - static void __wrap_ring_buffer(struct intel_ringbuffer *ringbuf) 2347 - { 2348 - uint32_t __iomem *virt; 2349 - int rem = ringbuf->size - ringbuf->tail; 2350 - 2351 - virt = ringbuf->virtual_start + ringbuf->tail; 2352 - rem /= 4; 2353 - while (rem--) 2354 - iowrite32(MI_NOOP, virt++); 2355 - 2356 - ringbuf->tail = 0; 2357 - intel_ring_update_space(ringbuf); 2358 - } 2359 - 2360 2315 int intel_engine_idle(struct intel_engine_cs *engine) 2361 2316 { 2362 2317 struct drm_i915_gem_request *req; ··· 2353 2398 2354 2399 void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size) 2355 2400 { 2356 - WARN_ON(ringbuf->reserved_size); 2357 - WARN_ON(ringbuf->reserved_in_use); 2358 - 2401 + GEM_BUG_ON(ringbuf->reserved_size); 2359 2402 ringbuf->reserved_size = size; 2360 
2403 } 2361 2404 2362 2405 void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf) 2363 2406 { 2364 - WARN_ON(ringbuf->reserved_in_use); 2365 - 2407 + GEM_BUG_ON(!ringbuf->reserved_size); 2366 2408 ringbuf->reserved_size = 0; 2367 - ringbuf->reserved_in_use = false; 2368 2409 } 2369 2410 2370 2411 void intel_ring_reserved_space_use(struct intel_ringbuffer *ringbuf) 2371 2412 { 2372 - WARN_ON(ringbuf->reserved_in_use); 2373 - 2374 - ringbuf->reserved_in_use = true; 2375 - ringbuf->reserved_tail = ringbuf->tail; 2413 + GEM_BUG_ON(!ringbuf->reserved_size); 2414 + ringbuf->reserved_size = 0; 2376 2415 } 2377 2416 2378 2417 void intel_ring_reserved_space_end(struct intel_ringbuffer *ringbuf) 2379 2418 { 2380 - WARN_ON(!ringbuf->reserved_in_use); 2381 - if (ringbuf->tail > ringbuf->reserved_tail) { 2382 - WARN(ringbuf->tail > ringbuf->reserved_tail + ringbuf->reserved_size, 2383 - "request reserved size too small: %d vs %d!\n", 2384 - ringbuf->tail - ringbuf->reserved_tail, ringbuf->reserved_size); 2385 - } else { 2386 - /* 2387 - * The ring was wrapped while the reserved space was in use. 2388 - * That means that some unknown amount of the ring tail was 2389 - * no-op filled and skipped. Thus simply adding the ring size 2390 - * to the tail and doing the above space check will not work. 2391 - * Rather than attempt to track how much tail was skipped, 2392 - * it is much simpler to say that also skipping the sanity 2393 - * check every once in a while is not a big issue. 
2394 - */ 2395 - } 2396 - 2397 - ringbuf->reserved_size = 0; 2398 - ringbuf->reserved_in_use = false; 2419 + GEM_BUG_ON(ringbuf->reserved_size); 2399 2420 } 2400 2421 2401 - static int __intel_ring_prepare(struct intel_engine_cs *engine, int bytes) 2422 + static int wait_for_space(struct drm_i915_gem_request *req, int bytes) 2402 2423 { 2403 - struct intel_ringbuffer *ringbuf = engine->buffer; 2404 - int remain_usable = ringbuf->effective_size - ringbuf->tail; 2424 + struct intel_ringbuffer *ringbuf = req->ringbuf; 2425 + struct intel_engine_cs *engine = req->engine; 2426 + struct drm_i915_gem_request *target; 2427 + 2428 + intel_ring_update_space(ringbuf); 2429 + if (ringbuf->space >= bytes) 2430 + return 0; 2431 + 2432 + /* 2433 + * Space is reserved in the ringbuffer for finalising the request, 2434 + * as that cannot be allowed to fail. During request finalisation, 2435 + * reserved_space is set to 0 to stop the overallocation and the 2436 + * assumption is that then we never need to wait (which has the 2437 + * risk of failing with EINTR). 2438 + * 2439 + * See also i915_gem_request_alloc() and i915_add_request(). 2440 + */ 2441 + GEM_BUG_ON(!ringbuf->reserved_size); 2442 + 2443 + list_for_each_entry(target, &engine->request_list, list) { 2444 + unsigned space; 2445 + 2446 + /* 2447 + * The request queue is per-engine, so can contain requests 2448 + * from multiple ringbuffers. Here, we must ignore any that 2449 + * aren't from the ringbuffer we're considering. 2450 + */ 2451 + if (target->ringbuf != ringbuf) 2452 + continue; 2453 + 2454 + /* Would completion of this request free enough space? 
*/ 2455 + space = __intel_ring_space(target->postfix, ringbuf->tail, 2456 + ringbuf->size); 2457 + if (space >= bytes) 2458 + break; 2459 + } 2460 + 2461 + if (WARN_ON(&target->list == &engine->request_list)) 2462 + return -ENOSPC; 2463 + 2464 + return i915_wait_request(target); 2465 + } 2466 + 2467 + int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords) 2468 + { 2469 + struct intel_ringbuffer *ringbuf = req->ringbuf; 2405 2470 int remain_actual = ringbuf->size - ringbuf->tail; 2406 - int ret, total_bytes, wait_bytes = 0; 2471 + int remain_usable = ringbuf->effective_size - ringbuf->tail; 2472 + int bytes = num_dwords * sizeof(u32); 2473 + int total_bytes, wait_bytes; 2407 2474 bool need_wrap = false; 2408 2475 2409 - if (ringbuf->reserved_in_use) 2410 - total_bytes = bytes; 2411 - else 2412 - total_bytes = bytes + ringbuf->reserved_size; 2476 + total_bytes = bytes + ringbuf->reserved_size; 2413 2477 2414 2478 if (unlikely(bytes > remain_usable)) { 2415 2479 /* ··· 2437 2463 */ 2438 2464 wait_bytes = remain_actual + total_bytes; 2439 2465 need_wrap = true; 2466 + } else if (unlikely(total_bytes > remain_usable)) { 2467 + /* 2468 + * The base request will fit but the reserved space 2469 + * falls off the end. So we don't need an immediate wrap 2470 + * and only need to effectively wait for the reserved 2471 + * size space from the start of ringbuffer. 2472 + */ 2473 + wait_bytes = remain_actual + ringbuf->reserved_size; 2440 2474 } else { 2441 - if (unlikely(total_bytes > remain_usable)) { 2442 - /* 2443 - * The base request will fit but the reserved space 2444 - * falls off the end. So don't need an immediate wrap 2445 - * and only need to effectively wait for the reserved 2446 - * size space from the start of ringbuffer. 2447 - */ 2448 - wait_bytes = remain_actual + ringbuf->reserved_size; 2449 - } else if (total_bytes > ringbuf->space) { 2450 - /* No wrapping required, just waiting. 
*/ 2451 - wait_bytes = total_bytes; 2452 - } 2475 + /* No wrapping required, just waiting. */ 2476 + wait_bytes = total_bytes; 2453 2477 } 2454 2478 2455 - if (wait_bytes) { 2456 - ret = ring_wait_for_space(engine, wait_bytes); 2479 + if (wait_bytes > ringbuf->space) { 2480 + int ret = wait_for_space(req, wait_bytes); 2457 2481 if (unlikely(ret)) 2458 2482 return ret; 2459 2483 2460 - if (need_wrap) 2461 - __wrap_ring_buffer(ringbuf); 2484 + intel_ring_update_space(ringbuf); 2485 + if (unlikely(ringbuf->space < wait_bytes)) 2486 + return -EAGAIN; 2462 2487 } 2463 2488 2464 - return 0; 2465 - } 2489 + if (unlikely(need_wrap)) { 2490 + GEM_BUG_ON(remain_actual > ringbuf->space); 2491 + GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size); 2466 2492 2467 - int intel_ring_begin(struct drm_i915_gem_request *req, 2468 - int num_dwords) 2469 - { 2470 - struct intel_engine_cs *engine = req->engine; 2471 - int ret; 2493 + /* Fill the tail with MI_NOOP */ 2494 + memset(ringbuf->virtual_start + ringbuf->tail, 2495 + 0, remain_actual); 2496 + ringbuf->tail = 0; 2497 + ringbuf->space -= remain_actual; 2498 + } 2472 2499 2473 - ret = __intel_ring_prepare(engine, num_dwords * sizeof(uint32_t)); 2474 - if (ret) 2475 - return ret; 2476 - 2477 - engine->buffer->space -= num_dwords * sizeof(uint32_t); 2500 + ringbuf->space -= bytes; 2501 + GEM_BUG_ON(ringbuf->space < 0); 2478 2502 return 0; 2479 2503 } 2480 2504 ··· 2744 2772 engine->name = "render ring"; 2745 2773 engine->id = RCS; 2746 2774 engine->exec_id = I915_EXEC_RENDER; 2775 + engine->hw_id = 0; 2747 2776 engine->mmio_base = RENDER_RING_BASE; 2748 2777 2749 2778 if (INTEL_INFO(dev)->gen >= 8) { ··· 2896 2923 engine->name = "bsd ring"; 2897 2924 engine->id = VCS; 2898 2925 engine->exec_id = I915_EXEC_BSD; 2926 + engine->hw_id = 1; 2899 2927 2900 2928 engine->write_tail = ring_write_tail; 2901 2929 if (INTEL_INFO(dev)->gen >= 6) { ··· 2975 3001 engine->name = "bsd2 ring"; 2976 3002 engine->id = VCS2; 2977 3003 
engine->exec_id = I915_EXEC_BSD; 3004 + engine->hw_id = 4; 2978 3005 2979 3006 engine->write_tail = ring_write_tail; 2980 3007 engine->mmio_base = GEN8_BSD2_RING_BASE; ··· 3008 3033 engine->name = "blitter ring"; 3009 3034 engine->id = BCS; 3010 3035 engine->exec_id = I915_EXEC_BLT; 3036 + engine->hw_id = 2; 3011 3037 3012 3038 engine->mmio_base = BLT_RING_BASE; 3013 3039 engine->write_tail = ring_write_tail; ··· 3068 3092 engine->name = "video enhancement ring"; 3069 3093 engine->id = VECS; 3070 3094 engine->exec_id = I915_EXEC_VEBOX; 3095 + engine->hw_id = 3; 3071 3096 3072 3097 engine->mmio_base = VEBOX_RING_BASE; 3073 3098 engine->write_tail = ring_write_tail;
+2 -4
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 108 108 int size; 109 109 int effective_size; 110 110 int reserved_size; 111 - int reserved_tail; 112 - bool reserved_in_use; 113 111 114 112 /** We track the position of the requests in the ring buffer, and 115 113 * when each is retired we increment last_retired_head as the GPU ··· 154 156 #define I915_NUM_ENGINES 5 155 157 #define _VCS(n) (VCS + (n)) 156 158 unsigned int exec_id; 157 - unsigned int guc_id; 159 + unsigned int hw_id; 160 + unsigned int guc_id; /* XXX same as hw_id? */ 158 161 u32 mmio_base; 159 162 struct drm_device *dev; 160 163 struct intel_ringbuffer *buffer; ··· 458 459 } 459 460 int __intel_ring_space(int head, int tail, int size); 460 461 void intel_ring_update_space(struct intel_ringbuffer *ringbuf); 461 - int intel_ring_space(struct intel_ringbuffer *ringbuf); 462 462 bool intel_engine_stopped(struct intel_engine_cs *engine); 463 463 464 464 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
+12
drivers/gpu/drm/i915/intel_vbt_defs.h
··· 740 740 #define DEVICE_TYPE_INT_TV 0x1009 741 741 #define DEVICE_TYPE_HDMI 0x60D2 742 742 #define DEVICE_TYPE_DP 0x68C6 743 + #define DEVICE_TYPE_DP_DUAL_MODE 0x60D6 743 744 #define DEVICE_TYPE_eDP 0x78C6 744 745 745 746 #define DEVICE_TYPE_CLASS_EXTENSION (1 << 15) ··· 773 772 DEVICE_TYPE_TMDS_DVI_SIGNALING | \ 774 773 DEVICE_TYPE_VIDEO_SIGNALING | \ 775 774 DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ 775 + DEVICE_TYPE_ANALOG_OUTPUT) 776 + 777 + #define DEVICE_TYPE_DP_DUAL_MODE_BITS \ 778 + (DEVICE_TYPE_INTERNAL_CONNECTOR | \ 779 + DEVICE_TYPE_MIPI_OUTPUT | \ 780 + DEVICE_TYPE_COMPOSITE_OUTPUT | \ 781 + DEVICE_TYPE_LVDS_SINGALING | \ 782 + DEVICE_TYPE_TMDS_DVI_SIGNALING | \ 783 + DEVICE_TYPE_VIDEO_SIGNALING | \ 784 + DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ 785 + DEVICE_TYPE_DIGITAL_OUTPUT | \ 776 786 DEVICE_TYPE_ANALOG_OUTPUT) 777 787 778 788 /* define the DVO port for HDMI output type */
+92
include/drm/drm_dp_dual_mode_helper.h
··· 1 + /* 2 + * Copyright © 2016 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 
21 + */ 22 + 23 + #ifndef DRM_DP_DUAL_MODE_HELPER_H 24 + #define DRM_DP_DUAL_MODE_HELPER_H 25 + 26 + #include <linux/types.h> 27 + 28 + /* 29 + * Optional for type 1 DVI adaptors 30 + * Mandatory for type 1 HDMI and type 2 adaptors 31 + */ 32 + #define DP_DUAL_MODE_HDMI_ID 0x00 /* 00-0f */ 33 + #define DP_DUAL_MODE_HDMI_ID_LEN 16 34 + /* 35 + * Optional for type 1 adaptors 36 + * Mandatory for type 2 adaptors 37 + */ 38 + #define DP_DUAL_MODE_ADAPTOR_ID 0x10 39 + #define DP_DUAL_MODE_REV_MASK 0x07 40 + #define DP_DUAL_MODE_REV_TYPE2 0x00 41 + #define DP_DUAL_MODE_TYPE_MASK 0xf0 42 + #define DP_DUAL_MODE_TYPE_TYPE2 0xa0 43 + #define DP_DUAL_MODE_IEEE_OUI 0x11 /* 11-13*/ 44 + #define DP_DUAL_IEEE_OUI_LEN 3 45 + #define DP_DUAL_DEVICE_ID 0x14 /* 14-19 */ 46 + #define DP_DUAL_DEVICE_ID_LEN 6 47 + #define DP_DUAL_MODE_HARDWARE_REV 0x1a 48 + #define DP_DUAL_MODE_FIRMWARE_MAJOR_REV 0x1b 49 + #define DP_DUAL_MODE_FIRMWARE_MINOR_REV 0x1c 50 + #define DP_DUAL_MODE_MAX_TMDS_CLOCK 0x1d 51 + #define DP_DUAL_MODE_I2C_SPEED_CAP 0x1e 52 + #define DP_DUAL_MODE_TMDS_OEN 0x20 53 + #define DP_DUAL_MODE_TMDS_DISABLE 0x01 54 + #define DP_DUAL_MODE_HDMI_PIN_CTRL 0x21 55 + #define DP_DUAL_MODE_CEC_ENABLE 0x01 56 + #define DP_DUAL_MODE_I2C_SPEED_CTRL 0x22 57 + 58 + struct i2c_adapter; 59 + 60 + ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, 61 + u8 offset, void *buffer, size_t size); 62 + ssize_t drm_dp_dual_mode_write(struct i2c_adapter *adapter, 63 + u8 offset, const void *buffer, size_t size); 64 + 65 + /** 66 + * enum drm_dp_dual_mode_type - Type of the DP dual mode adaptor 67 + * @DRM_DP_DUAL_MODE_NONE: No DP dual mode adaptor 68 + * @DRM_DP_DUAL_MODE_UNKNOWN: Could be either none or type 1 DVI adaptor 69 + * @DRM_DP_DUAL_MODE_TYPE1_DVI: Type 1 DVI adaptor 70 + * @DRM_DP_DUAL_MODE_TYPE1_HDMI: Type 1 HDMI adaptor 71 + * @DRM_DP_DUAL_MODE_TYPE2_DVI: Type 2 DVI adaptor 72 + * @DRM_DP_DUAL_MODE_TYPE2_HDMI: Type 2 HDMI adaptor 73 + */ 74 + enum drm_dp_dual_mode_type { 75 + 
DRM_DP_DUAL_MODE_NONE, 76 + DRM_DP_DUAL_MODE_UNKNOWN, 77 + DRM_DP_DUAL_MODE_TYPE1_DVI, 78 + DRM_DP_DUAL_MODE_TYPE1_HDMI, 79 + DRM_DP_DUAL_MODE_TYPE2_DVI, 80 + DRM_DP_DUAL_MODE_TYPE2_HDMI, 81 + }; 82 + 83 + enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter); 84 + int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type, 85 + struct i2c_adapter *adapter); 86 + int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type, 87 + struct i2c_adapter *adapter, bool *enabled); 88 + int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, 89 + struct i2c_adapter *adapter, bool enable); 90 + const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type); 91 + 92 + #endif