Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-for-v4.13-rc3' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"These are the fixes for 4.13-rc3: vmwgfx, exynos, i915, amdgpu,
nouveau, host1x and displayport fixes.

As expected people woke up this week, i915 didn't do an -rc2 pull so
got a bumper -rc3 pull, and Ben resurfaced on nouveau and fixed a
bunch of major crashers seen on Fedora 26, and there are a few vmwgfx
fixes as well.

Otherwise exynos had some regression fixes/cleanups, and amdgpu has an
rcu locking regression fix and a couple of minor fixes"

* tag 'drm-fixes-for-v4.13-rc3' of git://people.freedesktop.org/~airlied/linux: (44 commits)
drm/i915: Fix bad comparison in skl_compute_plane_wm.
drm/i915: Force CPU synchronisation even if userspace requests ASYNC
drm/i915: Only skip updating execobject.offset after error
drm/i915: Only mark the execobject as pinned on success
drm/i915: Remove assertion from raw __i915_vma_unpin()
drm/i915/cnl: Fix loadgen select programming on ddi vswing sequence
drm/i915: Fix scaler init during CRTC HW state readout
drm/i915/selftests: Fix an error handling path in 'mock_gem_device()'
drm/i915: Unbreak gpu reset vs. modeset locking
gpu: host1x: Free the IOMMU domain when there is no device to attach
drm/i915: Fix cursor updates on some platforms
drm/i915: Fix user ptr check size in eb_relocate_vma()
drm: exynos: mark pm functions as __maybe_unused
drm/exynos: select CEC_CORE if CEC_NOTIFIER
drm/exynos/hdmi: fix disable sequence
drm/exynos: mic: add a bridge at probe
drm/exynos/dsi: Remove error handling for bridge_node DT parsing
drm/exynos: dsi: do not try to find bridge
drm: exynos: hdmi: make of_device_ids const.
drm: exynos: constify mixer_match_types and *_mxr_drv_data.
...

+298 -229
+7 -3
drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
··· 198 198 result = idr_find(&fpriv->bo_list_handles, id); 199 199 200 200 if (result) { 201 - if (kref_get_unless_zero(&result->refcount)) 201 + if (kref_get_unless_zero(&result->refcount)) { 202 + rcu_read_unlock(); 202 203 mutex_lock(&result->lock); 203 - else 204 + } else { 205 + rcu_read_unlock(); 204 206 result = NULL; 207 + } 208 + } else { 209 + rcu_read_unlock(); 205 210 } 206 - rcu_read_unlock(); 207 211 208 212 return result; 209 213 }
+13 -11
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
··· 1475 1475 1476 1476 static void gfx_v9_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance) 1477 1477 { 1478 - u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); 1478 + u32 data; 1479 1479 1480 - if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { 1481 - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); 1480 + if (instance == 0xffffffff) 1481 + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); 1482 + else 1483 + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); 1484 + 1485 + if (se_num == 0xffffffff) 1482 1486 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); 1483 - } else if (se_num == 0xffffffff) { 1484 - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); 1485 - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); 1486 - } else if (sh_num == 0xffffffff) { 1487 + else 1488 + data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1489 + 1490 + if (sh_num == 0xffffffff) 1487 1491 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); 1488 - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1489 - } else { 1492 + else 1490 1493 data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_INDEX, sh_num); 1491 - data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_INDEX, se_num); 1492 - } 1494 + 1493 1495 WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, data); 1494 1496 } 1495 1497
+3 -9
drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
··· 2128 2128 pp_table->AvfsGbCksOff.m2_shift = 12; 2129 2129 pp_table->AvfsGbCksOff.b_shift = 0; 2130 2130 2131 - for (i = 0; i < dep_table->count; i++) { 2132 - if (dep_table->entries[i].sclk_offset == 0) 2133 - pp_table->StaticVoltageOffsetVid[i] = 248; 2134 - else 2135 - pp_table->StaticVoltageOffsetVid[i] = 2136 - (uint8_t)(dep_table->entries[i].sclk_offset * 2137 - VOLTAGE_VID_OFFSET_SCALE2 / 2138 - VOLTAGE_VID_OFFSET_SCALE1); 2139 - } 2131 + for (i = 0; i < dep_table->count; i++) 2132 + pp_table->StaticVoltageOffsetVid[i] = 2133 + convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset)); 2140 2134 2141 2135 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT != 2142 2136 data->disp_clk_quad_eqn_a) &&
+3 -2
drivers/gpu/drm/drm_dp_helper.c
··· 544 544 DP_DETAILED_CAP_INFO_AVAILABLE; 545 545 int clk; 546 546 int bpc; 547 - char id[6]; 547 + char id[7]; 548 548 int len; 549 549 uint8_t rev[2]; 550 550 int type = port_cap[0] & DP_DS_PORT_TYPE_MASK; ··· 583 583 seq_puts(m, "\t\tType: N/A\n"); 584 584 } 585 585 586 + memset(id, 0, sizeof(id)); 586 587 drm_dp_downstream_id(aux, id); 587 588 seq_printf(m, "\t\tID: %s\n", id); 588 589 ··· 592 591 seq_printf(m, "\t\tHW: %d.%d\n", 593 592 (rev[0] & 0xf0) >> 4, rev[0] & 0xf); 594 593 595 - len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, &rev, 2); 594 + len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2); 596 595 if (len > 0) 597 596 seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); 598 597
+1
drivers/gpu/drm/exynos/Kconfig
··· 75 75 config DRM_EXYNOS_HDMI 76 76 bool "HDMI" 77 77 depends on DRM_EXYNOS_MIXER || DRM_EXYNOS5433_DECON 78 + select CEC_CORE if CEC_NOTIFIER 78 79 help 79 80 Choose this option if you want to use Exynos HDMI for DRM. 80 81
-1
drivers/gpu/drm/exynos/exynos_drm_drv.c
··· 453 453 struct component_match *match; 454 454 455 455 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 456 - exynos_drm_driver.num_ioctls = ARRAY_SIZE(exynos_ioctls); 457 456 458 457 match = exynos_drm_match_add(&pdev->dev); 459 458 if (IS_ERR(match))
+5 -5
drivers/gpu/drm/exynos/exynos_drm_dsi.c
··· 1651 1651 return ret; 1652 1652 1653 1653 dsi->bridge_node = of_graph_get_remote_node(node, DSI_PORT_IN, 0); 1654 - if (!dsi->bridge_node) 1655 - return -EINVAL; 1656 1654 1657 1655 return 0; 1658 1656 } ··· 1685 1687 return ret; 1686 1688 } 1687 1689 1688 - bridge = of_drm_find_bridge(dsi->bridge_node); 1689 - if (bridge) 1690 - drm_bridge_attach(encoder, bridge, NULL); 1690 + if (dsi->bridge_node) { 1691 + bridge = of_drm_find_bridge(dsi->bridge_node); 1692 + if (bridge) 1693 + drm_bridge_attach(encoder, bridge, NULL); 1694 + } 1691 1695 1692 1696 return mipi_dsi_host_register(&dsi->dsi_host); 1693 1697 }
+15 -9
drivers/gpu/drm/exynos/exynos_drm_mic.c
··· 340 340 void *data) 341 341 { 342 342 struct exynos_mic *mic = dev_get_drvdata(dev); 343 - int ret; 344 343 345 - mic->bridge.funcs = &mic_bridge_funcs; 346 - mic->bridge.of_node = dev->of_node; 347 344 mic->bridge.driver_private = mic; 348 - ret = drm_bridge_add(&mic->bridge); 349 - if (ret) 350 - DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); 351 345 352 - return ret; 346 + return 0; 353 347 } 354 348 355 349 static void exynos_mic_unbind(struct device *dev, struct device *master, ··· 359 365 360 366 already_disabled: 361 367 mutex_unlock(&mic_mutex); 362 - 363 - drm_bridge_remove(&mic->bridge); 364 368 } 365 369 366 370 static const struct component_ops exynos_mic_component_ops = { ··· 453 461 454 462 platform_set_drvdata(pdev, mic); 455 463 464 + mic->bridge.funcs = &mic_bridge_funcs; 465 + mic->bridge.of_node = dev->of_node; 466 + 467 + ret = drm_bridge_add(&mic->bridge); 468 + if (ret) { 469 + DRM_ERROR("mic: Failed to add MIC to the global bridge list\n"); 470 + return ret; 471 + } 472 + 456 473 pm_runtime_enable(dev); 457 474 458 475 ret = component_add(dev, &exynos_mic_component_ops); ··· 480 479 481 480 static int exynos_mic_remove(struct platform_device *pdev) 482 481 { 482 + struct exynos_mic *mic = platform_get_drvdata(pdev); 483 + 483 484 component_del(&pdev->dev, &exynos_mic_component_ops); 484 485 pm_runtime_disable(&pdev->dev); 486 + 487 + drm_bridge_remove(&mic->bridge); 488 + 485 489 return 0; 486 490 } 487 491
+3 -7
drivers/gpu/drm/exynos/exynos_hdmi.c
··· 1501 1501 */ 1502 1502 cancel_delayed_work(&hdata->hotplug_work); 1503 1503 cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); 1504 - 1505 - hdmiphy_disable(hdata); 1506 1504 } 1507 1505 1508 1506 static const struct drm_encoder_helper_funcs exynos_hdmi_encoder_helper_funcs = { ··· 1674 1676 return hdmi_bridge_init(hdata); 1675 1677 } 1676 1678 1677 - static struct of_device_id hdmi_match_types[] = { 1679 + static const struct of_device_id hdmi_match_types[] = { 1678 1680 { 1679 1681 .compatible = "samsung,exynos4210-hdmi", 1680 1682 .data = &exynos4210_hdmi_driver_data, ··· 1932 1934 return 0; 1933 1935 } 1934 1936 1935 - #ifdef CONFIG_PM 1936 - static int exynos_hdmi_suspend(struct device *dev) 1937 + static int __maybe_unused exynos_hdmi_suspend(struct device *dev) 1937 1938 { 1938 1939 struct hdmi_context *hdata = dev_get_drvdata(dev); 1939 1940 ··· 1941 1944 return 0; 1942 1945 } 1943 1946 1944 - static int exynos_hdmi_resume(struct device *dev) 1947 + static int __maybe_unused exynos_hdmi_resume(struct device *dev) 1945 1948 { 1946 1949 struct hdmi_context *hdata = dev_get_drvdata(dev); 1947 1950 int ret; ··· 1952 1955 1953 1956 return 0; 1954 1957 } 1955 - #endif 1956 1958 1957 1959 static const struct dev_pm_ops exynos_hdmi_pm_ops = { 1958 1960 SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
+5 -5
drivers/gpu/drm/exynos/exynos_mixer.c
··· 1094 1094 .atomic_check = mixer_atomic_check, 1095 1095 }; 1096 1096 1097 - static struct mixer_drv_data exynos5420_mxr_drv_data = { 1097 + static const struct mixer_drv_data exynos5420_mxr_drv_data = { 1098 1098 .version = MXR_VER_128_0_0_184, 1099 1099 .is_vp_enabled = 0, 1100 1100 }; 1101 1101 1102 - static struct mixer_drv_data exynos5250_mxr_drv_data = { 1102 + static const struct mixer_drv_data exynos5250_mxr_drv_data = { 1103 1103 .version = MXR_VER_16_0_33_0, 1104 1104 .is_vp_enabled = 0, 1105 1105 }; 1106 1106 1107 - static struct mixer_drv_data exynos4212_mxr_drv_data = { 1107 + static const struct mixer_drv_data exynos4212_mxr_drv_data = { 1108 1108 .version = MXR_VER_0_0_0_16, 1109 1109 .is_vp_enabled = 1, 1110 1110 }; 1111 1111 1112 - static struct mixer_drv_data exynos4210_mxr_drv_data = { 1112 + static const struct mixer_drv_data exynos4210_mxr_drv_data = { 1113 1113 .version = MXR_VER_0_0_0_16, 1114 1114 .is_vp_enabled = 1, 1115 1115 .has_sclk = 1, 1116 1116 }; 1117 1117 1118 - static struct of_device_id mixer_match_types[] = { 1118 + static const struct of_device_id mixer_match_types[] = { 1119 1119 { 1120 1120 .compatible = "samsung,exynos4210-mixer", 1121 1121 .data = &exynos4210_mxr_drv_data,
+11 -11
drivers/gpu/drm/i915/gvt/display.c
··· 323 323 { 324 324 struct intel_gvt_irq *irq = &gvt->irq; 325 325 struct intel_vgpu *vgpu; 326 - bool have_enabled_pipe = false; 327 326 int pipe, id; 328 327 329 328 if (WARN_ON(!mutex_is_locked(&gvt->lock))) 330 329 return; 331 330 332 - hrtimer_cancel(&irq->vblank_timer.timer); 333 - 334 331 for_each_active_vgpu(gvt, vgpu, id) { 335 332 for (pipe = 0; pipe < I915_MAX_PIPES; pipe++) { 336 - have_enabled_pipe = 337 - pipe_is_enabled(vgpu, pipe); 338 - if (have_enabled_pipe) 339 - break; 333 + if (pipe_is_enabled(vgpu, pipe)) 334 + goto out; 340 335 } 341 336 } 342 337 343 - if (have_enabled_pipe) 344 - hrtimer_start(&irq->vblank_timer.timer, 345 - ktime_add_ns(ktime_get(), irq->vblank_timer.period), 346 - HRTIMER_MODE_ABS); 338 + /* all the pipes are disabled */ 339 + hrtimer_cancel(&irq->vblank_timer.timer); 340 + return; 341 + 342 + out: 343 + hrtimer_start(&irq->vblank_timer.timer, 344 + ktime_add_ns(ktime_get(), irq->vblank_timer.period), 345 + HRTIMER_MODE_ABS); 346 + 347 347 } 348 348 349 349 static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
+4 -3
drivers/gpu/drm/i915/i915_gem_clflush.c
··· 114 114 return NOTIFY_DONE; 115 115 } 116 116 117 - void i915_gem_clflush_object(struct drm_i915_gem_object *obj, 117 + bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, 118 118 unsigned int flags) 119 119 { 120 120 struct clflush *clflush; ··· 128 128 */ 129 129 if (!i915_gem_object_has_struct_page(obj)) { 130 130 obj->cache_dirty = false; 131 - return; 131 + return false; 132 132 } 133 133 134 134 /* If the GPU is snooping the contents of the CPU cache, ··· 140 140 * tracking. 141 141 */ 142 142 if (!(flags & I915_CLFLUSH_FORCE) && obj->cache_coherent) 143 - return; 143 + return false; 144 144 145 145 trace_i915_gem_object_clflush(obj); 146 146 ··· 179 179 } 180 180 181 181 obj->cache_dirty = false; 182 + return true; 182 183 }
+1 -1
drivers/gpu/drm/i915/i915_gem_clflush.h
··· 28 28 struct drm_i915_private; 29 29 struct drm_i915_gem_object; 30 30 31 - void i915_gem_clflush_object(struct drm_i915_gem_object *obj, 31 + bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, 32 32 unsigned int flags); 33 33 #define I915_CLFLUSH_FORCE BIT(0) 34 34 #define I915_CLFLUSH_SYNC BIT(1)
+13 -11
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 560 560 eb->args->flags |= __EXEC_HAS_RELOC; 561 561 } 562 562 563 - entry->flags |= __EXEC_OBJECT_HAS_PIN; 564 - GEM_BUG_ON(eb_vma_misplaced(entry, vma)); 565 - 566 563 if (unlikely(entry->flags & EXEC_OBJECT_NEEDS_FENCE)) { 567 564 err = i915_vma_get_fence(vma); 568 565 if (unlikely(err)) { ··· 570 573 if (i915_vma_pin_fence(vma)) 571 574 entry->flags |= __EXEC_OBJECT_HAS_FENCE; 572 575 } 576 + 577 + entry->flags |= __EXEC_OBJECT_HAS_PIN; 578 + GEM_BUG_ON(eb_vma_misplaced(entry, vma)); 573 579 574 580 return 0; 575 581 } ··· 1458 1458 * to read. However, if the array is not writable the user loses 1459 1459 * the updated relocation values. 1460 1460 */ 1461 - if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(urelocs)))) 1461 + if (unlikely(!access_ok(VERIFY_READ, urelocs, remain*sizeof(*urelocs)))) 1462 1462 return -EFAULT; 1463 1463 1464 1464 do { ··· 1775 1775 } 1776 1776 } 1777 1777 1778 - return err ?: have_copy; 1778 + return err; 1779 1779 } 1780 1780 1781 1781 static int eb_relocate(struct i915_execbuffer *eb) ··· 1825 1825 int err; 1826 1826 1827 1827 for (i = 0; i < count; i++) { 1828 - const struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 1828 + struct drm_i915_gem_exec_object2 *entry = &eb->exec[i]; 1829 1829 struct i915_vma *vma = exec_to_vma(entry); 1830 1830 struct drm_i915_gem_object *obj = vma->obj; 1831 1831 ··· 1841 1841 eb->request->capture_list = capture; 1842 1842 } 1843 1843 1844 + if (unlikely(obj->cache_dirty && !obj->cache_coherent)) { 1845 + if (i915_gem_clflush_object(obj, 0)) 1846 + entry->flags &= ~EXEC_OBJECT_ASYNC; 1847 + } 1848 + 1844 1849 if (entry->flags & EXEC_OBJECT_ASYNC) 1845 1850 goto skip_flushes; 1846 - 1847 - if (unlikely(obj->cache_dirty && !obj->cache_coherent)) 1848 - i915_gem_clflush_object(obj, 0); 1849 1851 1850 1852 err = i915_gem_request_await_object 1851 1853 (eb->request, obj, entry->flags & EXEC_OBJECT_WRITE); ··· 2211 2209 goto err_unlock; 2212 2210 2213 2211 err = eb_relocate(&eb); 2214 
- if (err) 2212 + if (err) { 2215 2213 /* 2216 2214 * If the user expects the execobject.offset and 2217 2215 * reloc.presumed_offset to be an exact match, ··· 2220 2218 * relocation. 2221 2219 */ 2222 2220 args->flags &= ~__EXEC_HAS_RELOC; 2223 - if (err < 0) 2224 2221 goto err_vma; 2222 + } 2225 2223 2226 2224 if (unlikely(eb.batch->exec_entry->flags & EXEC_OBJECT_WRITE)) { 2227 2225 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+1 -1
drivers/gpu/drm/i915/i915_vma.h
··· 284 284 285 285 static inline void __i915_vma_unpin(struct i915_vma *vma) 286 286 { 287 - GEM_BUG_ON(!i915_vma_is_pinned(vma)); 288 287 vma->flags--; 289 288 } 290 289 291 290 static inline void i915_vma_unpin(struct i915_vma *vma) 292 291 { 292 + GEM_BUG_ON(!i915_vma_is_pinned(vma)); 293 293 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); 294 294 __i915_vma_unpin(vma); 295 295 }
+2 -2
drivers/gpu/drm/i915/intel_ddi.c
··· 1896 1896 val = I915_READ(CNL_PORT_TX_DW4_LN(port, ln)); 1897 1897 val &= ~LOADGEN_SELECT; 1898 1898 1899 - if (((rate < 600000) && (width == 4) && (ln >= 1)) || 1900 - ((rate < 600000) && (width < 4) && ((ln == 1) || (ln == 2)))) { 1899 + if ((rate <= 600000 && width == 4 && ln >= 1) || 1900 + (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) { 1901 1901 val |= LOADGEN_SELECT; 1902 1902 } 1903 1903 I915_WRITE(CNL_PORT_TX_DW4_LN(port, ln), val);
+36 -50
drivers/gpu/drm/i915/intel_display.c
··· 3427 3427 intel_finish_page_flip_cs(dev_priv, crtc->pipe); 3428 3428 } 3429 3429 3430 - static void intel_update_primary_planes(struct drm_device *dev) 3431 - { 3432 - struct drm_crtc *crtc; 3433 - 3434 - for_each_crtc(dev, crtc) { 3435 - struct intel_plane *plane = to_intel_plane(crtc->primary); 3436 - struct intel_plane_state *plane_state = 3437 - to_intel_plane_state(plane->base.state); 3438 - 3439 - if (plane_state->base.visible) { 3440 - trace_intel_update_plane(&plane->base, 3441 - to_intel_crtc(crtc)); 3442 - 3443 - plane->update_plane(plane, 3444 - to_intel_crtc_state(crtc->state), 3445 - plane_state); 3446 - } 3447 - } 3448 - } 3449 - 3450 3430 static int 3451 3431 __intel_display_resume(struct drm_device *dev, 3452 3432 struct drm_atomic_state *state, ··· 3479 3499 struct drm_atomic_state *state; 3480 3500 int ret; 3481 3501 3502 + 3503 + /* reset doesn't touch the display */ 3504 + if (!i915.force_reset_modeset_test && 3505 + !gpu_reset_clobbers_display(dev_priv)) 3506 + return; 3507 + 3482 3508 /* 3483 3509 * Need mode_config.mutex so that we don't 3484 3510 * trample ongoing ->detect() and whatnot. ··· 3498 3512 3499 3513 drm_modeset_backoff(ctx); 3500 3514 } 3501 - 3502 - /* reset doesn't touch the display, but flips might get nuked anyway, */ 3503 - if (!i915.force_reset_modeset_test && 3504 - !gpu_reset_clobbers_display(dev_priv)) 3505 - return; 3506 - 3507 3515 /* 3508 3516 * Disabling the crtcs gracefully seems nicer. Also the 3509 3517 * g33 docs say we should at least disable all the planes. 
··· 3527 3547 struct drm_atomic_state *state = dev_priv->modeset_restore_state; 3528 3548 int ret; 3529 3549 3550 + /* reset doesn't touch the display */ 3551 + if (!i915.force_reset_modeset_test && 3552 + !gpu_reset_clobbers_display(dev_priv)) 3553 + return; 3554 + 3555 + if (!state) 3556 + goto unlock; 3557 + 3530 3558 /* 3531 3559 * Flips in the rings will be nuked by the reset, 3532 3560 * so complete all pending flips so that user space ··· 3546 3558 3547 3559 /* reset doesn't touch the display */ 3548 3560 if (!gpu_reset_clobbers_display(dev_priv)) { 3549 - if (!state) { 3550 - /* 3551 - * Flips in the rings have been nuked by the reset, 3552 - * so update the base address of all primary 3553 - * planes to the the last fb to make sure we're 3554 - * showing the correct fb after a reset. 3555 - * 3556 - * FIXME: Atomic will make this obsolete since we won't schedule 3557 - * CS-based flips (which might get lost in gpu resets) any more. 3558 - */ 3559 - intel_update_primary_planes(dev); 3560 - } else { 3561 - ret = __intel_display_resume(dev, state, ctx); 3561 + /* for testing only restore the display */ 3562 + ret = __intel_display_resume(dev, state, ctx); 3562 3563 if (ret) 3563 3564 DRM_ERROR("Restoring old state failed with %i\n", ret); 3564 - } 3565 3565 } else { 3566 3566 /* 3567 3567 * The display has been reset as well, ··· 3573 3597 intel_hpd_init(dev_priv); 3574 3598 } 3575 3599 3576 - if (state) 3577 - drm_atomic_state_put(state); 3600 + drm_atomic_state_put(state); 3601 + unlock: 3578 3602 drm_modeset_drop_locks(ctx); 3579 3603 drm_modeset_acquire_fini(ctx); 3580 3604 mutex_unlock(&dev->mode_config.mutex); ··· 9093 9117 u64 power_domain_mask; 9094 9118 bool active; 9095 9119 9120 + if (INTEL_GEN(dev_priv) >= 9) { 9121 + intel_crtc_init_scalers(crtc, pipe_config); 9122 + 9123 + pipe_config->scaler_state.scaler_id = -1; 9124 + pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 9125 + } 9126 + 9096 9127 power_domain = 
POWER_DOMAIN_PIPE(crtc->pipe); 9097 9128 if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) 9098 9129 return false; ··· 9127 9144 9128 9145 pipe_config->gamma_mode = 9129 9146 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK; 9130 - 9131 - if (INTEL_GEN(dev_priv) >= 9) { 9132 - intel_crtc_init_scalers(crtc, pipe_config); 9133 - 9134 - pipe_config->scaler_state.scaler_id = -1; 9135 - pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX); 9136 - } 9137 9147 9138 9148 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe); 9139 9149 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) { ··· 9516 9540 * On some platforms writing CURCNTR first will also 9517 9541 * cause CURPOS to be armed by the CURBASE write. 9518 9542 * Without the CURCNTR write the CURPOS write would 9519 - * arm itself. 9543 + * arm itself. Thus we always start the full update 9544 + * with a CURCNTR write. 9545 + * 9546 + * On other platforms CURPOS always requires the 9547 + * CURBASE write to arm the update. Additonally 9548 + * a write to any of the cursor register will cancel 9549 + * an already armed cursor update. Thus leaving out 9550 + * the CURBASE write after CURPOS could lead to a 9551 + * cursor that doesn't appear to move, or even change 9552 + * shape. Thus we always write CURBASE. 9520 9553 * 9521 9554 * CURCNTR and CUR_FBC_CTL are always 9522 9555 * armed by the CURBASE write only. ··· 9544 9559 plane->cursor.cntl = cntl; 9545 9560 } else { 9546 9561 I915_WRITE_FW(CURPOS(pipe), pos); 9562 + I915_WRITE_FW(CURBASE(pipe), base); 9547 9563 } 9548 9564 9549 9565 POSTING_READ_FW(CURBASE(pipe));
+1 -1
drivers/gpu/drm/i915/intel_gvt.c
··· 45 45 return true; 46 46 if (IS_SKYLAKE(dev_priv)) 47 47 return true; 48 - if (IS_KABYLAKE(dev_priv) && INTEL_DEVID(dev_priv) == 0x591D) 48 + if (IS_KABYLAKE(dev_priv)) 49 49 return true; 50 50 return false; 51 51 }
+2 -2
drivers/gpu/drm/i915/intel_pm.c
··· 4463 4463 if ((cpp * cstate->base.adjusted_mode.crtc_htotal / 512 < 1) && 4464 4464 (plane_bytes_per_line / 512 < 1)) 4465 4465 selected_result = method2; 4466 - else if ((ddb_allocation && ddb_allocation / 4467 - fixed_16_16_to_u32_round_up(plane_blocks_per_line)) >= 1) 4466 + else if (ddb_allocation >= 4467 + fixed_16_16_to_u32_round_up(plane_blocks_per_line)) 4468 4468 selected_result = min_fixed_16_16(method1, method2); 4469 4469 else if (latency >= linetime_us) 4470 4470 selected_result = min_fixed_16_16(method1, method2);
+1 -1
drivers/gpu/drm/i915/selftests/mock_gem_device.c
··· 206 206 mkwrite_device_info(i915)->ring_mask = BIT(0); 207 207 i915->engine[RCS] = mock_engine(i915, "mock"); 208 208 if (!i915->engine[RCS]) 209 - goto err_dependencies; 209 + goto err_priorities; 210 210 211 211 i915->kernel_context = mock_context(i915, NULL); 212 212 if (!i915->kernel_context)
-2
drivers/gpu/drm/nouveau/nouveau_connector.c
··· 1158 1158 return -ENODEV; 1159 1159 if (WARN_ON(msg->size > 16)) 1160 1160 return -E2BIG; 1161 - if (msg->size == 0) 1162 - return msg->size; 1163 1161 1164 1162 ret = nvkm_i2c_aux_acquire(aux); 1165 1163 if (ret)
-5
drivers/gpu/drm/nouveau/nouveau_display.c
··· 409 409 struct nouveau_display *disp = nouveau_display(dev); 410 410 struct nouveau_drm *drm = nouveau_drm(dev); 411 411 struct drm_connector *connector; 412 - struct drm_crtc *crtc; 413 412 414 413 if (!suspend) { 415 414 if (drm_drv_uses_atomic_modeset(dev)) ··· 416 417 else 417 418 drm_crtc_force_disable_all(dev); 418 419 } 419 - 420 - /* Make sure that drm and hw vblank irqs get properly disabled. */ 421 - drm_for_each_crtc(crtc, dev) 422 - drm_crtc_vblank_off(crtc); 423 420 424 421 /* disable flip completion events */ 425 422 nvif_notify_put(&drm->flip);
+23 -8
drivers/gpu/drm/nouveau/nv50_display.c
··· 3674 3674 drm_mode_connector_attach_encoder(connector, encoder); 3675 3675 3676 3676 if (dcbe->type == DCB_OUTPUT_DP) { 3677 + struct nv50_disp *disp = nv50_disp(encoder->dev); 3677 3678 struct nvkm_i2c_aux *aux = 3678 3679 nvkm_i2c_aux_find(i2c, dcbe->i2c_index); 3679 3680 if (aux) { 3680 - nv_encoder->i2c = &nv_connector->aux.ddc; 3681 + if (disp->disp->oclass < GF110_DISP) { 3682 + /* HW has no support for address-only 3683 + * transactions, so we're required to 3684 + * use custom I2C-over-AUX code. 3685 + */ 3686 + nv_encoder->i2c = &aux->i2c; 3687 + } else { 3688 + nv_encoder->i2c = &nv_connector->aux.ddc; 3689 + } 3681 3690 nv_encoder->aux = aux; 3682 3691 } 3683 3692 3684 3693 /*TODO: Use DP Info Table to check for support. */ 3685 - if (nv50_disp(encoder->dev)->disp->oclass >= GF110_DISP) { 3694 + if (disp->disp->oclass >= GF110_DISP) { 3686 3695 ret = nv50_mstm_new(nv_encoder, &nv_connector->aux, 16, 3687 3696 nv_connector->base.base.id, 3688 3697 &nv_encoder->dp.mstm); ··· 3940 3931 3941 3932 NV_ATOMIC(drm, "%s: clr %04x (set %04x)\n", crtc->name, 3942 3933 asyh->clr.mask, asyh->set.mask); 3934 + if (crtc_state->active && !asyh->state.active) 3935 + drm_crtc_vblank_off(crtc); 3943 3936 3944 3937 if (asyh->clr.mask) { 3945 3938 nv50_head_flush_clr(head, asyh, atom->flush_disable); ··· 4027 4016 nv50_head_flush_set(head, asyh); 4028 4017 interlock_core = 1; 4029 4018 } 4030 - } 4031 4019 4032 - for_each_crtc_in_state(state, crtc, crtc_state, i) { 4033 - if (crtc->state->event) 4034 - drm_crtc_vblank_get(crtc); 4020 + if (asyh->state.active) { 4021 + if (!crtc_state->active) 4022 + drm_crtc_vblank_on(crtc); 4023 + if (asyh->state.event) 4024 + drm_crtc_vblank_get(crtc); 4025 + } 4035 4026 } 4036 4027 4037 4028 /* Update plane(s). 
*/ ··· 4080 4067 if (crtc->state->event) { 4081 4068 unsigned long flags; 4082 4069 /* Get correct count/ts if racing with vblank irq */ 4083 - drm_accurate_vblank_count(crtc); 4070 + if (crtc->state->active) 4071 + drm_accurate_vblank_count(crtc); 4084 4072 spin_lock_irqsave(&crtc->dev->event_lock, flags); 4085 4073 drm_crtc_send_vblank_event(crtc, crtc->state->event); 4086 4074 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 4087 4075 crtc->state->event = NULL; 4088 - drm_crtc_vblank_put(crtc); 4076 + if (crtc->state->active) 4077 + drm_crtc_vblank_put(crtc); 4089 4078 } 4090 4079 } 4091 4080
+1
drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h
··· 22 22 unsigned proto_evo:4; 23 23 enum nvkm_ior_proto { 24 24 CRT, 25 + TV, 25 26 TMDS, 26 27 LVDS, 27 28 DP,
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h
··· 22 22 u8 type[3]; 23 23 } pior; 24 24 25 - struct nv50_disp_chan *chan[17]; 25 + struct nv50_disp_chan *chan[21]; 26 26 }; 27 27 28 28 void nv50_disp_super_1(struct nv50_disp *);
+1
drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c
··· 62 62 case 0: 63 63 switch (outp->info.type) { 64 64 case DCB_OUTPUT_ANALOG: *type = DAC; return CRT; 65 + case DCB_OUTPUT_TV : *type = DAC; return TV; 65 66 case DCB_OUTPUT_TMDS : *type = SOR; return TMDS; 66 67 case DCB_OUTPUT_LVDS : *type = SOR; return LVDS; 67 68 case DCB_OUTPUT_DP : *type = SOR; return DP;
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
··· 129 129 130 130 if (bar->bar[0].mem) { 131 131 addr = nvkm_memory_addr(bar->bar[0].mem) >> 12; 132 - nvkm_wr32(device, 0x001714, 0xc0000000 | addr); 132 + nvkm_wr32(device, 0x001714, 0x80000000 | addr); 133 133 } 134 134 135 135 return 0;
+1
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
··· 25 25 26 26 nvkm-y += nvkm/subdev/i2c/aux.o 27 27 nvkm-y += nvkm/subdev/i2c/auxg94.o 28 + nvkm-y += nvkm/subdev/i2c/auxgf119.o 28 29 nvkm-y += nvkm/subdev/i2c/auxgm200.o 29 30 30 31 nvkm-y += nvkm/subdev/i2c/anx9805.o
+4
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.c
··· 117 117 nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *aux, bool retry, u8 type, 118 118 u32 addr, u8 *data, u8 *size) 119 119 { 120 + if (!*size && !aux->func->address_only) { 121 + AUX_ERR(aux, "address-only transaction dropped"); 122 + return -ENOSYS; 123 + } 120 124 return aux->func->xfer(aux, retry, type, addr, data, size); 121 125 } 122 126
+6
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
··· 3 3 #include "pad.h" 4 4 5 5 struct nvkm_i2c_aux_func { 6 + bool address_only; 6 7 int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type, 7 8 u32 addr, u8 *data, u8 *size); 8 9 int (*lnk_ctl)(struct nvkm_i2c_aux *, int link_nr, int link_bw, ··· 18 17 int nvkm_i2c_aux_xfer(struct nvkm_i2c_aux *, bool retry, u8 type, 19 18 u32 addr, u8 *data, u8 *size); 20 19 20 + int g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *, struct nvkm_i2c_pad *, 21 + int, u8, struct nvkm_i2c_aux **); 22 + 21 23 int g94_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); 24 + int g94_i2c_aux_xfer(struct nvkm_i2c_aux *, bool, u8, u32, u8 *, u8 *); 25 + int gf119_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); 22 26 int gm200_i2c_aux_new(struct nvkm_i2c_pad *, int, u8, struct nvkm_i2c_aux **); 23 27 24 28 #define AUX_MSG(b,l,f,a...) do { \
+19 -11
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
··· 72 72 return 0; 73 73 } 74 74 75 - static int 75 + int 76 76 g94_i2c_aux_xfer(struct nvkm_i2c_aux *obj, bool retry, 77 77 u8 type, u32 addr, u8 *data, u8 *size) 78 78 { ··· 105 105 } 106 106 107 107 ctrl = nvkm_rd32(device, 0x00e4e4 + base); 108 - ctrl &= ~0x0001f0ff; 108 + ctrl &= ~0x0001f1ff; 109 109 ctrl |= type << 12; 110 - ctrl |= *size - 1; 110 + ctrl |= (*size ? (*size - 1) : 0x00000100); 111 111 nvkm_wr32(device, 0x00e4e0 + base, addr); 112 112 113 113 /* (maybe) retry transaction a number of times on failure... */ ··· 160 160 return ret < 0 ? ret : (stat & 0x000f0000) >> 16; 161 161 } 162 162 163 - static const struct nvkm_i2c_aux_func 164 - g94_i2c_aux_func = { 165 - .xfer = g94_i2c_aux_xfer, 166 - }; 167 - 168 163 int 169 - g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, 170 - struct nvkm_i2c_aux **paux) 164 + g94_i2c_aux_new_(const struct nvkm_i2c_aux_func *func, 165 + struct nvkm_i2c_pad *pad, int index, u8 drive, 166 + struct nvkm_i2c_aux **paux) 171 167 { 172 168 struct g94_i2c_aux *aux; 173 169 ··· 171 175 return -ENOMEM; 172 176 *paux = &aux->base; 173 177 174 - nvkm_i2c_aux_ctor(&g94_i2c_aux_func, pad, index, &aux->base); 178 + nvkm_i2c_aux_ctor(func, pad, index, &aux->base); 175 179 aux->ch = drive; 176 180 aux->base.intr = 1 << aux->ch; 177 181 return 0; 182 + } 183 + 184 + static const struct nvkm_i2c_aux_func 185 + g94_i2c_aux = { 186 + .xfer = g94_i2c_aux_xfer, 187 + }; 188 + 189 + int 190 + g94_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, 191 + struct nvkm_i2c_aux **paux) 192 + { 193 + return g94_i2c_aux_new_(&g94_i2c_aux, pad, index, drive, paux); 178 194 }
+35
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgf119.c
··· 1 + /* 2 + * Copyright 2017 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + */ 22 + #include "aux.h" 23 + 24 + static const struct nvkm_i2c_aux_func 25 + gf119_i2c_aux = { 26 + .address_only = true, 27 + .xfer = g94_i2c_aux_xfer, 28 + }; 29 + 30 + int 31 + gf119_i2c_aux_new(struct nvkm_i2c_pad *pad, int index, u8 drive, 32 + struct nvkm_i2c_aux **paux) 33 + { 34 + return g94_i2c_aux_new_(&gf119_i2c_aux, pad, index, drive, paux); 35 + }
+3 -2
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
··· 105 105 } 106 106 107 107 ctrl = nvkm_rd32(device, 0x00d954 + base); 108 - ctrl &= ~0x0001f0ff; 108 + ctrl &= ~0x0001f1ff; 109 109 ctrl |= type << 12; 110 - ctrl |= *size - 1; 110 + ctrl |= (*size ? (*size - 1) : 0x00000100); 111 111 nvkm_wr32(device, 0x00d950 + base, addr); 112 112 113 113 /* (maybe) retry transaction a number of times on failure... */ ··· 162 162 163 163 static const struct nvkm_i2c_aux_func 164 164 gm200_i2c_aux_func = { 165 + .address_only = true, 165 166 .xfer = gm200_i2c_aux_xfer, 166 167 }; 167 168
+2 -2
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/padgf119.c
··· 28 28 static const struct nvkm_i2c_pad_func 29 29 gf119_i2c_pad_s_func = { 30 30 .bus_new_4 = gf119_i2c_bus_new, 31 - .aux_new_6 = g94_i2c_aux_new, 31 + .aux_new_6 = gf119_i2c_aux_new, 32 32 .mode = g94_i2c_pad_mode, 33 33 }; 34 34 ··· 41 41 static const struct nvkm_i2c_pad_func 42 42 gf119_i2c_pad_x_func = { 43 43 .bus_new_4 = gf119_i2c_bus_new, 44 - .aux_new_6 = g94_i2c_aux_new, 44 + .aux_new_6 = gf119_i2c_aux_new, 45 45 }; 46 46 47 47 int
+9 -10
drivers/gpu/drm/rockchip/Kconfig
··· 5 5 select DRM_KMS_HELPER 6 6 select DRM_PANEL 7 7 select VIDEOMODE_HELPERS 8 + select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP 9 + select DRM_DW_HDMI if ROCKCHIP_DW_HDMI 10 + select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI 11 + select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC 8 12 help 9 13 Choose this option if you have a Rockchip soc chipset. 10 14 This driver provides kernel mode setting and buffer ··· 16 12 2D or 3D acceleration; acceleration is performed by other 17 13 IP found on the SoC. 18 14 15 + if DRM_ROCKCHIP 16 + 19 17 config ROCKCHIP_ANALOGIX_DP 20 18 bool "Rockchip specific extensions for Analogix DP driver" 21 - depends on DRM_ROCKCHIP 22 - select DRM_ANALOGIX_DP 23 19 help 24 20 This selects support for Rockchip SoC specific extensions 25 21 for the Analogix Core DP driver. If you want to enable DP ··· 27 23 28 24 config ROCKCHIP_CDN_DP 29 25 bool "Rockchip cdn DP" 30 - depends on DRM_ROCKCHIP 31 - depends on EXTCON 32 - select SND_SOC_HDMI_CODEC if SND_SOC 26 + depends on EXTCON=y || (EXTCON=m && DRM_ROCKCHIP=m) 33 27 help 34 28 This selects support for Rockchip SoC specific extensions 35 29 for the cdn DP driver. If you want to enable Dp on ··· 36 34 37 35 config ROCKCHIP_DW_HDMI 38 36 bool "Rockchip specific extensions for Synopsys DW HDMI" 39 - depends on DRM_ROCKCHIP 40 - select DRM_DW_HDMI 41 37 help 42 38 This selects support for Rockchip SoC specific extensions 43 39 for the Synopsys DesignWare HDMI driver. If you want to ··· 44 44 45 45 config ROCKCHIP_DW_MIPI_DSI 46 46 bool "Rockchip specific extensions for Synopsys DW MIPI DSI" 47 - depends on DRM_ROCKCHIP 48 - select DRM_MIPI_DSI 49 47 help 50 48 This selects support for Rockchip SoC specific extensions 51 49 for the Synopsys DesignWare HDMI driver. 
If you want to ··· 52 54 53 55 config ROCKCHIP_INNO_HDMI 54 56 bool "Rockchip specific extensions for Innosilicon HDMI" 55 - depends on DRM_ROCKCHIP 56 57 help 57 58 This selects support for Rockchip SoC specific extensions 58 59 for the Innosilicon HDMI driver. If you want to enable 59 60 HDMI on RK3036 based SoC, you should select this option. 61 + 62 + endif
+12 -12
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
··· 30 30 #include <drm/ttm/ttm_placement.h> 31 31 #include <drm/ttm/ttm_page_alloc.h> 32 32 33 - static struct ttm_place vram_placement_flags = { 33 + static const struct ttm_place vram_placement_flags = { 34 34 .fpfn = 0, 35 35 .lpfn = 0, 36 36 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED 37 37 }; 38 38 39 - static struct ttm_place vram_ne_placement_flags = { 39 + static const struct ttm_place vram_ne_placement_flags = { 40 40 .fpfn = 0, 41 41 .lpfn = 0, 42 42 .flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT 43 43 }; 44 44 45 - static struct ttm_place sys_placement_flags = { 45 + static const struct ttm_place sys_placement_flags = { 46 46 .fpfn = 0, 47 47 .lpfn = 0, 48 48 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED 49 49 }; 50 50 51 - static struct ttm_place sys_ne_placement_flags = { 51 + static const struct ttm_place sys_ne_placement_flags = { 52 52 .fpfn = 0, 53 53 .lpfn = 0, 54 54 .flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT 55 55 }; 56 56 57 - static struct ttm_place gmr_placement_flags = { 57 + static const struct ttm_place gmr_placement_flags = { 58 58 .fpfn = 0, 59 59 .lpfn = 0, 60 60 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED 61 61 }; 62 62 63 - static struct ttm_place gmr_ne_placement_flags = { 63 + static const struct ttm_place gmr_ne_placement_flags = { 64 64 .fpfn = 0, 65 65 .lpfn = 0, 66 66 .flags = VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT 67 67 }; 68 68 69 - static struct ttm_place mob_placement_flags = { 69 + static const struct ttm_place mob_placement_flags = { 70 70 .fpfn = 0, 71 71 .lpfn = 0, 72 72 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED 73 73 }; 74 74 75 - static struct ttm_place mob_ne_placement_flags = { 75 + static const struct ttm_place mob_ne_placement_flags = { 76 76 .fpfn = 0, 77 77 .lpfn = 0, 78 78 .flags = VMW_PL_FLAG_MOB | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT ··· 85 85 .busy_placement = &vram_placement_flags 86 86 }; 87 87 88 - static struct 
ttm_place vram_gmr_placement_flags[] = { 88 + static const struct ttm_place vram_gmr_placement_flags[] = { 89 89 { 90 90 .fpfn = 0, 91 91 .lpfn = 0, ··· 97 97 } 98 98 }; 99 99 100 - static struct ttm_place gmr_vram_placement_flags[] = { 100 + static const struct ttm_place gmr_vram_placement_flags[] = { 101 101 { 102 102 .fpfn = 0, 103 103 .lpfn = 0, ··· 116 116 .busy_placement = &gmr_placement_flags 117 117 }; 118 118 119 - static struct ttm_place vram_gmr_ne_placement_flags[] = { 119 + static const struct ttm_place vram_gmr_ne_placement_flags[] = { 120 120 { 121 121 .fpfn = 0, 122 122 .lpfn = 0, ··· 165 165 .busy_placement = &sys_ne_placement_flags 166 166 }; 167 167 168 - static struct ttm_place evictable_placement_flags[] = { 168 + static const struct ttm_place evictable_placement_flags[] = { 169 169 { 170 170 .fpfn = 0, 171 171 .lpfn = 0,
+4 -6
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 779 779 if (ret) 780 780 return ret; 781 781 782 - header->cb_header = dma_pool_alloc(man->headers, GFP_KERNEL, 783 - &header->handle); 782 + header->cb_header = dma_pool_zalloc(man->headers, GFP_KERNEL, 783 + &header->handle); 784 784 if (!header->cb_header) { 785 785 ret = -ENOMEM; 786 786 goto out_no_cb_header; ··· 790 790 cb_hdr = header->cb_header; 791 791 offset = header->node.start << PAGE_SHIFT; 792 792 header->cmd = man->map + offset; 793 - memset(cb_hdr, 0, sizeof(*cb_hdr)); 794 793 if (man->using_mob) { 795 794 cb_hdr->flags = SVGA_CB_FLAG_MOB; 796 795 cb_hdr->ptr.mob.mobid = man->cmd_space->mem.start; ··· 826 827 if (WARN_ON_ONCE(size > VMW_CMDBUF_INLINE_SIZE)) 827 828 return -ENOMEM; 828 829 829 - dheader = dma_pool_alloc(man->dheaders, GFP_KERNEL, 830 - &header->handle); 830 + dheader = dma_pool_zalloc(man->dheaders, GFP_KERNEL, 831 + &header->handle); 831 832 if (!dheader) 832 833 return -ENOMEM; 833 834 ··· 836 837 cb_hdr = &dheader->cb_header; 837 838 header->cb_header = cb_hdr; 838 839 header->cmd = dheader->cmd; 839 - memset(dheader, 0, sizeof(*dheader)); 840 840 cb_hdr->status = SVGA_CB_STATUS_NONE; 841 841 cb_hdr->flags = SVGA_CB_FLAG_NONE; 842 842 cb_hdr->ptr.pa = (u64)header->handle +
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
··· 205 205 int ret; 206 206 207 207 cres = kzalloc(sizeof(*cres), GFP_KERNEL); 208 - if (unlikely(cres == NULL)) 208 + if (unlikely(!cres)) 209 209 return -ENOMEM; 210 210 211 211 cres->hash.key = user_key | (res_type << 24); ··· 291 291 int ret; 292 292 293 293 man = kzalloc(sizeof(*man), GFP_KERNEL); 294 - if (man == NULL) 294 + if (!man) 295 295 return ERR_PTR(-ENOMEM); 296 296 297 297 man->dev_priv = dev_priv;
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 210 210 for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 211 211 uctx->cotables[i] = vmw_cotable_alloc(dev_priv, 212 212 &uctx->res, i); 213 - if (unlikely(uctx->cotables[i] == NULL)) { 214 - ret = -ENOMEM; 213 + if (unlikely(IS_ERR(uctx->cotables[i]))) { 214 + ret = PTR_ERR(uctx->cotables[i]); 215 215 goto out_cotables; 216 216 } 217 217 } ··· 777 777 } 778 778 779 779 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 780 - if (unlikely(ctx == NULL)) { 780 + if (unlikely(!ctx)) { 781 781 ttm_mem_global_free(vmw_mem_glob(dev_priv), 782 782 vmw_user_context_size); 783 783 ret = -ENOMEM;
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 584 584 return ERR_PTR(ret); 585 585 586 586 vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); 587 - if (unlikely(vcotbl == NULL)) { 587 + if (unlikely(!vcotbl)) { 588 588 ret = -ENOMEM; 589 589 goto out_no_alloc; 590 590 }
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 227 227 DRM_AUTH | DRM_RENDER_ALLOW), 228 228 }; 229 229 230 - static struct pci_device_id vmw_pci_id_list[] = { 230 + static const struct pci_device_id vmw_pci_id_list[] = { 231 231 {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, 232 232 {0, 0, 0} 233 233 }; ··· 630 630 char host_log[100] = {0}; 631 631 632 632 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 633 - if (unlikely(dev_priv == NULL)) { 633 + if (unlikely(!dev_priv)) { 634 634 DRM_ERROR("Failed allocating a device private struct.\n"); 635 635 return -ENOMEM; 636 636 } ··· 1035 1035 int ret = -ENOMEM; 1036 1036 1037 1037 vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL); 1038 - if (unlikely(vmw_fp == NULL)) 1038 + if (unlikely(!vmw_fp)) 1039 1039 return ret; 1040 1040 1041 1041 vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10); ··· 1196 1196 struct vmw_master *vmaster; 1197 1197 1198 1198 vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL); 1199 - if (unlikely(vmaster == NULL)) 1199 + if (unlikely(!vmaster)) 1200 1200 return -ENOMEM; 1201 1201 1202 1202 vmw_master_init(vmaster);
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 264 264 } 265 265 266 266 node = kzalloc(sizeof(*node), GFP_KERNEL); 267 - if (unlikely(node == NULL)) { 267 + if (unlikely(!node)) { 268 268 DRM_ERROR("Failed to allocate a resource validation " 269 269 "entry.\n"); 270 270 return -ENOMEM; ··· 452 452 struct vmw_resource_relocation *rel; 453 453 454 454 rel = kmalloc(sizeof(*rel), GFP_KERNEL); 455 - if (unlikely(rel == NULL)) { 455 + if (unlikely(!rel)) { 456 456 DRM_ERROR("Failed to allocate a resource relocation.\n"); 457 457 return -ENOMEM; 458 458 } ··· 519 519 struct vmw_sw_context *sw_context, 520 520 SVGA3dCmdHeader *header) 521 521 { 522 - return capable(CAP_SYS_ADMIN) ? : -EINVAL; 522 + return -EINVAL; 523 523 } 524 524 525 525 static int vmw_cmd_ok(struct vmw_private *dev_priv, ··· 2584 2584 2585 2585 /** 2586 2586 * vmw_cmd_dx_ia_set_vertex_buffers - Validate an 2587 - * SVGA_3D_CMD_DX_IA_SET_VERTEX_BUFFERS command. 2587 + * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. 2588 2588 * 2589 2589 * @dev_priv: Pointer to a device private struct. 2590 2590 * @sw_context: The software context being used for this batch.
+5 -5
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 284 284 { 285 285 struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL); 286 286 287 - if (unlikely(fman == NULL)) 287 + if (unlikely(!fman)) 288 288 return NULL; 289 289 290 290 fman->dev_priv = dev_priv; ··· 541 541 int ret; 542 542 543 543 fence = kzalloc(sizeof(*fence), GFP_KERNEL); 544 - if (unlikely(fence == NULL)) 544 + if (unlikely(!fence)) 545 545 return -ENOMEM; 546 546 547 547 ret = vmw_fence_obj_init(fman, fence, seqno, ··· 606 606 return ret; 607 607 608 608 ufence = kzalloc(sizeof(*ufence), GFP_KERNEL); 609 - if (unlikely(ufence == NULL)) { 609 + if (unlikely(!ufence)) { 610 610 ret = -ENOMEM; 611 611 goto out_no_object; 612 612 } ··· 966 966 struct vmw_fence_manager *fman = fman_from_fence(fence); 967 967 968 968 eaction = kzalloc(sizeof(*eaction), GFP_KERNEL); 969 - if (unlikely(eaction == NULL)) 969 + if (unlikely(!eaction)) 970 970 return -ENOMEM; 971 971 972 972 eaction->event = event; ··· 1002 1002 int ret; 1003 1003 1004 1004 event = kzalloc(sizeof(*event), GFP_KERNEL); 1005 - if (unlikely(event == NULL)) { 1005 + if (unlikely(!event)) { 1006 1006 DRM_ERROR("Failed to allocate an event.\n"); 1007 1007 ret = -ENOMEM; 1008 1008 goto out_no_space;
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
··· 121 121 struct vmwgfx_gmrid_man *gman = 122 122 kzalloc(sizeof(*gman), GFP_KERNEL); 123 123 124 - if (unlikely(gman == NULL)) 124 + if (unlikely(!gman)) 125 125 return -ENOMEM; 126 126 127 127 spin_lock_init(&gman->lock);
+9
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 384 384 385 385 hotspot_x = du->hotspot_x; 386 386 hotspot_y = du->hotspot_y; 387 + 388 + if (plane->fb) { 389 + hotspot_x += plane->fb->hot_x; 390 + hotspot_y += plane->fb->hot_y; 391 + } 392 + 387 393 du->cursor_surface = vps->surf; 388 394 du->cursor_dmabuf = vps->dmabuf; 389 395 ··· 417 411 vmw_cursor_update_position(dev_priv, true, 418 412 du->cursor_x + hotspot_x, 419 413 du->cursor_y + hotspot_y); 414 + 415 + du->core_hotspot_x = hotspot_x - du->hotspot_x; 416 + du->core_hotspot_y = hotspot_y - du->hotspot_y; 420 417 } else { 421 418 DRM_ERROR("Failed to update cursor image\n"); 422 419 }
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
··· 320 320 321 321 if (dev_priv->has_dx) { 322 322 *otables = kmemdup(dx_tables, sizeof(dx_tables), GFP_KERNEL); 323 - if (*otables == NULL) 323 + if (!(*otables)) 324 324 return -ENOMEM; 325 325 326 326 dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); 327 327 } else { 328 328 *otables = kmemdup(pre_dx_tables, sizeof(pre_dx_tables), 329 329 GFP_KERNEL); 330 - if (*otables == NULL) 330 + if (!(*otables)) 331 331 return -ENOMEM; 332 332 333 333 dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); ··· 407 407 { 408 408 struct vmw_mob *mob = kzalloc(sizeof(*mob), GFP_KERNEL); 409 409 410 - if (unlikely(mob == NULL)) 410 + if (unlikely(!mob)) 411 411 return NULL; 412 412 413 413 mob->num_pages = vmw_mob_calculate_pt_pages(data_pages);
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
··· 244 244 245 245 reply_len = ebx; 246 246 reply = kzalloc(reply_len + 1, GFP_KERNEL); 247 - if (reply == NULL) { 247 + if (!reply) { 248 248 DRM_ERROR("Cannot allocate memory for reply\n"); 249 249 return -ENOMEM; 250 250 } ··· 340 340 341 341 msg_len = strlen(guest_info_param) + strlen("info-get ") + 1; 342 342 msg = kzalloc(msg_len, GFP_KERNEL); 343 - if (msg == NULL) { 343 + if (!msg) { 344 344 DRM_ERROR("Cannot allocate memory to get %s", guest_info_param); 345 345 return -ENOMEM; 346 346 } ··· 400 400 401 401 msg_len = strlen(log) + strlen("log ") + 1; 402 402 msg = kzalloc(msg_len, GFP_KERNEL); 403 - if (msg == NULL) { 403 + if (!msg) { 404 404 DRM_ERROR("Cannot allocate memory for log message\n"); 405 405 return -ENOMEM; 406 406 }
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 446 446 int ret; 447 447 448 448 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); 449 - if (unlikely(user_bo == NULL)) { 449 + if (unlikely(!user_bo)) { 450 450 DRM_ERROR("Failed to allocate a buffer.\n"); 451 451 return -ENOMEM; 452 452 } ··· 836 836 } 837 837 838 838 backup = kzalloc(sizeof(*backup), GFP_KERNEL); 839 - if (unlikely(backup == NULL)) 839 + if (unlikely(!backup)) 840 840 return -ENOMEM; 841 841 842 842 ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 751 751 } 752 752 753 753 ushader = kzalloc(sizeof(*ushader), GFP_KERNEL); 754 - if (unlikely(ushader == NULL)) { 754 + if (unlikely(!ushader)) { 755 755 ttm_mem_global_free(vmw_mem_glob(dev_priv), 756 756 vmw_user_shader_size); 757 757 ret = -ENOMEM; ··· 821 821 } 822 822 823 823 shader = kzalloc(sizeof(*shader), GFP_KERNEL); 824 - if (unlikely(shader == NULL)) { 824 + if (unlikely(!shader)) { 825 825 ttm_mem_global_free(vmw_mem_glob(dev_priv), 826 826 vmw_shader_size); 827 827 ret = -ENOMEM; ··· 981 981 982 982 /* Allocate and pin a DMA buffer */ 983 983 buf = kzalloc(sizeof(*buf), GFP_KERNEL); 984 - if (unlikely(buf == NULL)) 984 + if (unlikely(!buf)) 985 985 return -ENOMEM; 986 986 987 987 ret = vmw_dmabuf_init(dev_priv, buf, size, &vmw_sys_ne_placement,
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
··· 1640 1640 * something arbitrarily large and we will reject any layout 1641 1641 * that doesn't fit prim_bb_mem later 1642 1642 */ 1643 - dev->mode_config.max_width = 16384; 1644 - dev->mode_config.max_height = 16384; 1643 + dev->mode_config.max_width = 8192; 1644 + dev->mode_config.max_height = 8192; 1645 1645 } 1646 1646 1647 1647 vmw_kms_create_implicit_placement_property(dev_priv, false);
+7 -1
drivers/gpu/host1x/dev.c
··· 186 186 return -ENOMEM; 187 187 188 188 err = iommu_attach_device(host->domain, &pdev->dev); 189 - if (err) 189 + if (err == -ENODEV) { 190 + iommu_domain_free(host->domain); 191 + host->domain = NULL; 192 + goto skip_iommu; 193 + } else if (err) { 190 194 goto fail_free_domain; 195 + } 191 196 192 197 geometry = &host->domain->geometry; 193 198 ··· 203 198 host->iova_end = geometry->aperture_end; 204 199 } 205 200 201 + skip_iommu: 206 202 err = host1x_channel_list_init(&host->channel_list, 207 203 host->info->nb_channels); 208 204 if (err) {