Merge tag 'drm-fixes-2020-01-19' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Back from LCA2020, fixes wasn't too busy last week, seems to have
quieten down appropriately, some amdgpu, i915, then a core mst fix and
one fix for virtio-gpu and one for rockchip:

core mst:
- serialize down messages and clear time slots on unplug

amdgpu:
- Update golden settings for renoir
- eDP fix

i915:
- uAPI fix: Remove dash and colon from PMU names to comply with
tools/perf
- Fix for include file that was indirectly included
- Two fixes to make sure VMA are marked active for error capture

virtio:
- maintain obj reservation lock when submitting cmds

rockchip:
- increase link rate variable size to accommodate higher link rates"

* tag 'drm-fixes-2020-01-19' of git://anongit.freedesktop.org/drm/drm:
drm/amd/display: Reorder detect_edp_sink_caps before link settings read.
drm/amdgpu: update goldensetting for renoir
drm/dp_mst: Have DP_Tx send one msg at a time
drm/dp_mst: clear time slots for ports invalid
drm/i915/pmu: Do not use colons or dashes in PMU names
drm/rockchip: fix integer type used for storing dp data rate
drm/i915/gt: Mark ring->vma as active while pinned
drm/i915/gt: Mark context->state vma as active while pinned
drm/i915/gt: Skip trying to unbind in restore_ggtt_mappings
drm/i915: Add missing include file <linux/math64.h>
drm/virtio: add missing virtio_gpu_array_lock_resv call

Changed files (+94 -17):
drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c (+1 -1)
···
         SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC0_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
         SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_RLC1_RB_WPTR_POLL_CNTL, 0xfffffff7, 0x00403000),
         SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_PAGE, 0x000003ff, 0x000003c0),
-        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x00000000)
+        SOC15_REG_GOLDEN_VALUE(SDMA0, 0, mmSDMA0_UTCL1_WATERMK, 0xfc000000, 0x03fbe1fe)
 };

 static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev,
drivers/gpu/drm/amd/display/dc/core/dc_link.c (+1 -1)
···
         }

         case SIGNAL_TYPE_EDP: {
-                read_current_link_settings_on_detect(link);
                 detect_edp_sink_caps(link);
+                read_current_link_settings_on_detect(link);
                 sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
                 sink_caps.signal = SIGNAL_TYPE_EDP;
                 break;
drivers/gpu/drm/drm_dp_mst_topology.c (+36 -3)
···
                     txmsg->state == DRM_DP_SIDEBAND_TX_SENT) {
                         mstb->tx_slots[txmsg->seqno] = NULL;
                 }
+                mgr->is_waiting_for_dwn_reply = false;
+
         }
 out:
         if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) {
···
         }
         mutex_unlock(&mgr->qlock);

+        drm_dp_mst_kick_tx(mgr);
         return ret;
 }

···
 {
         struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
         struct drm_dp_mst_port *port;
-        int old_ddps, ret;
+        int old_ddps, old_input, ret, i;
         u8 new_pdt;
         bool dowork = false, create_connector = false;
···
         }

         old_ddps = port->ddps;
+        old_input = port->input;
         port->input = conn_stat->input_port;
         port->mcs = conn_stat->message_capability_status;
         port->ldps = conn_stat->legacy_device_plug_status;
···
                 DRM_ERROR("Failed to change PDT for port %p: %d\n",
                           port, ret);
                 dowork = false;
+        }
+
+        if (!old_input && old_ddps != port->ddps && !port->ddps) {
+                for (i = 0; i < mgr->max_payloads; i++) {
+                        struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+                        struct drm_dp_mst_port *port_validated;
+
+                        if (!vcpi)
+                                continue;
+
+                        port_validated =
+                                container_of(vcpi, struct drm_dp_mst_port, vcpi);
+                        port_validated =
+                                drm_dp_mst_topology_get_port_validated(mgr, port_validated);
+                        if (!port_validated) {
+                                mutex_lock(&mgr->payload_lock);
+                                vcpi->num_slots = 0;
+                                mutex_unlock(&mgr->payload_lock);
+                        } else {
+                                drm_dp_mst_topology_put_port(port_validated);
+                        }
+                }
         }

         if (port->connector)
···
         ret = process_single_tx_qlock(mgr, txmsg, false);
         if (ret == 1) {
                 /* txmsg is sent it should be in the slots now */
+                mgr->is_waiting_for_dwn_reply = true;
                 list_del(&txmsg->next);
         } else if (ret) {
                 DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+                mgr->is_waiting_for_dwn_reply = false;
                 list_del(&txmsg->next);
                 if (txmsg->seqno != -1)
                         txmsg->dst->tx_slots[txmsg->seqno] = NULL;
···
                 drm_dp_mst_dump_sideband_msg_tx(&p, txmsg);
         }

-        if (list_is_singular(&mgr->tx_msg_downq))
+        if (list_is_singular(&mgr->tx_msg_downq) &&
+            !mgr->is_waiting_for_dwn_reply)
                 process_single_down_tx_qlock(mgr);
         mutex_unlock(&mgr->qlock);
 }
···
         mutex_lock(&mgr->qlock);
         txmsg->state = DRM_DP_SIDEBAND_TX_RX;
         mstb->tx_slots[slot] = NULL;
+        mgr->is_waiting_for_dwn_reply = false;
         mutex_unlock(&mgr->qlock);

         wake_up_all(&mgr->tx_waitq);
···
 no_msg:
         drm_dp_mst_topology_put_mstb(mstb);
 clear_down_rep_recv:
+        mutex_lock(&mgr->qlock);
+        mgr->is_waiting_for_dwn_reply = false;
+        mutex_unlock(&mgr->qlock);
         memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));

         return 0;
···
         struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work);

         mutex_lock(&mgr->qlock);
-        if (!list_empty(&mgr->tx_msg_downq))
+        if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply)
                 process_single_down_tx_qlock(mgr);
         mutex_unlock(&mgr->qlock);
 }
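The serialization scheme is easier to see distilled out of the diff. Below is a
minimal sketch using only names visible in the hunks above; queue_down_msg()
and down_reply_received() are hypothetical stand-ins for the real send and
receive paths, with error handling and sequence-slot bookkeeping omitted:

        /* One down request in flight at a time. The flag is read and written
         * only under the same qlock that protects the tx queue. */
        static void queue_down_msg(struct drm_dp_mst_topology_mgr *mgr,
                                   struct drm_dp_sideband_msg_tx *txmsg)
        {
                mutex_lock(&mgr->qlock);
                list_add_tail(&txmsg->next, &mgr->tx_msg_downq);
                if (list_is_singular(&mgr->tx_msg_downq) &&
                    !mgr->is_waiting_for_dwn_reply)
                        process_single_down_tx_qlock(mgr); /* sets the flag on send */
                mutex_unlock(&mgr->qlock);
        }

        static void down_reply_received(struct drm_dp_mst_topology_mgr *mgr)
        {
                mutex_lock(&mgr->qlock);
                mgr->is_waiting_for_dwn_reply = false; /* reply consumed */
                mutex_unlock(&mgr->qlock);
                drm_dp_mst_kick_tx(mgr); /* schedule tx_work for the next msg */
        }

The tx worker applies the same !is_waiting_for_dwn_reply test (last hunk
above), so replies effectively clock the queue: each reply releases exactly
one pending down message.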
drivers/gpu/drm/i915/gt/intel_context.c (+37 -3)
···
         if (err)
                 return err;

+        err = i915_active_acquire(&vma->active);
+        if (err)
+                goto err_unpin;
+
         /*
          * And mark it as a globally pinned object to let the shrinker know
          * it cannot reclaim the object until we release it.
···
         vma->obj->mm.dirty = true;

         return 0;
+
+err_unpin:
+        i915_vma_unpin(vma);
+        return err;
 }

 static void __context_unpin_state(struct i915_vma *vma)
 {
         i915_vma_make_shrinkable(vma);
+        i915_active_release(&vma->active);
         __i915_vma_unpin(vma);
+}
+
+static int __ring_active(struct intel_ring *ring)
+{
+        int err;
+
+        err = i915_active_acquire(&ring->vma->active);
+        if (err)
+                return err;
+
+        err = intel_ring_pin(ring);
+        if (err)
+                goto err_active;
+
+        return 0;
+
+err_active:
+        i915_active_release(&ring->vma->active);
+        return err;
+}
+
+static void __ring_retire(struct intel_ring *ring)
+{
+        intel_ring_unpin(ring);
+        i915_active_release(&ring->vma->active);
 }

 __i915_active_call
···
         __context_unpin_state(ce->state);

         intel_timeline_unpin(ce->timeline);
-        intel_ring_unpin(ce->ring);
+        __ring_retire(ce->ring);

         intel_context_put(ce);
 }
···

         intel_context_get(ce);

-        err = intel_ring_pin(ce->ring);
+        err = __ring_active(ce->ring);
         if (err)
                 goto err_put;

···
 err_timeline:
         intel_timeline_unpin(ce->timeline);
 err_ring:
-        intel_ring_unpin(ce->ring);
+        __ring_retire(ce->ring);
 err_put:
         intel_context_put(ce);
         return err;
drivers/gpu/drm/i915/i915_gem_gtt.c (+2 -5)
···

 static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
 {
-        struct i915_vma *vma, *vn;
+        struct i915_vma *vma;
         bool flush = false;
         int open;

···
         open = atomic_xchg(&ggtt->vm.open, 0);

         /* clflush objects bound into the GGTT and rebind them. */
-        list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
+        list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) {
                 struct drm_i915_gem_object *obj = vma->obj;

                 if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND))
-                        continue;
-
-                if (!__i915_vma_unbind(vma))
                         continue;

                 clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
drivers/gpu/drm/i915/i915_pmu.c (+8 -3)
···
         hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
         pmu->timer.function = i915_sample;

-        if (!is_igp(i915))
+        if (!is_igp(i915)) {
                 pmu->name = kasprintf(GFP_KERNEL,
-                                      "i915-%s",
+                                      "i915_%s",
                                       dev_name(i915->drm.dev));
-        else
+                if (pmu->name) {
+                        /* tools/perf reserves colons as special. */
+                        strreplace((char *)pmu->name, ':', '_');
+                }
+        } else {
                 pmu->name = "i915";
+        }
         if (!pmu->name)
                 goto err;
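For a discrete card the PMU name is built from the PCI device name, so both
substitutions matter. A small illustration (the PCI address is made up;
strreplace() is the kernel string helper used in the hunk above):

        char name[] = "i915_0000:03:00.0"; /* kasprintf("i915_%s", dev_name(...)) */

        strreplace(name, ':', '_');
        /* name is now "i915_0000_03_00.0": no dashes or colons left, both
         * of which tools/perf treats as special in event names. */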
drivers/gpu/drm/i915/selftests/i915_random.h (+1)
···
 #ifndef __I915_SELFTESTS_RANDOM_H__
 #define __I915_SELFTESTS_RANDOM_H__

+#include <linux/math64.h>
 #include <linux/random.h>

 #include "../i915_selftest.h"
drivers/gpu/drm/rockchip/cdn-dp-core.h (+1 -1)
···
         struct cdn_dp_port *port[MAX_PHY];
         u8 ports;
         u8 max_lanes;
-        u8 max_rate;
+        unsigned int max_rate;
         u8 lanes;
         int active_port;

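The bug here is silent integer truncation: the DRM DP helpers express link
rates in kHz, so e.g. drm_dp_max_link_rate() reports HBR2 (5.4 Gbps) as
540000, far beyond what a u8 can hold. A worked example with illustrative
variable names:

        u8 bad_rate = 540000;           /* wraps to 540000 % 256 == 96 */
        unsigned int max_rate = 540000; /* stores the full value */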
drivers/gpu/drm/virtio/virtgpu_plane.c (+1)
···
         if (!objs)
                 return;
         virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
+        virtio_gpu_array_lock_resv(objs);
         virtio_gpu_cmd_transfer_to_host_2d
                 (vgdev, 0,
                  plane->state->crtc_w,
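The rule behind this one-liner: virtio-gpu attaches a fence to the objects a
command touches, and their dma-resv locks must already be held when that
happens. A minimal sketch of the submit path, assuming the existing
virtio_gpu_array_* helpers (error handling trimmed):

        struct virtio_gpu_object_array *objs;

        objs = virtio_gpu_array_alloc(1);
        if (!objs)
                return;
        virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]);
        virtio_gpu_array_lock_resv(objs); /* the call this fix adds */
        /* ... queue the command; the submission path attaches the fence
         * and then releases the locks via virtio_gpu_array_unlock_resv(). */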
include/drm/drm_dp_mst_helper.h (+6)
···
          * &drm_dp_sideband_msg_tx.state once they are queued
          */
         struct mutex qlock;
+
+        /**
+         * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
+         */
+        bool is_waiting_for_dwn_reply;
+
         /**
          * @tx_msg_downq: List of pending down replies.
          */