Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-misc-fixes-2017-07-20' of git://anongit.freedesktop.org/git/drm-misc into drm-fixes

Core Changes:
- fence: Introduce new fence flag to signify timestamp is populated (Chris)
- mst: Avoid processing incomplete data + fix NULL dereference (Imre)

Driver Changes:
- vc4: Avoid WARN from grabbing a ref from vblank that's not on (Boris)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Boris Brezillon <boris.brezillon@free-electrons.com>
Cc: Imre Deak <imre.deak@intel.com>

* tag 'drm-misc-fixes-2017-07-20' of git://anongit.freedesktop.org/git/drm-misc:
drm/mst: Avoid processing partially received up/down message transactions
drm/mst: Avoid dereferencing a NULL mstb in drm_dp_mst_handle_up_req()
drm/mst: Fix error handling during MST sideband message reception
drm/vc4: Fix VBLANK handling in crtc->enable() path
dma-buf/fence: Avoid use of uninitialised timestamp

+92 -44
+6 -11
drivers/dma-buf/dma-fence.c
··· 75 75 if (WARN_ON(!fence)) 76 76 return -EINVAL; 77 77 78 - if (!ktime_to_ns(fence->timestamp)) { 79 - fence->timestamp = ktime_get(); 80 - smp_mb__before_atomic(); 81 - } 82 - 83 78 if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { 84 79 ret = -EINVAL; 85 80 ··· 82 87 * we might have raced with the unlocked dma_fence_signal, 83 88 * still run through all callbacks 84 89 */ 85 - } else 90 + } else { 91 + fence->timestamp = ktime_get(); 92 + set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); 86 93 trace_dma_fence_signaled(fence); 94 + } 87 95 88 96 list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { 89 97 list_del_init(&cur->node); ··· 113 115 if (!fence) 114 116 return -EINVAL; 115 117 116 - if (!ktime_to_ns(fence->timestamp)) { 117 - fence->timestamp = ktime_get(); 118 - smp_mb__before_atomic(); 119 - } 120 - 121 118 if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) 122 119 return -EINVAL; 123 120 121 + fence->timestamp = ktime_get(); 122 + set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); 124 123 trace_dma_fence_signaled(fence); 125 124 126 125 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) {
+1 -1
drivers/dma-buf/sync_debug.c
··· 84 84 show ? "_" : "", 85 85 sync_status_str(status)); 86 86 87 - if (status) { 87 + if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) { 88 88 struct timespec64 ts64 = 89 89 ktime_to_timespec64(fence->timestamp); 90 90
+7 -1
drivers/dma-buf/sync_file.c
··· 391 391 sizeof(info->driver_name)); 392 392 393 393 info->status = dma_fence_get_status(fence); 394 - info->timestamp_ns = ktime_to_ns(fence->timestamp); 394 + while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && 395 + !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) 396 + cpu_relax(); 397 + info->timestamp_ns = 398 + test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? 399 + ktime_to_ns(fence->timestamp) : 400 + ktime_set(0, 0); 395 401 } 396 402 397 403 static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
+33 -8
drivers/gpu/drm/drm_dp_mst_topology.c
··· 330 330 return false; 331 331 } 332 332 333 + /* 334 + * ignore out-of-order messages or messages that are part of a 335 + * failed transaction 336 + */ 337 + if (!recv_hdr.somt && !msg->have_somt) 338 + return false; 339 + 333 340 /* get length contained in this portion */ 334 341 msg->curchunk_len = recv_hdr.msg_len; 335 342 msg->curchunk_hdrlen = hdrlen; ··· 2171 2164 } 2172 2165 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); 2173 2166 2174 - static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) 2167 + static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) 2175 2168 { 2176 2169 int len; 2177 2170 u8 replyblock[32]; ··· 2186 2179 replyblock, len); 2187 2180 if (ret != len) { 2188 2181 DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); 2189 - return; 2182 + return false; 2190 2183 } 2191 2184 ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); 2192 2185 if (!ret) { 2193 2186 DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); 2194 - return; 2187 + return false; 2195 2188 } 2196 2189 replylen = msg->curchunk_len + msg->curchunk_hdrlen; 2197 2190 ··· 2203 2196 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, 2204 2197 replyblock, len); 2205 2198 if (ret != len) { 2206 - DRM_DEBUG_KMS("failed to read a chunk\n"); 2199 - DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", 2200 + len, ret); 2201 + return false; 2207 2202 } 2203 + 2208 2204 ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); 2209 - if (!ret) { 2205 + DRM_DEBUG_KMS("failed to build sideband msg\n"); 2206 + return false; 2207 + } 2208 + 2209 + 2211 2210 curreply += len; 2212 2211 replylen -= len; 2213 2212 } 2213 + return true; 2214 2214 } 2215 2215 2216 2216 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) 2217 2217 { 2218 2218 int ret = 0; 2219 2219 2220 - drm_dp_get_one_sb_msg(mgr, false); 2220 + if (!drm_dp_get_one_sb_msg(mgr, false)) { 2221 + memset(&mgr->down_rep_recv, 0, 2222 + sizeof(struct drm_dp_sideband_msg_rx)); 2223 + return 0; 2224 + } 2221 2225 2222 2226 if (mgr->down_rep_recv.have_eomt) { 2223 2227 struct drm_dp_sideband_msg_tx *txmsg; ··· 2284 2266 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) 2285 2267 { 2286 2268 int ret = 0; 2287 - drm_dp_get_one_sb_msg(mgr, true); 2269 + 2270 + if (!drm_dp_get_one_sb_msg(mgr, true)) { 2271 + memset(&mgr->up_req_recv, 0, 2272 + sizeof(struct drm_dp_sideband_msg_rx)); 2273 + return 0; 2274 + } 2288 2275 2289 2276 if (mgr->up_req_recv.have_eomt) { 2290 2277 struct drm_dp_sideband_msg_req_body msg; ··· 2341 2318 DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); 2342 2319 } 2343 2320 2344 - drm_dp_put_mst_branch_device(mstb); 2321 + if (mstb) 2322 + drm_dp_put_mst_branch_device(mstb); 2323 + 2345 2324 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); 2346 2325 } 2347 2326 return ret;
+43 -23
drivers/gpu/drm/vc4/vc4_crtc.c
··· 520 520 SCALER_DISPSTATX_EMPTY); 521 521 } 522 522 523 + static void vc4_crtc_update_dlist(struct drm_crtc *crtc) 524 + { 525 + struct drm_device *dev = crtc->dev; 526 + struct vc4_dev *vc4 = to_vc4_dev(dev); 527 + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 528 + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 529 + 530 + if (crtc->state->event) { 531 + unsigned long flags; 532 + 533 + crtc->state->event->pipe = drm_crtc_index(crtc); 534 + 535 + WARN_ON(drm_crtc_vblank_get(crtc) != 0); 536 + 537 + spin_lock_irqsave(&dev->event_lock, flags); 538 + vc4_crtc->event = crtc->state->event; 539 + crtc->state->event = NULL; 540 + 541 + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 542 + vc4_state->mm.start); 543 + 544 + spin_unlock_irqrestore(&dev->event_lock, flags); 545 + } else { 546 + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 547 + vc4_state->mm.start); 548 + } 549 + } 550 + 523 551 static void vc4_crtc_enable(struct drm_crtc *crtc) 524 552 { 525 553 struct drm_device *dev = crtc->dev; ··· 557 529 struct drm_display_mode *mode = &state->adjusted_mode; 558 530 559 531 require_hvs_enabled(dev); 532 + 533 + /* Enable vblank irq handling before crtc is started otherwise 534 + * drm_crtc_get_vblank() fails in vc4_crtc_update_dlist(). 535 + */ 536 + drm_crtc_vblank_on(crtc); 537 + vc4_crtc_update_dlist(crtc); 560 538 561 539 /* Turn on the scaler, which will wait for vstart to start 562 540 * compositing. ··· 575 541 /* Turn on the pixel valve, which will emit the vstart signal. */ 576 542 CRTC_WRITE(PV_V_CONTROL, 577 543 CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); 578 - 579 - /* Enable vblank irq handling after crtc is started. */ 580 - drm_crtc_vblank_on(crtc); 581 544 } 582 545 583 546 static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc, ··· 629 598 { 630 599 struct drm_device *dev = crtc->dev; 631 600 struct vc4_dev *vc4 = to_vc4_dev(dev); 632 - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); 633 601 struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); 634 602 struct drm_plane *plane; 635 603 bool debug_dump_regs = false; ··· 650 620 651 621 WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); 652 622 653 - if (crtc->state->event) { 654 - unsigned long flags; 655 - 656 - crtc->state->event->pipe = drm_crtc_index(crtc); 657 - 658 - WARN_ON(drm_crtc_vblank_get(crtc) != 0); 659 - 660 - spin_lock_irqsave(&dev->event_lock, flags); 661 - vc4_crtc->event = crtc->state->event; 662 - crtc->state->event = NULL; 663 - 664 - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 665 - vc4_state->mm.start); 666 - 667 - spin_unlock_irqrestore(&dev->event_lock, flags); 668 - } else { 669 - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), 670 - vc4_state->mm.start); 671 - } 623 + /* Only update DISPLIST if the CRTC was already running and is not 624 + * being disabled. 625 + * vc4_crtc_enable() takes care of updating the dlist just after 626 + * re-enabling VBLANK interrupts and before enabling the engine. 627 + * If the CRTC is being disabled, there's no point in updating this 628 + * information. 629 + */ 630 + if (crtc->state->active && old_state->active) 631 + vc4_crtc_update_dlist(crtc); 672 632 673 633 if (debug_dump_regs) { 674 634 DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc));
+2
include/linux/dma-fence.h
··· 55 55 * of the time. 56 56 * 57 57 * DMA_FENCE_FLAG_SIGNALED_BIT - fence is already signaled 58 + * DMA_FENCE_FLAG_TIMESTAMP_BIT - timestamp recorded for fence signaling 58 59 * DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called 59 60 * DMA_FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the 60 61 * implementer of the fence for its own purposes. Can be used in different ··· 85 84 86 85 enum dma_fence_flag_bits { 87 86 DMA_FENCE_FLAG_SIGNALED_BIT, 87 + DMA_FENCE_FLAG_TIMESTAMP_BIT, 88 88 DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, 89 89 DMA_FENCE_FLAG_USER_BITS, /* must always be last member */ 90 90 };