Merge tag 'drm-fixes-for-v4.15-rc5' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"I've got most of two weeks worth of fixes here due to being on
holidays last week.

The main things are:

- Core:
* Syncobj fd reference count fix
* Leasing ioctl misuse fix

- nouveau regression fixes

- further amdgpu DC fixes

- sun4i regression fixes

I'm not sure I'll see many fixes over the next couple of weeks; we'll see
how we go"

* tag 'drm-fixes-for-v4.15-rc5' of git://people.freedesktop.org/~airlied/linux: (27 commits)
drm/syncobj: Stop reusing the same struct file for all syncobj -> fd
drm: move lease init after validation in drm_lease_create
drm/plane: Make framebuffer refcounting the responsibility of setplane_internal callers
drm/sun4i: hdmi: Move the mode_valid callback to the encoder
drm/nouveau: fix obvious memory leak
drm/i915: Protect DDI port to DPLL map from theoretical race.
drm/i915/lpe: Remove double-encapsulation of info string
drm/sun4i: Fix error path handling
drm/nouveau: use alternate memory type for system-memory buffers with kind != 0
drm/nouveau: avoid GPU page sizes > PAGE_SIZE for buffer objects in host memory
drm/nouveau/mmu/gp10b: use correct implementation
drm/nouveau/pci: do a msi rearm on init
drm/nouveau/imem/nv50: fix refcount_t warning
drm/nouveau/bios/dp: support DP Info Table 2.0
drm/nouveau/fbcon: fix NULL pointer access in nouveau_fbcon_destroy
drm/amd/display: Fix rehook MST display not light back on
drm/amd/display: fix missing pixel clock adjustment for dongle
drm/amd/display: set chroma taps to 1 when not scaling
drm/amd/display: add pipe locking before front end programing
drm/sun4i: validate modes for HDMI
...

+268 -147
+1 -1
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
···
                   PACKET3_MAP_QUEUES_PIPE(ring->pipe) |
                   PACKET3_MAP_QUEUES_ME((ring->me == 1 ? 0 : 1)) |
                   PACKET3_MAP_QUEUES_QUEUE_TYPE(0) | /*queue_type: normal compute queue */
-                  PACKET3_MAP_QUEUES_ALLOC_FORMAT(1) | /* alloc format: all_on_one_pipe */
+                  PACKET3_MAP_QUEUES_ALLOC_FORMAT(0) | /* alloc format: all_on_one_pipe */
                   PACKET3_MAP_QUEUES_ENGINE_SEL(0) | /* engine_sel: compute */
                   PACKET3_MAP_QUEUES_NUM_QUEUES(1)); /* num_queues: must be 1 */
         amdgpu_ring_write(kiq_ring, PACKET3_MAP_QUEUES_DOORBELL_OFFSET(ring->doorbell_index));
+8 -5
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
···
                                        const struct dm_connector_state *dm_state)
 {
         struct drm_display_mode *preferred_mode = NULL;
-        const struct drm_connector *drm_connector;
+        struct drm_connector *drm_connector;
         struct dc_stream_state *stream = NULL;
         struct drm_display_mode mode = *drm_mode;
         bool native_mode_found = false;
···
 
         if (!aconnector->dc_sink) {
                 /*
-                 * Exclude MST from creating fake_sink
-                 * TODO: need to enable MST into fake_sink feature
+                 * Create dc_sink when necessary to MST
+                 * Don't apply fake_sink to MST
                  */
-                if (aconnector->mst_port)
-                        goto stream_create_fail;
+                if (aconnector->mst_port) {
+                        dm_dp_mst_dc_sink_create(drm_connector);
+                        goto mst_dc_sink_create_done;
+                }
 
                 if (create_fake_sink(aconnector))
                         goto stream_create_fail;
···
 stream_create_fail:
 dm_state_null:
 drm_connector_null:
+mst_dc_sink_create_done:
         return stream;
 }
 
+2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
···
         struct mutex hpd_lock;
 
         bool fake_enable;
+
+        bool mst_connected;
 };
 
 #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
+51
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
···
         return ret;
 }
 
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector)
+{
+        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+        struct edid *edid;
+        struct dc_sink *dc_sink;
+        struct dc_sink_init_data init_params = {
+                        .link = aconnector->dc_link,
+                        .sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+        edid = drm_dp_mst_get_edid(connector, &aconnector->mst_port->mst_mgr, aconnector->port);
+
+        if (!edid) {
+                drm_mode_connector_update_edid_property(
+                        &aconnector->base,
+                        NULL);
+                return;
+        }
+
+        aconnector->edid = edid;
+
+        dc_sink = dc_link_add_remote_sink(
+                aconnector->dc_link,
+                (uint8_t *)aconnector->edid,
+                (aconnector->edid->extensions + 1) * EDID_LENGTH,
+                &init_params);
+
+        dc_sink->priv = aconnector;
+        aconnector->dc_sink = dc_sink;
+
+        amdgpu_dm_add_sink_to_freesync_module(
+                        connector, aconnector->edid);
+
+        drm_mode_connector_update_edid_property(
+                        &aconnector->base, aconnector->edid);
+}
+
 static int dm_dp_mst_get_modes(struct drm_connector *connector)
 {
         struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
···
                         drm_mode_connector_set_path_property(connector, pathprop);
 
                         drm_connector_list_iter_end(&conn_iter);
+                        aconnector->mst_connected = true;
                         return &aconnector->base;
                 }
         }
···
          */
         amdgpu_dm_connector_funcs_reset(connector);
 
+        aconnector->mst_connected = true;
+
         DRM_INFO("DM_MST: added connector: %p [id: %d] [master: %p]\n",
                         aconnector, connector->base.id, aconnector->mst_port);
···
         drm_mode_connector_update_edid_property(
                         &aconnector->base,
                         NULL);
+
+        aconnector->mst_connected = false;
 }
···
         drm_kms_helper_hotplug_event(dev);
 }
 
+static void dm_dp_mst_link_status_reset(struct drm_connector *connector)
+{
+        mutex_lock(&connector->dev->mode_config.mutex);
+        drm_mode_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD);
+        mutex_unlock(&connector->dev->mode_config.mutex);
+}
+
 static void dm_dp_mst_register_connector(struct drm_connector *connector)
 {
         struct drm_device *dev = connector->dev;
         struct amdgpu_device *adev = dev->dev_private;
+        struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
 
         if (adev->mode_info.rfbdev)
                 drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
···
 
         drm_connector_register(connector);
 
+        if (aconnector->mst_connected)
+                dm_dp_mst_link_status_reset(connector);
 }
 
 static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
+1
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h
···
 
 void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
                                        struct amdgpu_dm_connector *aconnector);
+void dm_dp_mst_dc_sink_create(struct drm_connector *connector);
 
 #endif
+9
drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
···
                         v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps;
                         v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c;
                         v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c;
+                        /*
+                         * Spreadsheet doesn't handle taps_c is one properly,
+                         * need to force Chroma to always be scaled to pass
+                         * bandwidth validation.
+                         */
+                        if (v->override_hta_pschroma[input_idx] == 1)
+                                v->override_hta_pschroma[input_idx] = 2;
+                        if (v->override_vta_pschroma[input_idx] == 1)
+                                v->override_vta_pschroma[input_idx] = 2;
                         v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor;
                 }
                 if (v->is_line_buffer_bpp_fixed == dcn_bw_yes)
+3 -1
drivers/gpu/drm/amd/display/dc/core/dc_link.c
···
                 link->link_enc->funcs->disable_output(link->link_enc, signal, link);
 }
 
-bool dp_active_dongle_validate_timing(
+static bool dp_active_dongle_validate_timing(
                 const struct dc_crtc_timing *timing,
                 const struct dc_dongle_caps *dongle_caps)
 {
···
         /* Check Color Depth and Pixel Clock */
         if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
                 required_pix_clk /= 2;
+        else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422)
+                required_pix_clk = required_pix_clk * 2 / 3;
 
         switch (timing->display_color_depth) {
         case COLOR_DEPTH_666:
+21 -5
drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
···
                 int num_planes,
                 struct dc_state *context)
 {
-        int i, be_idx;
+        int i;
 
         if (num_planes == 0)
                 return;
 
-        be_idx = -1;
         for (i = 0; i < dc->res_pool->pipe_count; i++) {
-                if (stream == context->res_ctx.pipe_ctx[i].stream) {
-                        be_idx = context->res_ctx.pipe_ctx[i].stream_res.tg->inst;
-                        break;
+                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+                struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+                if (stream == pipe_ctx->stream) {
+                        if (!pipe_ctx->top_pipe &&
+                            (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+                                dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
                 }
         }
 
···
                                 context->stream_count);
 
                 dce110_program_front_end_for_pipe(dc, pipe_ctx);
+
+                dc->hwss.update_plane_addr(dc, pipe_ctx);
+
                 program_surface_visibility(dc, pipe_ctx);
 
+        }
+
+        for (i = 0; i < dc->res_pool->pipe_count; i++) {
+                struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+                struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+                if ((stream == pipe_ctx->stream) &&
+                    (!pipe_ctx->top_pipe) &&
+                    (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+                        dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
         }
 }
 
+4 -5
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c
···
                         scl_data->taps.h_taps = 1;
                 if (IDENTITY_RATIO(scl_data->ratios.vert))
                         scl_data->taps.v_taps = 1;
-                /*
-                 * Spreadsheet doesn't handle taps_c is one properly,
-                 * need to force Chroma to always be scaled to pass
-                 * bandwidth validation.
-                 */
+                if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+                        scl_data->taps.h_taps_c = 1;
+                if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+                        scl_data->taps.v_taps_c = 1;
         }
 
         return true;
+11 -11
drivers/gpu/drm/drm_lease.c
···
 
         mutex_lock(&dev->mode_config.idr_mutex);
 
-        /* Insert the new lessee into the tree */
-        id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
-        if (id < 0) {
-                error = id;
-                goto out_lessee;
-        }
-
-        lessee->lessee_id = id;
-        lessee->lessor = drm_master_get(lessor);
-        list_add_tail(&lessee->lessee_list, &lessor->lessees);
-
         idr_for_each_entry(leases, entry, object) {
                 error = 0;
                 if (!idr_find(&dev->mode_config.crtc_idr, object))
···
                         goto out_lessee;
                 }
         }
+
+        /* Insert the new lessee into the tree */
+        id = idr_alloc(&(drm_lease_owner(lessor)->lessee_idr), lessee, 1, 0, GFP_KERNEL);
+        if (id < 0) {
+                error = id;
+                goto out_lessee;
+        }
+
+        lessee->lessee_id = id;
+        lessee->lessor = drm_master_get(lessor);
+        list_add_tail(&lessee->lessee_list, &lessor->lessees);
 
         /* Move the leases over */
         lessee->leases = *leases;
+20 -22
drivers/gpu/drm/drm_plane.c
···
 }
 
 /*
- * setplane_internal - setplane handler for internal callers
+ * __setplane_internal - setplane handler for internal callers
  *
- * Note that we assume an extra reference has already been taken on fb. If the
- * update fails, this reference will be dropped before return; if it succeeds,
- * the previous framebuffer (if any) will be unreferenced instead.
+ * This function will take a reference on the new fb for the plane
+ * on success.
  *
  * src_{x,y,w,h} are provided in 16.16 fixed point format
···
         if (!ret) {
                 plane->crtc = crtc;
                 plane->fb = fb;
-                fb = NULL;
+                drm_framebuffer_get(plane->fb);
         } else {
                 plane->old_fb = NULL;
         }
 
 out:
-        if (fb)
-                drm_framebuffer_put(fb);
         if (plane->old_fb)
                 drm_framebuffer_put(plane->old_fb);
         plane->old_fb = NULL;
···
         struct drm_plane *plane;
         struct drm_crtc *crtc = NULL;
         struct drm_framebuffer *fb = NULL;
+        int ret;
 
         if (!drm_core_check_feature(dev, DRIVER_MODESET))
                 return -EINVAL;
···
                 }
         }
 
-        /*
-         * setplane_internal will take care of deref'ing either the old or new
-         * framebuffer depending on success.
-         */
-        return setplane_internal(plane, crtc, fb,
-                                 plane_req->crtc_x, plane_req->crtc_y,
-                                 plane_req->crtc_w, plane_req->crtc_h,
-                                 plane_req->src_x, plane_req->src_y,
-                                 plane_req->src_w, plane_req->src_h);
+        ret = setplane_internal(plane, crtc, fb,
+                                plane_req->crtc_x, plane_req->crtc_y,
+                                plane_req->crtc_w, plane_req->crtc_h,
+                                plane_req->src_x, plane_req->src_y,
+                                plane_req->src_w, plane_req->src_h);
+
+        if (fb)
+                drm_framebuffer_put(fb);
+
+        return ret;
 }
 
 static int drm_mode_cursor_universal(struct drm_crtc *crtc,
···
                 src_h = fb->height << 16;
         }
 
-        /*
-         * setplane_internal will take care of deref'ing either the old or new
-         * framebuffer depending on success.
-         */
         ret = __setplane_internal(crtc->cursor, crtc, fb,
-                                  crtc_x, crtc_y, crtc_w, crtc_h,
-                                  0, 0, src_w, src_h, ctx);
+                                crtc_x, crtc_y, crtc_w, crtc_h,
+                                0, 0, src_w, src_h, ctx);
+
+        if (fb)
+                drm_framebuffer_put(fb);
 
         /* Update successful; save new cursor position, if necessary */
         if (ret == 0 && req->flags & DRM_MODE_CURSOR_MOVE) {
+29 -48
drivers/gpu/drm/drm_syncobj.c
···
         .release = drm_syncobj_file_release,
 };
 
-static int drm_syncobj_alloc_file(struct drm_syncobj *syncobj)
-{
-        struct file *file = anon_inode_getfile("syncobj_file",
-                                               &drm_syncobj_file_fops,
-                                               syncobj, 0);
-        if (IS_ERR(file))
-                return PTR_ERR(file);
-
-        drm_syncobj_get(syncobj);
-        if (cmpxchg(&syncobj->file, NULL, file)) {
-                /* lost the race */
-                fput(file);
-        }
-
-        return 0;
-}
-
 int drm_syncobj_get_fd(struct drm_syncobj *syncobj, int *p_fd)
 {
-        int ret;
+        struct file *file;
         int fd;
 
         fd = get_unused_fd_flags(O_CLOEXEC);
         if (fd < 0)
                 return fd;
 
-        if (!syncobj->file) {
-                ret = drm_syncobj_alloc_file(syncobj);
-                if (ret) {
-                        put_unused_fd(fd);
-                        return ret;
-                }
+        file = anon_inode_getfile("syncobj_file",
+                                  &drm_syncobj_file_fops,
+                                  syncobj, 0);
+        if (IS_ERR(file)) {
+                put_unused_fd(fd);
+                return PTR_ERR(file);
         }
-        fd_install(fd, syncobj->file);
+
+        drm_syncobj_get(syncobj);
+        fd_install(fd, file);
+
         *p_fd = fd;
         return 0;
 }
···
         return ret;
 }
 
-static struct drm_syncobj *drm_syncobj_fdget(int fd)
-{
-        struct file *file = fget(fd);
-
-        if (!file)
-                return NULL;
-        if (file->f_op != &drm_syncobj_file_fops)
-                goto err;
-
-        return file->private_data;
-err:
-        fput(file);
-        return NULL;
-};
-
 static int drm_syncobj_fd_to_handle(struct drm_file *file_private,
                                     int fd, u32 *handle)
 {
-        struct drm_syncobj *syncobj = drm_syncobj_fdget(fd);
+        struct drm_syncobj *syncobj;
+        struct file *file;
         int ret;
 
-        if (!syncobj)
+        file = fget(fd);
+        if (!file)
                 return -EINVAL;
 
+        if (file->f_op != &drm_syncobj_file_fops) {
+                fput(file);
+                return -EINVAL;
+        }
+
         /* take a reference to put in the idr */
+        syncobj = file->private_data;
         drm_syncobj_get(syncobj);
 
         idr_preload(GFP_KERNEL);
···
         spin_unlock(&file_private->syncobj_table_lock);
         idr_preload_end();
 
-        if (ret < 0) {
-                fput(syncobj->file);
-                return ret;
-        }
-        *handle = ret;
-        return 0;
+        if (ret > 0) {
+                *handle = ret;
+                ret = 0;
+        } else
+                drm_syncobj_put(syncobj);
+
+        fput(file);
+        return ret;
 }
 
 static int drm_syncobj_import_sync_file_fence(struct drm_file *file_private,
+1 -8
drivers/gpu/drm/i915/i915_gem.c
···
          * must wait for all rendering to complete to the object (as unbinding
          * must anyway), and retire the requests.
          */
-        ret = i915_gem_object_wait(obj,
-                                   I915_WAIT_INTERRUPTIBLE |
-                                   I915_WAIT_LOCKED |
-                                   I915_WAIT_ALL,
-                                   MAX_SCHEDULE_TIMEOUT,
-                                   NULL);
+        ret = i915_gem_object_set_to_cpu_domain(obj, false);
         if (ret)
                 return ret;
-
-        i915_gem_retire_requests(to_i915(obj->base.dev));
 
         while ((vma = list_first_entry_or_null(&obj->vma_list,
                                                struct i915_vma,
+2 -1
drivers/gpu/drm/i915/i915_sw_fence.c
···
         struct dma_fence *dma;
         struct timer_list timer;
         struct irq_work work;
+        struct rcu_head rcu;
 };
 
 static void timer_i915_sw_fence_wake(struct timer_list *t)
···
         del_timer_sync(&cb->timer);
         dma_fence_put(cb->dma);
 
-        kfree(cb);
+        kfree_rcu(cb, rcu);
 }
 
 int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
+11 -11
drivers/gpu/drm/i915/intel_breadcrumbs.c
···
         struct intel_wait *wait, *n, *first;
 
         if (!b->irq_armed)
-                return;
+                goto wakeup_signaler;
 
         /* We only disarm the irq when we are idle (all requests completed),
          * so if the bottom-half remains asleep, it missed the request
···
         b->waiters = RB_ROOT;
 
         spin_unlock_irq(&b->rb_lock);
+
+        /*
+         * The signaling thread may be asleep holding a reference to a request,
+         * that had its signaling cancelled prior to being preempted. We need
+         * to kick the signaler, just in case, to release any such reference.
+         */
+wakeup_signaler:
+        wake_up_process(b->signaler);
 }
 
 static bool use_fake_irq(const struct intel_breadcrumbs *b)
···
                 }
 
                 if (unlikely(do_schedule)) {
-                        DEFINE_WAIT(exec);
-
                         if (kthread_should_park())
                                 kthread_parkme();
 
-                        if (kthread_should_stop()) {
-                                GEM_BUG_ON(request);
+                        if (unlikely(kthread_should_stop())) {
+                                i915_gem_request_put(request);
                                 break;
                         }
 
-                        if (request)
-                                add_wait_queue(&request->execute, &exec);
-
                         schedule();
-
-                        if (request)
-                                remove_wait_queue(&request->execute, &exec);
                 }
                 i915_gem_request_put(request);
         } while (1);
+4
drivers/gpu/drm/i915/intel_ddi.c
···
         if (WARN_ON(!pll))
                 return;
 
+        mutex_lock(&dev_priv->dpll_lock);
+
         if (IS_CANNONLAKE(dev_priv)) {
                 /* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
                 val = I915_READ(DPCLKA_CFGCR0);
···
         } else if (INTEL_INFO(dev_priv)->gen < 9) {
                 I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
         }
+
+        mutex_unlock(&dev_priv->dpll_lock);
 }
 
 static void intel_ddi_clk_disable(struct intel_encoder *encoder)
+1 -2
drivers/gpu/drm/i915/intel_display.c
···
         }
 
         ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
+        drm_framebuffer_put(fb);
         if (ret)
                 goto fail;
-
-        drm_framebuffer_put(fb);
 
         ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
         if (ret)
+1 -1
drivers/gpu/drm/i915/intel_lpe_audio.c
···
         };
 
         if (!pci_dev_present(atom_hdaudio_ids)) {
-                DRM_INFO("%s\n", "HDaudio controller not detected, using LPE audio instead\n");
+                DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
                 lpe_present = true;
         }
+3 -2
drivers/gpu/drm/nouveau/nouveau_bo.c
···
                 /* Determine if we can get a cache-coherent map, forcing
                  * uncached mapping if we can't.
                  */
-                if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
+                if (!nouveau_drm_use_coherent_gpu_mapping(drm))
                         nvbo->force_coherent = true;
         }
 
···
                 if (cli->device.info.family > NV_DEVICE_INFO_V0_CURIE &&
                     (flags & TTM_PL_FLAG_VRAM) && !vmm->page[i].vram)
                         continue;
-                if ((flags & TTM_PL_FLAG_TT ) && !vmm->page[i].host)
+                if ((flags & TTM_PL_FLAG_TT) &&
+                    (!vmm->page[i].host || vmm->page[i].shift > PAGE_SHIFT))
                         continue;
 
                 /* Select this page size if it's the first that supports
+9 -2
drivers/gpu/drm/nouveau/nouveau_drv.h
···
                 struct nvif_object copy;
                 int mtrr;
                 int type_vram;
-                int type_host;
-                int type_ncoh;
+                int type_host[2];
+                int type_ncoh[2];
         } ttm;
 
         /* GEM interface support */
···
 nouveau_drm(struct drm_device *dev)
 {
         return dev->dev_private;
+}
+
+static inline bool
+nouveau_drm_use_coherent_gpu_mapping(struct nouveau_drm *drm)
+{
+        struct nvif_mmu *mmu = &drm->client.mmu;
+        return !(mmu->type[drm->ttm.type_host[0]].type & NVIF_MEM_UNCACHED);
 }
 
 int nouveau_pmops_suspend(struct device *);
+1 -1
drivers/gpu/drm/nouveau/nouveau_fbcon.c
···
         drm_fb_helper_unregister_fbi(&fbcon->helper);
         drm_fb_helper_fini(&fbcon->helper);
 
-        if (nouveau_fb->nvbo) {
+        if (nouveau_fb && nouveau_fb->nvbo) {
                 nouveau_vma_del(&nouveau_fb->vma);
                 nouveau_bo_unmap(nouveau_fb->nvbo);
                 nouveau_bo_unpin(nouveau_fb->nvbo);
+3 -3
drivers/gpu/drm/nouveau/nouveau_mem.c
···
         u8 type;
         int ret;
 
-        if (mmu->type[drm->ttm.type_host].type & NVIF_MEM_UNCACHED)
-                type = drm->ttm.type_ncoh;
+        if (!nouveau_drm_use_coherent_gpu_mapping(drm))
+                type = drm->ttm.type_ncoh[!!mem->kind];
         else
-                type = drm->ttm.type_host;
+                type = drm->ttm.type_host[0];
 
         if (mem->kind && !(mmu->type[type].type & NVIF_MEM_KIND))
                 mem->comp = mem->kind = 0;
+30 -11
drivers/gpu/drm/nouveau/nouveau_ttm.c
···
         drm->ttm.mem_global_ref.release = NULL;
 }
 
+static int
+nouveau_ttm_init_host(struct nouveau_drm *drm, u8 kind)
+{
+        struct nvif_mmu *mmu = &drm->client.mmu;
+        int typei;
+
+        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
+                                   kind | NVIF_MEM_COHERENT);
+        if (typei < 0)
+                return -ENOSYS;
+
+        drm->ttm.type_host[!!kind] = typei;
+
+        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE | kind);
+        if (typei < 0)
+                return -ENOSYS;
+
+        drm->ttm.type_ncoh[!!kind] = typei;
+        return 0;
+}
+
 int
 nouveau_ttm_init(struct nouveau_drm *drm)
 {
···
         struct drm_device *dev = drm->dev;
         int typei, ret;
 
-        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE |
-                                   NVIF_MEM_COHERENT);
-        if (typei < 0)
-                return -ENOSYS;
+        ret = nouveau_ttm_init_host(drm, 0);
+        if (ret)
+                return ret;
 
-        drm->ttm.type_host = typei;
-
-        typei = nvif_mmu_type(mmu, NVIF_MEM_HOST | NVIF_MEM_MAPPABLE);
-        if (typei < 0)
-                return -ENOSYS;
-
-        drm->ttm.type_ncoh = typei;
+        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
+            drm->client.device.info.chipset != 0x50) {
+                ret = nouveau_ttm_init_host(drm, NVIF_MEM_KIND);
+                if (ret)
+                        return ret;
+        }
 
         if (drm->client.device.info.platform != NV_DEVICE_INFO_V0_SOC &&
             drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+1 -1
drivers/gpu/drm/nouveau/nouveau_vmm.c
···
                         nvif_vmm_put(&vma->vmm->vmm, &tmp);
                 }
                 list_del(&vma->head);
-                *pvma = NULL;
                 kfree(*pvma);
+                *pvma = NULL;
         }
 }
 
+1 -1
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
···
         .imem = gk20a_instmem_new,
         .ltc = gp100_ltc_new,
         .mc = gp10b_mc_new,
-        .mmu = gf100_mmu_new,
+        .mmu = gp10b_mmu_new,
         .secboot = gp10b_secboot_new,
         .pmu = gm20b_pmu_new,
         .timer = gk20a_timer_new,
+8 -1
drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c
···
         if (data) {
                 *ver = nvbios_rd08(bios, data + 0x00);
                 switch (*ver) {
+                case 0x20:
                 case 0x21:
                 case 0x30:
                 case 0x40:
···
         if (data && idx < *cnt) {
                 u16 outp = nvbios_rd16(bios, data + *hdr + idx * *len);
                 switch (*ver * !!outp) {
+                case 0x20:
                 case 0x21:
                 case 0x30:
                         *hdr = nvbios_rd08(bios, data + 0x04);
···
                 info->type = nvbios_rd16(bios, data + 0x00);
                 info->mask = nvbios_rd16(bios, data + 0x02);
                 switch (*ver) {
+                case 0x20:
+                        info->mask |= 0x00c0; /* match any link */
+                        /* fall-through */
                 case 0x21:
                 case 0x30:
                         info->flags = nvbios_rd08(bios, data + 0x05);
                         info->script[0] = nvbios_rd16(bios, data + 0x06);
                         info->script[1] = nvbios_rd16(bios, data + 0x08);
-                        info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
+                        if (*len >= 0x0c)
+                                info->lnkcmp = nvbios_rd16(bios, data + 0x0a);
                         if (*len >= 0x0f) {
                                 info->script[2] = nvbios_rd16(bios, data + 0x0c);
                                 info->script[3] = nvbios_rd16(bios, data + 0x0e);
···
         memset(info, 0x00, sizeof(*info));
         if (data) {
                 switch (*ver) {
+                case 0x20:
                 case 0x21:
                         info->dc = nvbios_rd08(bios, data + 0x02);
                         info->pe = nvbios_rd08(bios, data + 0x03);
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c
···
                         iobj->base.memory.ptrs = &nv50_instobj_fast;
                 else
                         iobj->base.memory.ptrs = &nv50_instobj_slow;
-                refcount_inc(&iobj->maps);
+                refcount_set(&iobj->maps, 1);
         }
 
         mutex_unlock(&imem->subdev.mutex);
+7
drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
···
                 return ret;
 
         pci->irq = pdev->irq;
+
+        /* Ensure MSI interrupts are armed, for the case where there are
+         * already interrupts pending (for whatever reason) at load time.
+         */
+        if (pci->msi)
+                pci->func->msi_rearm(pci);
+
         return ret;
 }
 
+20
drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
···
         writel(val, hdmi->base + SUN4I_HDMI_VID_TIMING_POL_REG);
 }
 
+static enum drm_mode_status sun4i_hdmi_mode_valid(struct drm_encoder *encoder,
+                                                  const struct drm_display_mode *mode)
+{
+        struct sun4i_hdmi *hdmi = drm_encoder_to_sun4i_hdmi(encoder);
+        unsigned long rate = mode->clock * 1000;
+        unsigned long diff = rate / 200; /* +-0.5% allowed by HDMI spec */
+        long rounded_rate;
+
+        /* 165 MHz is the typical max pixelclock frequency for HDMI <= 1.2 */
+        if (rate > 165000000)
+                return MODE_CLOCK_HIGH;
+        rounded_rate = clk_round_rate(hdmi->tmds_clk, rate);
+        if (rounded_rate > 0 &&
+            max_t(unsigned long, rounded_rate, rate) -
+            min_t(unsigned long, rounded_rate, rate) < diff)
+                return MODE_OK;
+        return MODE_NOCLOCK;
+}
+
 static const struct drm_encoder_helper_funcs sun4i_hdmi_helper_funcs = {
         .atomic_check   = sun4i_hdmi_atomic_check,
         .disable        = sun4i_hdmi_disable,
         .enable         = sun4i_hdmi_enable,
         .mode_set       = sun4i_hdmi_mode_set,
+        .mode_valid     = sun4i_hdmi_mode_valid,
 };
 
 static const struct drm_encoder_funcs sun4i_hdmi_funcs = {
+2 -2
drivers/gpu/drm/sun4i/sun4i_tcon.c
···
         if (IS_ERR(tcon->crtc)) {
                 dev_err(dev, "Couldn't create our CRTC\n");
                 ret = PTR_ERR(tcon->crtc);
-                goto err_free_clocks;
+                goto err_free_dotclock;
         }
 
         ret = sun4i_rgb_init(drm, tcon);
         if (ret < 0)
-                goto err_free_clocks;
+                goto err_free_dotclock;
 
         if (tcon->quirks->needs_de_be_mux) {
                 /*
+2 -1
drivers/gpu/drm/ttm/ttm_page_alloc.c
···
                 freed += (nr_free_pool - shrink_pages) << pool->order;
                 if (freed >= sc->nr_to_scan)
                         break;
+                shrink_pages <<= pool->order;
         }
         mutex_unlock(&lock);
         return freed;
···
         int r = 0;
         unsigned i, j, cpages;
         unsigned npages = 1 << order;
-        unsigned max_cpages = min(count, (unsigned)NUM_PAGES_TO_ALLOC);
+        unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC);
 
         /* allocate array for page caching change */
         caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);