Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-2019-10-04' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Been offline for 3 days, got back and had some fixes queued up.

Nothing too major, the i915 dp-mst fix is important, and amdgpu has a
bulk move speedup fix and some regressions, but nothing too insane for
an rc2 pull. The intel fixes are also 2 weeks worth, they missed the
boat last week.

core:
- writeback fixes

i915:
- Fix DP-MST crtc_mask
- Fix dsc bpp calculations
- Fix g4x sprite scaling stride check with GTT remapping
- Fix concurrency in cases where requests were getting retired at
same time as resubmitted to HW
- Fix gen9 display resolutions by setting the right max plane width
- Fix GPU hang on preemption
- Mark contents as dirty on a write fault. This was breaking cursor
sprite with dumb buffers.

komeda:
- memory leak fix

tilcdc:
- include fix

amdgpu:
- Enable bulk moves
- Power metrics fixes for Navi
- Fix S4 regression
- Add query for tcc disabled mask
- Fix several leaks in error paths
- randconfig fixes
- clang fixes"

* tag 'drm-fixes-2019-10-04' of git://anongit.freedesktop.org/drm/drm: (21 commits)
Revert "drm/i915: Fix DP-MST crtc_mask"
drm/omap: fix max fclk divider for omap36xx
drm/i915: Fix g4x sprite scaling stride check with GTT remapping
drm/i915/dp: Fix dsc bpp calculations, v5.
drm/amd/display: fix dcn21 Makefile for clang
drm/amd/display: hide an unused variable
drm/amdgpu: display_mode_vba_21: remove uint typedef
drm/amdgpu: hide another #warning
drm/amdgpu: make pmu support optional, again
drm/amd/display: memory leak
drm/amdgpu: fix multiple memory leaks in acp_hw_init
drm/amdgpu: return tcc_disabled_mask to userspace
drm/amdgpu: don't increment vram lost if we are in hibernation
Revert "drm/amdgpu: disable stutter mode for renoir"
drm/amd/powerplay: add sensor lock support for smu
drm/amd/powerplay: change metrics update period from 1ms to 100ms
drm/amdgpu: revert "disable bulk moves for now"
drm/tilcdc: include linux/pinctrl/consumer.h again
drm/komeda: prevent memory leak in komeda_wb_connector_add
drm: Clear the fence pointer when writeback job signaled
...

+232 -159
+1 -1
drivers/gpu/drm/amd/amdgpu/Makefile
··· 54 54 amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \ 55 55 amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \ 56 56 amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \ 57 - amdgpu_vm_sdma.o amdgpu_pmu.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o 57 + amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o 58 58 59 59 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o 60 60
+22 -12
drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
··· 189 189 u32 val = 0; 190 190 u32 count = 0; 191 191 struct device *dev; 192 - struct i2s_platform_data *i2s_pdata; 192 + struct i2s_platform_data *i2s_pdata = NULL; 193 193 194 194 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 195 195 ··· 231 231 adev->acp.acp_cell = kcalloc(ACP_DEVS, sizeof(struct mfd_cell), 232 232 GFP_KERNEL); 233 233 234 - if (adev->acp.acp_cell == NULL) 235 - return -ENOMEM; 234 + if (adev->acp.acp_cell == NULL) { 235 + r = -ENOMEM; 236 + goto failure; 237 + } 236 238 237 239 adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL); 238 240 if (adev->acp.acp_res == NULL) { 239 - kfree(adev->acp.acp_cell); 240 - return -ENOMEM; 241 + r = -ENOMEM; 242 + goto failure; 241 243 } 242 244 243 245 i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL); 244 246 if (i2s_pdata == NULL) { 245 - kfree(adev->acp.acp_res); 246 - kfree(adev->acp.acp_cell); 247 - return -ENOMEM; 247 + r = -ENOMEM; 248 + goto failure; 248 249 } 249 250 250 251 switch (adev->asic_type) { ··· 342 341 r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell, 343 342 ACP_DEVS); 344 343 if (r) 345 - return r; 344 + goto failure; 346 345 347 346 for (i = 0; i < ACP_DEVS ; i++) { 348 347 dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i); 349 348 r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev); 350 349 if (r) { 351 350 dev_err(dev, "Failed to add dev to genpd\n"); 352 - return r; 351 + goto failure; 353 352 } 354 353 } 355 354 ··· 368 367 break; 369 368 if (--count == 0) { 370 369 dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); 371 - return -ETIMEDOUT; 370 + r = -ETIMEDOUT; 371 + goto failure; 372 372 } 373 373 udelay(100); 374 374 } ··· 386 384 break; 387 385 if (--count == 0) { 388 386 dev_err(&adev->pdev->dev, "Failed to reset ACP\n"); 389 - return -ETIMEDOUT; 387 + r = -ETIMEDOUT; 388 + goto failure; 390 389 } 391 390 udelay(100); 392 391 } ··· 396 393 val &= ~ACP_SOFT_RESET__SoftResetAud_MASK; 397 394 
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val); 398 395 return 0; 396 + 397 + failure: 398 + kfree(i2s_pdata); 399 + kfree(adev->acp.acp_res); 400 + kfree(adev->acp.acp_cell); 401 + kfree(adev->acp.acp_genpd); 402 + return r; 399 403 } 400 404 401 405 /**
+2 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 81 81 * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS. 82 82 * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS. 83 83 * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches 84 + * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask 84 85 */ 85 86 #define KMS_DRIVER_MAJOR 3 86 - #define KMS_DRIVER_MINOR 34 87 + #define KMS_DRIVER_MINOR 35 87 88 #define KMS_DRIVER_PATCHLEVEL 0 88 89 89 90 #define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256
+1
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
··· 165 165 uint32_t num_sc_per_sh; 166 166 uint32_t num_packer_per_sc; 167 167 uint32_t pa_sc_tile_steering_override; 168 + uint64_t tcc_disabled_mask; 168 169 }; 169 170 170 171 struct amdgpu_cu_info {
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
··· 787 787 dev_info.pa_sc_tile_steering_override = 788 788 adev->gfx.config.pa_sc_tile_steering_override; 789 789 790 + dev_info.tcc_disabled_mask = adev->gfx.config.tcc_disabled_mask; 791 + 790 792 return copy_to_user(out, &dev_info, 791 793 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; 792 794 }
-2
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
··· 603 603 struct ttm_bo_global *glob = adev->mman.bdev.glob; 604 604 struct amdgpu_vm_bo_base *bo_base; 605 605 606 - #if 0 607 606 if (vm->bulk_moveable) { 608 607 spin_lock(&glob->lru_lock); 609 608 ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); 610 609 spin_unlock(&glob->lru_lock); 611 610 return; 612 611 } 613 - #endif 614 612 615 613 memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); 616 614
+12
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 1691 1691 } 1692 1692 } 1693 1693 1694 + static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev) 1695 + { 1696 + /* TCCs are global (not instanced). */ 1697 + uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | 1698 + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); 1699 + 1700 + adev->gfx.config.tcc_disabled_mask = 1701 + REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | 1702 + (REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, HI_TCC_DISABLE) << 16); 1703 + } 1704 + 1694 1705 static void gfx_v10_0_constants_init(struct amdgpu_device *adev) 1695 1706 { 1696 1707 u32 tmp; ··· 1713 1702 1714 1703 gfx_v10_0_setup_rb(adev); 1715 1704 gfx_v10_0_get_cu_info(adev, &adev->gfx.cu_info); 1705 + gfx_v10_0_get_tcc_info(adev); 1716 1706 adev->gfx.config.pa_sc_tile_steering_override = 1717 1707 gfx_v10_0_init_pa_sc_tile_steering_override(adev); 1718 1708
+4 -2
drivers/gpu/drm/amd/amdgpu/nv.c
··· 317 317 struct smu_context *smu = &adev->smu; 318 318 319 319 if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { 320 - amdgpu_inc_vram_lost(adev); 320 + if (!adev->in_suspend) 321 + amdgpu_inc_vram_lost(adev); 321 322 ret = smu_baco_reset(smu); 322 323 } else { 323 - amdgpu_inc_vram_lost(adev); 324 + if (!adev->in_suspend) 325 + amdgpu_inc_vram_lost(adev); 324 326 ret = nv_asic_mode1_reset(adev); 325 327 } 326 328
+4 -4
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 558 558 { 559 559 switch (soc15_asic_reset_method(adev)) { 560 560 case AMD_RESET_METHOD_BACO: 561 - amdgpu_inc_vram_lost(adev); 561 + if (!adev->in_suspend) 562 + amdgpu_inc_vram_lost(adev); 562 563 return soc15_asic_baco_reset(adev); 563 564 case AMD_RESET_METHOD_MODE2: 564 565 return soc15_mode2_reset(adev); 565 566 default: 566 - amdgpu_inc_vram_lost(adev); 567 + if (!adev->in_suspend) 568 + amdgpu_inc_vram_lost(adev); 567 569 return soc15_asic_mode1_reset(adev); 568 570 } 569 571 } ··· 773 771 #if defined(CONFIG_DRM_AMD_DC) 774 772 else if (amdgpu_device_has_dc_support(adev)) 775 773 amdgpu_device_ip_block_add(adev, &dm_ip_block); 776 - #else 777 - # warning "Enable CONFIG_DRM_AMD_DC for display support on SOC15." 778 774 #endif 779 775 amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); 780 776 break;
+2 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 2385 2385 2386 2386 if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) 2387 2387 dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; 2388 - if (adev->asic_type == CHIP_RENOIR) 2389 - dm->dc->debug.disable_stutter = true; 2390 2388 2391 2389 return 0; 2392 2390 fail: ··· 6017 6019 struct drm_crtc *crtc; 6018 6020 struct drm_crtc_state *old_crtc_state, *new_crtc_state; 6019 6021 int i; 6022 + #ifdef CONFIG_DEBUG_FS 6020 6023 enum amdgpu_dm_pipe_crc_source source; 6024 + #endif 6021 6025 6022 6026 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, 6023 6027 new_crtc_state, i) {
+1
drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
··· 668 668 return &clk_src->base; 669 669 } 670 670 671 + kfree(clk_src); 671 672 BREAK_TO_DEBUGGER(); 672 673 return NULL; 673 674 }
+1
drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
··· 714 714 return &clk_src->base; 715 715 } 716 716 717 + kfree(clk_src); 717 718 BREAK_TO_DEBUGGER(); 718 719 return NULL; 719 720 }
+1
drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
··· 687 687 return &clk_src->base; 688 688 } 689 689 690 + kfree(clk_src); 690 691 BREAK_TO_DEBUGGER(); 691 692 return NULL; 692 693 }
+1
drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
··· 500 500 return &clk_src->base; 501 501 } 502 502 503 + kfree(clk_src); 503 504 BREAK_TO_DEBUGGER(); 504 505 return NULL; 505 506 }
+1
drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
··· 701 701 return &clk_src->base; 702 702 } 703 703 704 + kfree(clk_src); 704 705 BREAK_TO_DEBUGGER(); 705 706 return NULL; 706 707 }
+1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
··· 786 786 return &clk_src->base; 787 787 } 788 788 789 + kfree(clk_src); 789 790 BREAK_TO_DEBUGGER(); 790 791 return NULL; 791 792 }
+1
drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
··· 1077 1077 return &clk_src->base; 1078 1078 } 1079 1079 1080 + kfree(clk_src); 1080 1081 BREAK_TO_DEBUGGER(); 1081 1082 return NULL; 1082 1083 }
+11 -1
drivers/gpu/drm/amd/display/dc/dcn21/Makefile
··· 3 3 4 4 DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o 5 5 6 - CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse -mpreferred-stack-boundary=4 6 + ifneq ($(call cc-option, -mpreferred-stack-boundary=4),) 7 + cc_stack_align := -mpreferred-stack-boundary=4 8 + else ifneq ($(call cc-option, -mstack-alignment=16),) 9 + cc_stack_align := -mstack-alignment=16 10 + endif 11 + 12 + CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse $(cc_stack_align) 13 + 14 + ifdef CONFIG_CC_IS_CLANG 15 + CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o += -msse2 16 + endif 7 17 8 18 AMD_DAL_DCN21 = $(addprefix $(AMDDALPATH)/dc/dcn21/,$(DCN21)) 9 19
+5 -8
drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
··· 39 39 * ways. Unless there is something clearly wrong with it the code should 40 40 * remain as-is as it provides us with a guarantee from HW that it is correct. 41 41 */ 42 - 43 - typedef unsigned int uint; 44 - 45 42 typedef struct { 46 43 double DPPCLK; 47 44 double DISPCLK; ··· 4771 4774 mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0; 4772 4775 mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0; 4773 4776 for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) { 4774 - uint m; 4777 + unsigned int m; 4775 4778 4776 4779 locals->cursor_bw[k] = 0; 4777 4780 locals->cursor_bw_pre[k] = 0; ··· 5282 5285 double SecondMinActiveDRAMClockChangeMarginOneDisplayInVBLank; 5283 5286 double FullDETBufferingTimeYStutterCriticalPlane = 0; 5284 5287 double TimeToFinishSwathTransferStutterCriticalPlane = 0; 5285 - uint k, j; 5288 + unsigned int k, j; 5286 5289 5287 5290 mode_lib->vba.TotalActiveDPP = 0; 5288 5291 mode_lib->vba.TotalDCCActiveDPP = 0; ··· 5504 5507 double DPPCLK[], 5505 5508 double *DCFCLKDeepSleep) 5506 5509 { 5507 - uint k; 5510 + unsigned int k; 5508 5511 double DisplayPipeLineDeliveryTimeLuma; 5509 5512 double DisplayPipeLineDeliveryTimeChroma; 5510 5513 //double DCFCLKDeepSleepPerPlane[DC__NUM_DPP__MAX]; ··· 5724 5727 double DisplayPipeRequestDeliveryTimeChromaPrefetch[]) 5725 5728 { 5726 5729 double req_per_swath_ub; 5727 - uint k; 5730 + unsigned int k; 5728 5731 5729 5732 for (k = 0; k < NumberOfActivePlanes; ++k) { 5730 5733 if (VRatio[k] <= 1) { ··· 5866 5869 unsigned int dpte_groups_per_row_chroma_ub; 5867 5870 unsigned int num_group_per_lower_vm_stage; 5868 5871 unsigned int num_req_per_lower_vm_stage; 5869 - uint k; 5872 + unsigned int k; 5870 5873 5871 5874 for (k = 0; k < NumberOfActivePlanes; ++k) { 5872 5875 if (GPUVMEnable == true) {
+2
drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
··· 843 843 smu->smu_baco.state = SMU_BACO_STATE_EXIT; 844 844 smu->smu_baco.platform_support = false; 845 845 846 + mutex_init(&smu->sensor_lock); 847 + 846 848 smu->watermarks_bitmap = 0; 847 849 smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT; 848 850 smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
+2
drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
··· 1018 1018 if (!data || !size) 1019 1019 return -EINVAL; 1020 1020 1021 + mutex_lock(&smu->sensor_lock); 1021 1022 switch (sensor) { 1022 1023 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 1023 1024 *(uint32_t *)data = pptable->FanMaximumRpm; ··· 1045 1044 default: 1046 1045 ret = smu_smc_read_sensor(smu, sensor, data, size); 1047 1046 } 1047 + mutex_unlock(&smu->sensor_lock); 1048 1048 1049 1049 return ret; 1050 1050 }
+1
drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
··· 344 344 const struct smu_funcs *funcs; 345 345 const struct pptable_funcs *ppt_funcs; 346 346 struct mutex mutex; 347 + struct mutex sensor_lock; 347 348 uint64_t pool_size; 348 349 349 350 struct smu_table_context smu_table;
+3 -1
drivers/gpu/drm/amd/powerplay/navi10_ppt.c
··· 547 547 struct smu_table_context *smu_table= &smu->smu_table; 548 548 int ret = 0; 549 549 550 - if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + HZ / 1000)) { 550 + if (!smu_table->metrics_time || time_after(jiffies, smu_table->metrics_time + msecs_to_jiffies(100))) { 551 551 ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, 552 552 (void *)smu_table->metrics_table, false); 553 553 if (ret) { ··· 1386 1386 if(!data || !size) 1387 1387 return -EINVAL; 1388 1388 1389 + mutex_lock(&smu->sensor_lock); 1389 1390 switch (sensor) { 1390 1391 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 1391 1392 *(uint32_t *)data = pptable->FanMaximumRpm; ··· 1410 1409 default: 1411 1410 ret = smu_smc_read_sensor(smu, sensor, data, size); 1412 1411 } 1412 + mutex_unlock(&smu->sensor_lock); 1413 1413 1414 1414 return ret; 1415 1415 }
+2
drivers/gpu/drm/amd/powerplay/vega20_ppt.c
··· 3023 3023 if(!data || !size) 3024 3024 return -EINVAL; 3025 3025 3026 + mutex_lock(&smu->sensor_lock); 3026 3027 switch (sensor) { 3027 3028 case AMDGPU_PP_SENSOR_MAX_FAN_RPM: 3028 3029 *(uint32_t *)data = pptable->FanMaximumRpm; ··· 3049 3048 default: 3050 3049 ret = smu_smc_read_sensor(smu, sensor, data, size); 3051 3050 } 3051 + mutex_unlock(&smu->sensor_lock); 3052 3052 3053 3053 return ret; 3054 3054 }
+4 -3
drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c
··· 43 43 struct komeda_data_flow_cfg dflow; 44 44 int err; 45 45 46 - if (!writeback_job || !writeback_job->fb) { 46 + if (!writeback_job) 47 47 return 0; 48 - } 49 48 50 49 if (!crtc_st->active) { 51 50 DRM_DEBUG_ATOMIC("Cannot write the composition result out on a inactive CRTC.\n"); ··· 165 166 &komeda_wb_encoder_helper_funcs, 166 167 formats, n_formats); 167 168 komeda_put_fourcc_list(formats); 168 - if (err) 169 + if (err) { 170 + kfree(kwb_conn); 169 171 return err; 172 + } 170 173 171 174 drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs); 172 175
+2 -2
drivers/gpu/drm/arm/malidp_mw.c
··· 131 131 struct drm_framebuffer *fb; 132 132 int i, n_planes; 133 133 134 - if (!conn_state->writeback_job || !conn_state->writeback_job->fb) 134 + if (!conn_state->writeback_job) 135 135 return 0; 136 136 137 137 fb = conn_state->writeback_job->fb; ··· 248 248 249 249 mw_state = to_mw_state(conn_state); 250 250 251 - if (conn_state->writeback_job && conn_state->writeback_job->fb) { 251 + if (conn_state->writeback_job) { 252 252 struct drm_framebuffer *fb = conn_state->writeback_job->fb; 253 253 254 254 DRM_DEV_DEBUG_DRIVER(drm->dev,
+9 -4
drivers/gpu/drm/drm_atomic.c
··· 430 430 return -EINVAL; 431 431 } 432 432 433 - if (writeback_job->out_fence && !writeback_job->fb) { 434 - DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", 435 - connector->base.id, connector->name); 436 - return -EINVAL; 433 + if (!writeback_job->fb) { 434 + if (writeback_job->out_fence) { 435 + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] requesting out-fence without framebuffer\n", 436 + connector->base.id, connector->name); 437 + return -EINVAL; 438 + } 439 + 440 + drm_writeback_cleanup_job(writeback_job); 441 + state->writeback_job = NULL; 437 442 } 438 443 439 444 return 0;
+15 -8
drivers/gpu/drm/drm_writeback.c
··· 324 324 if (job->fb) 325 325 drm_framebuffer_put(job->fb); 326 326 327 + if (job->out_fence) 328 + dma_fence_put(job->out_fence); 329 + 327 330 kfree(job); 328 331 } 329 332 EXPORT_SYMBOL(drm_writeback_cleanup_job); ··· 369 366 { 370 367 unsigned long flags; 371 368 struct drm_writeback_job *job; 369 + struct dma_fence *out_fence; 372 370 373 371 spin_lock_irqsave(&wb_connector->job_lock, flags); 374 372 job = list_first_entry_or_null(&wb_connector->job_queue, 375 373 struct drm_writeback_job, 376 374 list_entry); 377 - if (job) { 375 + if (job) 378 376 list_del(&job->list_entry); 379 - if (job->out_fence) { 380 - if (status) 381 - dma_fence_set_error(job->out_fence, status); 382 - dma_fence_signal(job->out_fence); 383 - dma_fence_put(job->out_fence); 384 - } 385 - } 377 + 386 378 spin_unlock_irqrestore(&wb_connector->job_lock, flags); 387 379 388 380 if (WARN_ON(!job)) 389 381 return; 382 + 383 + out_fence = job->out_fence; 384 + if (out_fence) { 385 + if (status) 386 + dma_fence_set_error(out_fence, status); 387 + dma_fence_signal(out_fence); 388 + dma_fence_put(out_fence); 389 + job->out_fence = NULL; 390 + } 390 391 391 392 INIT_WORK(&job->cleanup_work, cleanup_work); 392 393 queue_work(system_long_wq, &job->cleanup_work);
+8 -4
drivers/gpu/drm/i915/display/intel_display.c
··· 7261 7261 pipe_config->fdi_lanes = lane; 7262 7262 7263 7263 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock, 7264 - link_bw, &pipe_config->fdi_m_n, false); 7264 + link_bw, &pipe_config->fdi_m_n, false, false); 7265 7265 7266 7266 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config); 7267 7267 if (ret == -EDEADLK) ··· 7508 7508 intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, 7509 7509 int pixel_clock, int link_clock, 7510 7510 struct intel_link_m_n *m_n, 7511 - bool constant_n) 7511 + bool constant_n, bool fec_enable) 7512 7512 { 7513 - m_n->tu = 64; 7513 + u32 data_clock = bits_per_pixel * pixel_clock; 7514 7514 7515 - compute_m_n(bits_per_pixel * pixel_clock, 7515 + if (fec_enable) 7516 + data_clock = intel_dp_mode_to_fec_clock(data_clock); 7517 + 7518 + m_n->tu = 64; 7519 + compute_m_n(data_clock, 7516 7520 link_clock * nlanes * 8, 7517 7521 &m_n->gmch_m, &m_n->gmch_n, 7518 7522 constant_n);
+1 -1
drivers/gpu/drm/i915/display/intel_display.h
··· 414 414 void intel_link_compute_m_n(u16 bpp, int nlanes, 415 415 int pixel_clock, int link_clock, 416 416 struct intel_link_m_n *m_n, 417 - bool constant_n); 417 + bool constant_n, bool fec_enable); 418 418 bool is_ccs_modifier(u64 modifier); 419 419 void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); 420 420 u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
+95 -89
drivers/gpu/drm/i915/display/intel_dp.c
··· 78 78 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 79 79 #define DP_DSC_MAX_ENC_THROUGHPUT_1 400000 80 80 81 - /* DP DSC FEC Overhead factor = (100 - 2.4)/100 */ 82 - #define DP_DSC_FEC_OVERHEAD_FACTOR 976 81 + /* DP DSC FEC Overhead factor = 1/(0.972261) */ 82 + #define DP_DSC_FEC_OVERHEAD_FACTOR 972261 83 83 84 84 /* Compliance test status bits */ 85 85 #define INTEL_DP_RESOLUTION_SHIFT_MASK 0 ··· 491 491 return -1; 492 492 } 493 493 494 + return 0; 495 + } 496 + 497 + u32 intel_dp_mode_to_fec_clock(u32 mode_clock) 498 + { 499 + return div_u64(mul_u32_u32(mode_clock, 1000000U), 500 + DP_DSC_FEC_OVERHEAD_FACTOR); 501 + } 502 + 503 + static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count, 504 + u32 mode_clock, u32 mode_hdisplay) 505 + { 506 + u32 bits_per_pixel, max_bpp_small_joiner_ram; 507 + int i; 508 + 509 + /* 510 + * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* 511 + * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP) 512 + * for SST -> TimeSlotsPerMTP is 1, 513 + * for MST -> TimeSlotsPerMTP has to be calculated 514 + */ 515 + bits_per_pixel = (link_clock * lane_count * 8) / 516 + intel_dp_mode_to_fec_clock(mode_clock); 517 + DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel); 518 + 519 + /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. 
width */ 520 + max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay; 521 + DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram); 522 + 523 + /* 524 + * Greatest allowed DSC BPP = MIN (output BPP from available Link BW 525 + * check, output bpp from small joiner RAM check) 526 + */ 527 + bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); 528 + 529 + /* Error out if the max bpp is less than smallest allowed valid bpp */ 530 + if (bits_per_pixel < valid_dsc_bpp[0]) { 531 + DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n", 532 + bits_per_pixel, valid_dsc_bpp[0]); 533 + return 0; 534 + } 535 + 536 + /* Find the nearest match in the array of known BPPs from VESA */ 537 + for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { 538 + if (bits_per_pixel < valid_dsc_bpp[i + 1]) 539 + break; 540 + } 541 + bits_per_pixel = valid_dsc_bpp[i]; 542 + 543 + /* 544 + * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, 545 + * fractional part is 0 546 + */ 547 + return bits_per_pixel << 4; 548 + } 549 + 550 + static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, 551 + int mode_clock, int mode_hdisplay) 552 + { 553 + u8 min_slice_count, i; 554 + int max_slice_width; 555 + 556 + if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) 557 + min_slice_count = DIV_ROUND_UP(mode_clock, 558 + DP_DSC_MAX_ENC_THROUGHPUT_0); 559 + else 560 + min_slice_count = DIV_ROUND_UP(mode_clock, 561 + DP_DSC_MAX_ENC_THROUGHPUT_1); 562 + 563 + max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); 564 + if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { 565 + DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n", 566 + max_slice_width); 567 + return 0; 568 + } 569 + /* Also take into account max slice width */ 570 + min_slice_count = min_t(u8, min_slice_count, 571 + DIV_ROUND_UP(mode_hdisplay, 572 + max_slice_width)); 573 + 574 + /* Find the closest match to the valid slice count values */ 575 + for (i = 0; i < 
ARRAY_SIZE(valid_dsc_slicecount); i++) { 576 + if (valid_dsc_slicecount[i] > 577 + drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 578 + false)) 579 + break; 580 + if (min_slice_count <= valid_dsc_slicecount[i]) 581 + return valid_dsc_slicecount[i]; 582 + } 583 + 584 + DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count); 494 585 return 0; 495 586 } 496 587 ··· 2317 2226 adjusted_mode->crtc_clock, 2318 2227 pipe_config->port_clock, 2319 2228 &pipe_config->dp_m_n, 2320 - constant_n); 2229 + constant_n, pipe_config->fec_enable); 2321 2230 2322 2231 if (intel_connector->panel.downclock_mode != NULL && 2323 2232 dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) { ··· 2327 2236 intel_connector->panel.downclock_mode->clock, 2328 2237 pipe_config->port_clock, 2329 2238 &pipe_config->dp_m2_n2, 2330 - constant_n); 2239 + constant_n, pipe_config->fec_enable); 2331 2240 } 2332 2241 2333 2242 if (!HAS_DDI(dev_priv)) ··· 4412 4321 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, 4413 4322 sink_irq_vector, DP_DPRX_ESI_LEN) == 4414 4323 DP_DPRX_ESI_LEN; 4415 - } 4416 - 4417 - u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, 4418 - int mode_clock, int mode_hdisplay) 4419 - { 4420 - u16 bits_per_pixel, max_bpp_small_joiner_ram; 4421 - int i; 4422 - 4423 - /* 4424 - * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)* 4425 - * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP) 4426 - * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1, 4427 - * for MST -> TimeSlotsPerMTP has to be calculated 4428 - */ 4429 - bits_per_pixel = (link_clock * lane_count * 8 * 4430 - DP_DSC_FEC_OVERHEAD_FACTOR) / 4431 - mode_clock; 4432 - 4433 - /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. 
width */ 4434 - max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / 4435 - mode_hdisplay; 4436 - 4437 - /* 4438 - * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW 4439 - * check, output bpp from small joiner RAM check) 4440 - */ 4441 - bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram); 4442 - 4443 - /* Error out if the max bpp is less than smallest allowed valid bpp */ 4444 - if (bits_per_pixel < valid_dsc_bpp[0]) { 4445 - DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel); 4446 - return 0; 4447 - } 4448 - 4449 - /* Find the nearest match in the array of known BPPs from VESA */ 4450 - for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) { 4451 - if (bits_per_pixel < valid_dsc_bpp[i + 1]) 4452 - break; 4453 - } 4454 - bits_per_pixel = valid_dsc_bpp[i]; 4455 - 4456 - /* 4457 - * Compressed BPP in U6.4 format so multiply by 16, for Gen 11, 4458 - * fractional part is 0 4459 - */ 4460 - return bits_per_pixel << 4; 4461 - } 4462 - 4463 - u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, 4464 - int mode_clock, 4465 - int mode_hdisplay) 4466 - { 4467 - u8 min_slice_count, i; 4468 - int max_slice_width; 4469 - 4470 - if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE) 4471 - min_slice_count = DIV_ROUND_UP(mode_clock, 4472 - DP_DSC_MAX_ENC_THROUGHPUT_0); 4473 - else 4474 - min_slice_count = DIV_ROUND_UP(mode_clock, 4475 - DP_DSC_MAX_ENC_THROUGHPUT_1); 4476 - 4477 - max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd); 4478 - if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) { 4479 - DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n", 4480 - max_slice_width); 4481 - return 0; 4482 - } 4483 - /* Also take into account max slice width */ 4484 - min_slice_count = min_t(u8, min_slice_count, 4485 - DIV_ROUND_UP(mode_hdisplay, 4486 - max_slice_width)); 4487 - 4488 - /* Find the closest match to the valid slice count values */ 4489 - for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) { 4490 - if 
(valid_dsc_slicecount[i] > 4491 - drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, 4492 - false)) 4493 - break; 4494 - if (min_slice_count <= valid_dsc_slicecount[i]) 4495 - return valid_dsc_slicecount[i]; 4496 - } 4497 - 4498 - DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count); 4499 - return 0; 4500 4324 } 4501 4325 4502 4326 static void
+2 -4
drivers/gpu/drm/i915/display/intel_dp.h
··· 102 102 bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp); 103 103 bool 104 104 intel_dp_get_link_status(struct intel_dp *intel_dp, u8 *link_status); 105 - u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count, 106 - int mode_clock, int mode_hdisplay); 107 - u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, int mode_clock, 108 - int mode_hdisplay); 109 105 110 106 bool intel_dp_read_dpcd(struct intel_dp *intel_dp); 111 107 bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); ··· 113 117 { 114 118 return ~((1 << lane_count) - 1) & 0xf; 115 119 } 120 + 121 + u32 intel_dp_mode_to_fec_clock(u32 mode_clock); 116 122 117 123 #endif /* __INTEL_DP_H__ */
+2 -2
drivers/gpu/drm/i915/display/intel_dp_mst.c
··· 81 81 adjusted_mode->crtc_clock, 82 82 crtc_state->port_clock, 83 83 &crtc_state->dp_m_n, 84 - constant_n); 84 + constant_n, crtc_state->fec_enable); 85 85 crtc_state->dp_m_n.tu = slots; 86 86 87 87 return 0; ··· 615 615 intel_encoder->type = INTEL_OUTPUT_DP_MST; 616 616 intel_encoder->power_domain = intel_dig_port->base.power_domain; 617 617 intel_encoder->port = intel_dig_port->base.port; 618 - intel_encoder->crtc_mask = BIT(pipe); 618 + intel_encoder->crtc_mask = 0x7; 619 619 intel_encoder->cloneable = 0; 620 620 621 621 intel_encoder->compute_config = intel_dp_mst_compute_config;
+3 -2
drivers/gpu/drm/i915/display/intel_sprite.c
··· 1528 1528 int src_x, src_w, src_h, crtc_w, crtc_h; 1529 1529 const struct drm_display_mode *adjusted_mode = 1530 1530 &crtc_state->base.adjusted_mode; 1531 + unsigned int stride = plane_state->color_plane[0].stride; 1531 1532 unsigned int cpp = fb->format->cpp[0]; 1532 1533 unsigned int width_bytes; 1533 1534 int min_width, min_height; ··· 1570 1569 return -EINVAL; 1571 1570 } 1572 1571 1573 - if (width_bytes > 4096 || fb->pitches[0] > 4096) { 1572 + if (stride > 4096) { 1574 1573 DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n", 1575 - fb->pitches[0], 4096); 1574 + stride, 4096); 1576 1575 return -EINVAL; 1577 1576 } 1578 1577
+1 -1
drivers/gpu/drm/omapdrm/dss/dss.c
··· 1083 1083 1084 1084 static const struct dss_features omap3630_dss_feats = { 1085 1085 .model = DSS_MODEL_OMAP3, 1086 - .fck_div_max = 32, 1086 + .fck_div_max = 31, 1087 1087 .fck_freq_max = 173000000, 1088 1088 .dss_fck_multiplier = 1, 1089 1089 .parent_clk_name = "dpll4_ck",
+2 -2
drivers/gpu/drm/rcar-du/rcar_du_writeback.c
··· 147 147 struct drm_device *dev = encoder->dev; 148 148 struct drm_framebuffer *fb; 149 149 150 - if (!conn_state->writeback_job || !conn_state->writeback_job->fb) 150 + if (!conn_state->writeback_job) 151 151 return 0; 152 152 153 153 fb = conn_state->writeback_job->fb; ··· 221 221 unsigned int i; 222 222 223 223 state = rcrtc->writeback.base.state; 224 - if (!state || !state->writeback_job || !state->writeback_job->fb) 224 + if (!state || !state->writeback_job) 225 225 return; 226 226 227 227 fb = state->writeback_job->fb;
+1
drivers/gpu/drm/tilcdc/tilcdc_tfp410.c
··· 7 7 #include <linux/gpio.h> 8 8 #include <linux/mod_devicetable.h> 9 9 #include <linux/of_gpio.h> 10 + #include <linux/pinctrl/consumer.h> 10 11 #include <linux/platform_device.h> 11 12 12 13 #include <drm/drm_atomic_helper.h>
+2 -3
drivers/gpu/drm/vc4/vc4_txp.c
··· 231 231 int i; 232 232 233 233 conn_state = drm_atomic_get_new_connector_state(state, conn); 234 - if (!conn_state->writeback_job || !conn_state->writeback_job->fb) 234 + if (!conn_state->writeback_job) 235 235 return 0; 236 236 237 237 crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); ··· 271 271 u32 ctrl; 272 272 int i; 273 273 274 - if (WARN_ON(!conn_state->writeback_job || 275 - !conn_state->writeback_job->fb)) 274 + if (WARN_ON(!conn_state->writeback_job)) 276 275 return; 277 276 278 277 mode = &conn_state->crtc->state->adjusted_mode;
+2
include/uapi/drm/amdgpu_drm.h
··· 1003 1003 __u64 high_va_max; 1004 1004 /* gfx10 pa_sc_tile_steering_override */ 1005 1005 __u32 pa_sc_tile_steering_override; 1006 + /* disabled TCCs */ 1007 + __u64 tcc_disabled_mask; 1006 1008 }; 1007 1009 1008 1010 struct drm_amdgpu_info_hw_ip {