Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-fixes-2021-01-15' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
"Regular fixes for rc4, a bunch of fixes across i915, amdgpu and
nouveau here, along with a couple of TTM fixes, a dma-buf fix, and one
core pageflip/modifier interaction fix.

One notable i915 fix is a HSW GT1 regression fix that has been
outstanding for quite a while. (Thanks to Matt Turner for kicking
Intel into getting it fixed).

dma-buf:
- Fix a memory leak in CMA heap

core:
- Fix format check for legacy pageflips

ttm:
- Pass correct address to dma_mapping_error()
- Use mutex in pool shrinker

i915:
- Allow the sysadmin to override security mitigations
- Restore clear-residual mitigations for ivb/byt
- Limit VFE threads based on GT
- GVT: fix vfio edid and full display detection
- Fix DSI DSC power refcounting
- Fix LPT CPU mode backlight takeover
- Disable RPM wakeref assertions during driver shutdown
- Fix DSI sequence sleeps

amdgpu:
- Update repo location in MAINTAINERS
- Add some new renoir PCI IDs
- Revert CRC UAPI changes
- Revert OLED display fix which causes clocking problems for some systems
- Misc vangogh fixes
- GFX fix for sienna cichlid
- DCN1.0 fix for pipe split
- Fix incorrect PSP command

amdkfd:
- Fix possible out of bounds read in vcrat creation

nouveau:
- irq handling fix
- expansion ROM fix
- hw init dpcd disable
- aux semaphore owner field fix
- vram heap sizing fix
- notifier at 0 is valid fix"

* tag 'drm-fixes-2021-01-15' of git://anongit.freedesktop.org/drm/drm: (37 commits)
drm/nouveau/kms/nv50-: fix case where notifier buffer is at offset 0
drm/nouveau/mmu: fix vram heap sizing
drm/nouveau/i2c/gm200: increase width of aux semaphore owner fields
drm/nouveau/i2c/gk110-: disable hw-initiated dpcd reads
drm/nouveau/i2c/gk110: split out from i2c/gk104
drm/nouveau/privring: ack interrupts the same way as RM
drm/nouveau/bios: fix issue shadowing expansion ROMs
drm/amd/display: Fix to be able to stop crc calculation
Revert "drm/amd/display: Expose new CRC window property"
Revert "drm/amdgpu/disply: fix documentation warnings in display manager"
Revert "drm/amd/display: Fix unused variable warning"
drm/amdgpu: set power brake sequence
drm/amdgpu: add new device id for Renior
drm/amdgpu: add green_sardine device id (v2)
drm/amdgpu: fix vram type and bandwidth error for DDR5 and DDR4
drm/amdgpu/gfx10: add updated GOLDEN_TSC_COUNT_UPPER/LOWER register offsets for VGH
drm/amdkfd: Fix out-of-bounds read in kdf_create_vcrat_image_cpu()
Revert "drm/amd/display: Fixed Intermittent blue screen on OLED panel"
drm/amd/display: disable dcn10 pipe split by default
drm/amd/display: Add a missing DCN3.01 API mapping
...

+600 -412
+2 -2
MAINTAINERS
··· 906 906 M: Felix Kuehling <Felix.Kuehling@amd.com> 907 907 L: amd-gfx@lists.freedesktop.org 908 908 S: Supported 909 - T: git git://people.freedesktop.org/~agd5f/linux 909 + T: git https://gitlab.freedesktop.org/agd5f/linux.git 910 910 F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd*.[ch] 911 911 F: drivers/gpu/drm/amd/amdkfd/ 912 912 F: drivers/gpu/drm/amd/include/cik_structs.h ··· 14812 14812 M: Christian König <christian.koenig@amd.com> 14813 14813 L: amd-gfx@lists.freedesktop.org 14814 14814 S: Supported 14815 - T: git git://people.freedesktop.org/~agd5f/linux 14815 + T: git https://gitlab.freedesktop.org/agd5f/linux.git 14816 14816 F: drivers/gpu/drm/amd/ 14817 14817 F: drivers/gpu/drm/radeon/ 14818 14818 F: include/uapi/drm/amdgpu_drm.h
+3
drivers/dma-buf/heaps/cma_heap.c
··· 251 251 buffer->vaddr = NULL; 252 252 } 253 253 254 + /* free page list */ 255 + kfree(buffer->pages); 256 + /* release memory */ 254 257 cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount); 255 258 kfree(buffer); 256 259 }
+36 -17
drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c
··· 112 112 union igp_info { 113 113 struct atom_integrated_system_info_v1_11 v11; 114 114 struct atom_integrated_system_info_v1_12 v12; 115 + struct atom_integrated_system_info_v2_1 v21; 115 116 }; 116 117 117 118 union umc_info { ··· 210 209 if (adev->flags & AMD_IS_APU) { 211 210 igp_info = (union igp_info *) 212 211 (mode_info->atom_context->bios + data_offset); 213 - switch (crev) { 214 - case 11: 215 - mem_channel_number = igp_info->v11.umachannelnumber; 216 - /* channel width is 64 */ 217 - if (vram_width) 218 - *vram_width = mem_channel_number * 64; 219 - mem_type = igp_info->v11.memorytype; 220 - if (vram_type) 221 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 212 + switch (frev) { 213 + case 1: 214 + switch (crev) { 215 + case 11: 216 + case 12: 217 + mem_channel_number = igp_info->v11.umachannelnumber; 218 + if (!mem_channel_number) 219 + mem_channel_number = 1; 220 + /* channel width is 64 */ 221 + if (vram_width) 222 + *vram_width = mem_channel_number * 64; 223 + mem_type = igp_info->v11.memorytype; 224 + if (vram_type) 225 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 226 + break; 227 + default: 228 + return -EINVAL; 229 + } 222 230 break; 223 - case 12: 224 - mem_channel_number = igp_info->v12.umachannelnumber; 225 - /* channel width is 64 */ 226 - if (vram_width) 227 - *vram_width = mem_channel_number * 64; 228 - mem_type = igp_info->v12.memorytype; 229 - if (vram_type) 230 - *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 231 + case 2: 232 + switch (crev) { 233 + case 1: 234 + case 2: 235 + mem_channel_number = igp_info->v21.umachannelnumber; 236 + if (!mem_channel_number) 237 + mem_channel_number = 1; 238 + /* channel width is 64 */ 239 + if (vram_width) 240 + *vram_width = mem_channel_number * 64; 241 + mem_type = igp_info->v21.memorytype; 242 + if (vram_type) 243 + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); 244 + break; 245 + default: 246 + return -EINVAL; 247 + } 
231 248 break; 232 249 default: 233 250 return -EINVAL;
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 3034 3034 #endif 3035 3035 default: 3036 3036 if (amdgpu_dc > 0) 3037 - DRM_INFO("Display Core has been requested via kernel parameter " 3037 + DRM_INFO_ONCE("Display Core has been requested via kernel parameter " 3038 3038 "but isn't supported by ASIC, ignoring\n"); 3039 3039 return false; 3040 3040 }
+2
drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
··· 1085 1085 1086 1086 /* Renoir */ 1087 1087 {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 1088 + {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 1089 + {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, 1088 1090 1089 1091 /* Navi12 */ 1090 1092 {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
+46 -2
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
··· 99 99 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580 100 100 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0 101 101 102 + #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025 103 + #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1 104 + #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026 105 + #define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX 1 102 106 #define mmSPI_CONFIG_CNTL_1_Vangogh 0x2441 103 107 #define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX 1 104 108 #define mmVGT_TF_MEMORY_BASE_HI_Vangogh 0x2261 ··· 163 159 #define mmGCUTCL2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0 164 160 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid 0x15db 165 161 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX 0 162 + 163 + #define mmGC_THROTTLE_CTRL_Sienna_Cichlid 0x2030 164 + #define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX 0 166 165 167 166 MODULE_FIRMWARE("amdgpu/navi10_ce.bin"); 168 167 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin"); ··· 3331 3324 static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure); 3332 3325 static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev); 3333 3326 static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev); 3327 + static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev); 3334 3328 3335 3329 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask) 3336 3330 { ··· 7200 7192 if (adev->asic_type == CHIP_SIENNA_CICHLID) 7201 7193 gfx_v10_3_program_pbb_mode(adev); 7202 7194 7195 + if (adev->asic_type >= CHIP_SIENNA_CICHLID) 7196 + gfx_v10_3_set_power_brake_sequence(adev); 7197 + 7203 7198 return r; 7204 7199 } 7205 7200 ··· 7388 7377 7389 7378 amdgpu_gfx_off_ctrl(adev, false); 7390 7379 mutex_lock(&adev->gfx.gpu_clock_mutex); 7391 - clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) | 7392 - ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL); 7380 + switch (adev->asic_type) { 7381 + case CHIP_VANGOGH: 7382 + clock 
= (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) | 7383 + ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL); 7384 + break; 7385 + default: 7386 + clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) | 7387 + ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL); 7388 + break; 7389 + } 7393 7390 mutex_unlock(&adev->gfx.gpu_clock_mutex); 7394 7391 amdgpu_gfx_off_ctrl(adev, true); 7395 7392 return clock; ··· 9186 9167 break; 9187 9168 } 9188 9169 } 9170 + } 9171 + 9172 + static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev) 9173 + { 9174 + WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX, 9175 + (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) | 9176 + (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) | 9177 + (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT)); 9178 + 9179 + WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL); 9180 + WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, 9181 + (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) | 9182 + (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) | 9183 + (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) | 9184 + (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT)); 9185 + 9186 + WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid, 9187 + (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) | 9188 + (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) | 9189 + (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT)); 9190 + 9191 + WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL); 9192 + 9193 + WREG32_SOC15(GC, 0, mmDIDT_IND_DATA, 9194 + (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT)); 9189 9195 } 9190 9196 9191 9197 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
+1 -1
drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
··· 47 47 GFX_CTRL_CMD_ID_DISABLE_INT = 0x00060000, /* disable PSP-to-Gfx interrupt */ 48 48 GFX_CTRL_CMD_ID_MODE1_RST = 0x00070000, /* trigger the Mode 1 reset */ 49 49 GFX_CTRL_CMD_ID_GBR_IH_SET = 0x00080000, /* set Gbr IH_RB_CNTL registers */ 50 - GFX_CTRL_CMD_ID_CONSUME_CMD = 0x000A0000, /* send interrupt to psp for updating write pointer of vf */ 50 + GFX_CTRL_CMD_ID_CONSUME_CMD = 0x00090000, /* send interrupt to psp for updating write pointer of vf */ 51 51 GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */ 52 52 53 53 GFX_CTRL_CMD_ID_MAX = 0x000F0000, /* max command ID */
+2 -1
drivers/gpu/drm/amd/amdgpu/soc15.c
··· 1239 1239 break; 1240 1240 case CHIP_RENOIR: 1241 1241 adev->asic_funcs = &soc15_asic_funcs; 1242 - if (adev->pdev->device == 0x1636) 1242 + if ((adev->pdev->device == 0x1636) || 1243 + (adev->pdev->device == 0x164c)) 1243 1244 adev->apu_flags |= AMD_APU_IS_RENOIR; 1244 1245 else 1245 1246 adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
+7 -4
drivers/gpu/drm/amd/amdkfd/kfd_crat.c
··· 1040 1040 (struct crat_subtype_iolink *)sub_type_hdr); 1041 1041 if (ret < 0) 1042 1042 return ret; 1043 - crat_table->length += (sub_type_hdr->length * entries); 1044 - crat_table->total_entries += entries; 1045 1043 1046 - sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + 1047 - sub_type_hdr->length * entries); 1044 + if (entries) { 1045 + crat_table->length += (sub_type_hdr->length * entries); 1046 + crat_table->total_entries += entries; 1047 + 1048 + sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + 1049 + sub_type_hdr->length * entries); 1050 + } 1048 1051 #else 1049 1052 pr_info("IO link not available for non x86 platforms\n"); 1050 1053 #endif
+9 -133
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 939 939 } 940 940 #endif 941 941 942 - #ifdef CONFIG_DEBUG_FS 943 - static int create_crtc_crc_properties(struct amdgpu_display_manager *dm) 944 - { 945 - dm->crc_win_x_start_property = 946 - drm_property_create_range(adev_to_drm(dm->adev), 947 - DRM_MODE_PROP_ATOMIC, 948 - "AMD_CRC_WIN_X_START", 0, U16_MAX); 949 - if (!dm->crc_win_x_start_property) 950 - return -ENOMEM; 951 - 952 - dm->crc_win_y_start_property = 953 - drm_property_create_range(adev_to_drm(dm->adev), 954 - DRM_MODE_PROP_ATOMIC, 955 - "AMD_CRC_WIN_Y_START", 0, U16_MAX); 956 - if (!dm->crc_win_y_start_property) 957 - return -ENOMEM; 958 - 959 - dm->crc_win_x_end_property = 960 - drm_property_create_range(adev_to_drm(dm->adev), 961 - DRM_MODE_PROP_ATOMIC, 962 - "AMD_CRC_WIN_X_END", 0, U16_MAX); 963 - if (!dm->crc_win_x_end_property) 964 - return -ENOMEM; 965 - 966 - dm->crc_win_y_end_property = 967 - drm_property_create_range(adev_to_drm(dm->adev), 968 - DRM_MODE_PROP_ATOMIC, 969 - "AMD_CRC_WIN_Y_END", 0, U16_MAX); 970 - if (!dm->crc_win_y_end_property) 971 - return -ENOMEM; 972 - 973 - return 0; 974 - } 975 - #endif 976 - 977 942 static int amdgpu_dm_init(struct amdgpu_device *adev) 978 943 { 979 944 struct dc_init_data init_data; ··· 1085 1120 1086 1121 dc_init_callbacks(adev->dm.dc, &init_params); 1087 1122 } 1088 - #endif 1089 - #ifdef CONFIG_DEBUG_FS 1090 - if (create_crtc_crc_properties(&adev->dm)) 1091 - DRM_ERROR("amdgpu: failed to create crc property.\n"); 1092 1123 #endif 1093 1124 if (amdgpu_dm_initialize_drm_device(adev)) { 1094 1125 DRM_ERROR( ··· 5294 5333 state->crc_src = cur->crc_src; 5295 5334 state->cm_has_degamma = cur->cm_has_degamma; 5296 5335 state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb; 5297 - #ifdef CONFIG_DEBUG_FS 5298 - state->crc_window = cur->crc_window; 5299 - #endif 5336 + 5300 5337 /* TODO Duplicate dc_stream after objects are stream object is flattened */ 5301 5338 5302 5339 return &state->base; 5303 5340 } 5304 - 5305 - #ifdef CONFIG_DEBUG_FS 5306 - static 
int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc, 5307 - struct drm_crtc_state *crtc_state, 5308 - struct drm_property *property, 5309 - uint64_t val) 5310 - { 5311 - struct drm_device *dev = crtc->dev; 5312 - struct amdgpu_device *adev = drm_to_adev(dev); 5313 - struct dm_crtc_state *dm_new_state = 5314 - to_dm_crtc_state(crtc_state); 5315 - 5316 - if (property == adev->dm.crc_win_x_start_property) 5317 - dm_new_state->crc_window.x_start = val; 5318 - else if (property == adev->dm.crc_win_y_start_property) 5319 - dm_new_state->crc_window.y_start = val; 5320 - else if (property == adev->dm.crc_win_x_end_property) 5321 - dm_new_state->crc_window.x_end = val; 5322 - else if (property == adev->dm.crc_win_y_end_property) 5323 - dm_new_state->crc_window.y_end = val; 5324 - else 5325 - return -EINVAL; 5326 - 5327 - return 0; 5328 - } 5329 - 5330 - static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc, 5331 - const struct drm_crtc_state *state, 5332 - struct drm_property *property, 5333 - uint64_t *val) 5334 - { 5335 - struct drm_device *dev = crtc->dev; 5336 - struct amdgpu_device *adev = drm_to_adev(dev); 5337 - struct dm_crtc_state *dm_state = 5338 - to_dm_crtc_state(state); 5339 - 5340 - if (property == adev->dm.crc_win_x_start_property) 5341 - *val = dm_state->crc_window.x_start; 5342 - else if (property == adev->dm.crc_win_y_start_property) 5343 - *val = dm_state->crc_window.y_start; 5344 - else if (property == adev->dm.crc_win_x_end_property) 5345 - *val = dm_state->crc_window.x_end; 5346 - else if (property == adev->dm.crc_win_y_end_property) 5347 - *val = dm_state->crc_window.y_end; 5348 - else 5349 - return -EINVAL; 5350 - 5351 - return 0; 5352 - } 5353 - #endif 5354 5341 5355 5342 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable) 5356 5343 { ··· 5366 5457 .enable_vblank = dm_enable_vblank, 5367 5458 .disable_vblank = dm_disable_vblank, 5368 5459 .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp, 
5369 - #ifdef CONFIG_DEBUG_FS 5370 - .atomic_set_property = amdgpu_dm_crtc_atomic_set_property, 5371 - .atomic_get_property = amdgpu_dm_crtc_atomic_get_property, 5372 - #endif 5373 5460 }; 5374 5461 5375 5462 static enum drm_connector_status ··· 6567 6662 return 0; 6568 6663 } 6569 6664 6570 - #ifdef CONFIG_DEBUG_FS 6571 - static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm, 6572 - struct amdgpu_crtc *acrtc) 6573 - { 6574 - drm_object_attach_property(&acrtc->base.base, 6575 - dm->crc_win_x_start_property, 6576 - 0); 6577 - drm_object_attach_property(&acrtc->base.base, 6578 - dm->crc_win_y_start_property, 6579 - 0); 6580 - drm_object_attach_property(&acrtc->base.base, 6581 - dm->crc_win_x_end_property, 6582 - 0); 6583 - drm_object_attach_property(&acrtc->base.base, 6584 - dm->crc_win_y_end_property, 6585 - 0); 6586 - } 6587 - #endif 6588 - 6589 6665 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, 6590 6666 struct drm_plane *plane, 6591 6667 uint32_t crtc_index) ··· 6614 6728 drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, 6615 6729 true, MAX_COLOR_LUT_ENTRIES); 6616 6730 drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); 6617 - #ifdef CONFIG_DEBUG_FS 6618 - attach_crtc_crc_properties(dm, acrtc); 6619 - #endif 6731 + 6620 6732 return 0; 6621 6733 6622 6734 fail: ··· 8251 8367 */ 8252 8368 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { 8253 8369 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); 8254 - bool configure_crc = false; 8255 8370 8256 8371 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8257 8372 ··· 8260 8377 dc_stream_retain(dm_new_crtc_state->stream); 8261 8378 acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; 8262 8379 manage_dm_interrupts(adev, acrtc, true); 8263 - } 8264 - if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active && 8265 - amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) { 8380 + 8381 + #ifdef 
CONFIG_DEBUG_FS 8266 8382 /** 8267 8383 * Frontend may have changed so reapply the CRC capture 8268 8384 * settings for the stream. 8269 8385 */ 8270 8386 dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); 8271 - dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); 8272 8387 8273 - if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) { 8274 - if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state)) 8275 - configure_crc = true; 8276 - } else { 8277 - if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state)) 8278 - configure_crc = true; 8279 - } 8280 - 8281 - if (configure_crc) 8388 + if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) { 8282 8389 amdgpu_dm_crtc_configure_crc_source( 8283 - crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src); 8390 + crtc, dm_new_crtc_state, 8391 + dm_new_crtc_state->crc_src); 8392 + } 8393 + #endif 8284 8394 } 8285 8395 } 8286 8396
-38
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
··· 336 336 */ 337 337 const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; 338 338 339 - #ifdef CONFIG_DEBUG_FS 340 - /** 341 - * @crc_win_x_start_property: 342 - * 343 - * X start of the crc calculation window 344 - */ 345 - struct drm_property *crc_win_x_start_property; 346 - /** 347 - * @crc_win_y_start_property: 348 - * 349 - * Y start of the crc calculation window 350 - */ 351 - struct drm_property *crc_win_y_start_property; 352 - /** 353 - * @crc_win_x_end_property: 354 - * 355 - * X end of the crc calculation window 356 - */ 357 - struct drm_property *crc_win_x_end_property; 358 - /** 359 - * @crc_win_y_end_property: 360 - * 361 - * Y end of the crc calculation window 362 - */ 363 - struct drm_property *crc_win_y_end_property; 364 - #endif 365 339 /** 366 340 * @mst_encoders: 367 341 * ··· 422 448 struct dc_plane_state *dc_state; 423 449 }; 424 450 425 - #ifdef CONFIG_DEBUG_FS 426 - struct crc_rec { 427 - uint16_t x_start; 428 - uint16_t y_start; 429 - uint16_t x_end; 430 - uint16_t y_end; 431 - }; 432 - #endif 433 - 434 451 struct dm_crtc_state { 435 452 struct drm_crtc_state base; 436 453 struct dc_stream_state *stream; ··· 444 479 struct dc_info_packet vrr_infopacket; 445 480 446 481 int abm_level; 447 - #ifdef CONFIG_DEBUG_FS 448 - struct crc_rec crc_window; 449 - #endif 450 482 }; 451 483 452 484 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
+1 -53
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
··· 81 81 return pipe_crc_sources; 82 82 } 83 83 84 - static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state) 85 - { 86 - dm_crtc_state->crc_window.x_start = 0; 87 - dm_crtc_state->crc_window.y_start = 0; 88 - dm_crtc_state->crc_window.x_end = 0; 89 - dm_crtc_state->crc_window.y_end = 0; 90 - } 91 - 92 - bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state) 93 - { 94 - bool ret = true; 95 - 96 - if ((dm_crtc_state->crc_window.x_start != 0) || 97 - (dm_crtc_state->crc_window.y_start != 0) || 98 - (dm_crtc_state->crc_window.x_end != 0) || 99 - (dm_crtc_state->crc_window.y_end != 0)) 100 - ret = false; 101 - 102 - return ret; 103 - } 104 - 105 - bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state, 106 - struct dm_crtc_state *dm_old_crtc_state) 107 - { 108 - bool ret = false; 109 - 110 - if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) || 111 - (dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) || 112 - (dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) || 113 - (dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end)) 114 - ret = true; 115 - 116 - return ret; 117 - } 118 - 119 84 int 120 85 amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name, 121 86 size_t *values_cnt) ··· 105 140 struct dc_stream_state *stream_state = dm_crtc_state->stream; 106 141 bool enable = amdgpu_dm_is_valid_crc_source(source); 107 142 int ret = 0; 108 - struct crc_params *crc_window = NULL, tmp_window; 109 143 110 144 /* Configuration will be deferred to stream enable. */ 111 145 if (!stream_state) ··· 114 150 115 151 /* Enable CRTC CRC generation if necessary. 
*/ 116 152 if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { 117 - if (!enable) 118 - amdgpu_dm_set_crc_window_default(dm_crtc_state); 119 - 120 - if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) { 121 - crc_window = &tmp_window; 122 - 123 - tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start; 124 - tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start; 125 - tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end; 126 - tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end; 127 - tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start; 128 - tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start; 129 - tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end; 130 - tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end; 131 - } 132 - 133 153 if (!dc_stream_configure_crc(stream_state->ctx->dc, 134 - stream_state, crc_window, enable, enable)) { 154 + stream_state, NULL, enable, enable)) { 135 155 ret = -EINVAL; 136 156 goto unlock; 137 157 }
+1 -4
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h
··· 46 46 } 47 47 48 48 /* amdgpu_dm_crc.c */ 49 - bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state); 50 - bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state, 51 - struct dm_crtc_state *dm_old_crtc_state); 49 + #ifdef CONFIG_DEBUG_FS 52 50 int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, 53 51 struct dm_crtc_state *dm_crtc_state, 54 52 enum amdgpu_dm_pipe_crc_source source); 55 - #ifdef CONFIG_DEBUG_FS 56 53 int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name); 57 54 int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, 58 55 const char *src_name,
+1 -1
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_mpc.c
··· 470 470 unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id) 471 471 { 472 472 struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc); 473 - uint32_t val = 0; 473 + uint32_t val = 0xf; 474 474 475 475 if (opp_id < MAX_OPP && REG(MUX[opp_id])) 476 476 REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
+2 -2
drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
··· 608 608 .disable_pplib_clock_request = false, 609 609 .disable_pplib_wm_range = false, 610 610 .pplib_wm_report_mode = WM_REPORT_DEFAULT, 611 - .pipe_split_policy = MPC_SPLIT_DYNAMIC, 612 - .force_single_disp_pipe_split = true, 611 + .pipe_split_policy = MPC_SPLIT_AVOID, 612 + .force_single_disp_pipe_split = false, 613 613 .disable_dcc = DCC_ENABLE, 614 614 .voltage_align_fclk = true, 615 615 .disable_stereo_support = true,
+1
drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
··· 1731 1731 .populate_dml_pipes = dcn30_populate_dml_pipes_from_context, 1732 1732 .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, 1733 1733 .add_stream_to_ctx = dcn30_add_stream_to_ctx, 1734 + .add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource, 1734 1735 .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, 1735 1736 .populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context, 1736 1737 .set_mcif_arb_params = dcn30_set_mcif_arb_params,
+6 -5
drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
··· 2635 2635 } 2636 2636 2637 2637 if (mode_lib->vba.DRAMClockChangeSupportsVActive && 2638 - mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 && 2639 - mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { 2638 + mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) { 2640 2639 mode_lib->vba.DRAMClockChangeWatermark += 25; 2641 2640 2642 2641 for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) { 2643 - if (mode_lib->vba.DRAMClockChangeWatermark > 2644 - dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark)) 2645 - mode_lib->vba.MinTTUVBlank[k] += 25; 2642 + if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) { 2643 + if (mode_lib->vba.DRAMClockChangeWatermark > 2644 + dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark)) 2645 + mode_lib->vba.MinTTUVBlank[k] += 25; 2646 + } 2646 2647 } 2647 2648 2648 2649 mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
+8 -1
drivers/gpu/drm/drm_plane.c
··· 1163 1163 if (ret) 1164 1164 goto out; 1165 1165 1166 - if (old_fb->format != fb->format) { 1166 + /* 1167 + * Only check the FOURCC format code, excluding modifiers. This is 1168 + * enough for all legacy drivers. Atomic drivers have their own 1169 + * checks in their ->atomic_check implementation, which will 1170 + * return -EINVAL if any hw or driver constraint is violated due 1171 + * to modifier changes. 1172 + */ 1173 + if (old_fb->format->format != fb->format->format) { 1167 1174 DRM_DEBUG_KMS("Page flip is not allowed to change frame buffer format.\n"); 1168 1175 ret = -EINVAL; 1169 1176 goto out;
+1
drivers/gpu/drm/i915/Makefile
··· 38 38 i915_config.o \ 39 39 i915_irq.o \ 40 40 i915_getparam.o \ 41 + i915_mitigations.o \ 41 42 i915_params.o \ 42 43 i915_pci.o \ 43 44 i915_scatterlist.o \
-4
drivers/gpu/drm/i915/display/icl_dsi.c
··· 1616 1616 1617 1617 get_dsi_io_power_domains(i915, 1618 1618 enc_to_intel_dsi(encoder)); 1619 - 1620 - if (crtc_state->dsc.compression_enable) 1621 - intel_display_power_get(i915, 1622 - intel_dsc_power_domain(crtc_state)); 1623 1619 } 1624 1620 1625 1621 static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
+5 -4
drivers/gpu/drm/i915/display/intel_panel.c
··· 1650 1650 val = pch_get_backlight(connector); 1651 1651 else 1652 1652 val = lpt_get_backlight(connector); 1653 - val = intel_panel_compute_brightness(connector, val); 1654 - panel->backlight.level = clamp(val, panel->backlight.min, 1655 - panel->backlight.max); 1656 1653 1657 1654 if (cpu_mode) { 1658 1655 drm_dbg_kms(&dev_priv->drm, 1659 1656 "CPU backlight register was enabled, switching to PCH override\n"); 1660 1657 1661 1658 /* Write converted CPU PWM value to PCH override register */ 1662 - lpt_set_backlight(connector->base.state, panel->backlight.level); 1659 + lpt_set_backlight(connector->base.state, val); 1663 1660 intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, 1664 1661 pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE); 1665 1662 1666 1663 intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, 1667 1664 cpu_ctl2 & ~BLM_PWM_ENABLE); 1668 1665 } 1666 + 1667 + val = intel_panel_compute_brightness(connector, val); 1668 + panel->backlight.level = clamp(val, panel->backlight.min, 1669 + panel->backlight.max); 1669 1670 1670 1671 return 0; 1671 1672 }
+13 -3
drivers/gpu/drm/i915/display/vlv_dsi.c
··· 812 812 intel_dsi_prepare(encoder, pipe_config); 813 813 814 814 intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_POWER_ON); 815 - intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); 816 815 817 - /* Deassert reset */ 818 - intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); 816 + /* 817 + * Give the panel time to power-on and then deassert its reset. 818 + * Depending on the VBT MIPI sequences version the deassert-seq 819 + * may contain the necessary delay, intel_dsi_msleep() will skip 820 + * the delay in that case. If there is no deassert-seq, then an 821 + * unconditional msleep is used to give the panel time to power-on. 822 + */ 823 + if (dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) { 824 + intel_dsi_msleep(intel_dsi, intel_dsi->panel_on_delay); 825 + intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DEASSERT_RESET); 826 + } else { 827 + msleep(intel_dsi->panel_on_delay); 828 + } 819 829 820 830 if (IS_GEMINILAKE(dev_priv)) { 821 831 glk_cold_boot = glk_dsi_enable_io(encoder);
+94 -63
drivers/gpu/drm/i915/gt/gen7_renderclear.c
··· 7 7 #include "i915_drv.h" 8 8 #include "intel_gpu_commands.h" 9 9 10 - #define MAX_URB_ENTRIES 64 11 - #define STATE_SIZE (4 * 1024) 12 10 #define GT3_INLINE_DATA_DELAYS 0x1E00 13 11 #define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS)) 14 12 ··· 32 34 }; 33 35 34 36 struct batch_vals { 35 - u32 max_primitives; 36 - u32 max_urb_entries; 37 - u32 cmd_size; 38 - u32 state_size; 37 + u32 max_threads; 39 38 u32 state_start; 40 - u32 batch_size; 39 + u32 surface_start; 41 40 u32 surface_height; 42 41 u32 surface_width; 43 - u32 scratch_size; 44 - u32 max_size; 42 + u32 size; 45 43 }; 44 + 45 + static inline int num_primitives(const struct batch_vals *bv) 46 + { 47 + /* 48 + * We need to saturate the GPU with work in order to dispatch 49 + * a shader on every HW thread, and clear the thread-local registers. 50 + * In short, we have to dispatch work faster than the shaders can 51 + * run in order to fill the EU and occupy each HW thread. 52 + */ 53 + return bv->max_threads; 54 + } 46 55 47 56 static void 48 57 batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv) 49 58 { 50 59 if (IS_HASWELL(i915)) { 51 - bv->max_primitives = 280; 52 - bv->max_urb_entries = MAX_URB_ENTRIES; 60 + switch (INTEL_INFO(i915)->gt) { 61 + default: 62 + case 1: 63 + bv->max_threads = 70; 64 + break; 65 + case 2: 66 + bv->max_threads = 140; 67 + break; 68 + case 3: 69 + bv->max_threads = 280; 70 + break; 71 + } 53 72 bv->surface_height = 16 * 16; 54 73 bv->surface_width = 32 * 2 * 16; 55 74 } else { 56 - bv->max_primitives = 128; 57 - bv->max_urb_entries = MAX_URB_ENTRIES / 2; 75 + switch (INTEL_INFO(i915)->gt) { 76 + default: 77 + case 1: /* including vlv */ 78 + bv->max_threads = 36; 79 + break; 80 + case 2: 81 + bv->max_threads = 128; 82 + break; 83 + } 58 84 bv->surface_height = 16 * 8; 59 85 bv->surface_width = 32 * 16; 60 86 } 61 - bv->cmd_size = bv->max_primitives * 4096; 62 - bv->state_size = STATE_SIZE; 63 - bv->state_start = bv->cmd_size; 64 - bv->batch_size = 
bv->cmd_size + bv->state_size; 65 - bv->scratch_size = bv->surface_height * bv->surface_width; 66 - bv->max_size = bv->batch_size + bv->scratch_size; 87 + bv->state_start = round_up(SZ_1K + num_primitives(bv) * 64, SZ_4K); 88 + bv->surface_start = bv->state_start + SZ_4K; 89 + bv->size = bv->surface_start + bv->surface_height * bv->surface_width; 67 90 } 68 91 69 92 static void batch_init(struct batch_chunk *bc, ··· 174 155 gen7_fill_binding_table(struct batch_chunk *state, 175 156 const struct batch_vals *bv) 176 157 { 177 - u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv); 158 + u32 surface_start = 159 + gen7_fill_surface_state(state, bv->surface_start, bv); 178 160 u32 *cs = batch_alloc_items(state, 32, 8); 179 161 u32 offset = batch_offset(state, cs); 180 162 ··· 234 214 gen7_emit_state_base_address(struct batch_chunk *batch, 235 215 u32 surface_state_base) 236 216 { 237 - u32 *cs = batch_alloc_items(batch, 0, 12); 217 + u32 *cs = batch_alloc_items(batch, 0, 10); 238 218 239 - *cs++ = STATE_BASE_ADDRESS | (12 - 2); 219 + *cs++ = STATE_BASE_ADDRESS | (10 - 2); 240 220 /* general */ 241 221 *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY; 242 222 /* surface */ ··· 253 233 *cs++ = BASE_ADDRESS_MODIFY; 254 234 *cs++ = 0; 255 235 *cs++ = BASE_ADDRESS_MODIFY; 256 - *cs++ = 0; 257 - *cs++ = 0; 258 236 batch_advance(batch, cs); 259 237 } 260 238 ··· 262 244 u32 urb_size, u32 curbe_size, 263 245 u32 mode) 264 246 { 265 - u32 urb_entries = bv->max_urb_entries; 266 - u32 threads = bv->max_primitives - 1; 247 + u32 threads = bv->max_threads - 1; 267 248 u32 *cs = batch_alloc_items(batch, 32, 8); 268 249 269 250 *cs++ = MEDIA_VFE_STATE | (8 - 2); ··· 271 254 *cs++ = 0; 272 255 273 256 /* number of threads & urb entries for GPGPU vs Media Mode */ 274 - *cs++ = threads << 16 | urb_entries << 8 | mode << 2; 257 + *cs++ = threads << 16 | 1 << 8 | mode << 2; 275 258 276 259 *cs++ = 0; 277 260 ··· 310 293 { 311 294 unsigned int x_offset = (media_object_index 
% 16) * 64; 312 295 unsigned int y_offset = (media_object_index / 16) * 16; 313 - unsigned int inline_data_size; 314 - unsigned int media_batch_size; 315 - unsigned int i; 296 + unsigned int pkt = 6 + 3; 316 297 u32 *cs; 317 298 318 - inline_data_size = 112 * 8; 319 - media_batch_size = inline_data_size + 6; 299 + cs = batch_alloc_items(batch, 8, pkt); 320 300 321 - cs = batch_alloc_items(batch, 8, media_batch_size); 322 - 323 - *cs++ = MEDIA_OBJECT | (media_batch_size - 2); 301 + *cs++ = MEDIA_OBJECT | (pkt - 2); 324 302 325 303 /* interface descriptor offset */ 326 304 *cs++ = 0; ··· 329 317 *cs++ = 0; 330 318 331 319 /* inline */ 332 - *cs++ = (y_offset << 16) | (x_offset); 320 + *cs++ = y_offset << 16 | x_offset; 333 321 *cs++ = 0; 334 322 *cs++ = GT3_INLINE_DATA_DELAYS; 335 - for (i = 3; i < inline_data_size; i++) 336 - *cs++ = 0; 337 323 338 324 batch_advance(batch, cs); 339 325 } 340 326 341 327 static void gen7_emit_pipeline_flush(struct batch_chunk *batch) 342 328 { 343 - u32 *cs = batch_alloc_items(batch, 0, 5); 329 + u32 *cs = batch_alloc_items(batch, 0, 4); 344 330 345 - *cs++ = GFX_OP_PIPE_CONTROL(5); 346 - *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE | 347 - PIPE_CONTROL_GLOBAL_GTT_IVB; 331 + *cs++ = GFX_OP_PIPE_CONTROL(4); 332 + *cs++ = PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | 333 + PIPE_CONTROL_DEPTH_CACHE_FLUSH | 334 + PIPE_CONTROL_DC_FLUSH_ENABLE | 335 + PIPE_CONTROL_CS_STALL; 348 336 *cs++ = 0; 349 337 *cs++ = 0; 338 + 339 + batch_advance(batch, cs); 340 + } 341 + 342 + static void gen7_emit_pipeline_invalidate(struct batch_chunk *batch) 343 + { 344 + u32 *cs = batch_alloc_items(batch, 0, 8); 345 + 346 + /* ivb: Stall before STATE_CACHE_INVALIDATE */ 347 + *cs++ = GFX_OP_PIPE_CONTROL(4); 348 + *cs++ = PIPE_CONTROL_STALL_AT_SCOREBOARD | 349 + PIPE_CONTROL_CS_STALL; 350 350 *cs++ = 0; 351 + *cs++ = 0; 352 + 353 + *cs++ = GFX_OP_PIPE_CONTROL(4); 354 + *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE; 355 + *cs++ = 0; 356 + *cs++ = 0; 357 + 351 358 
batch_advance(batch, cs); 352 359 } 353 360 ··· 375 344 const struct batch_vals *bv) 376 345 { 377 346 struct drm_i915_private *i915 = vma->vm->i915; 378 - unsigned int desc_count = 64; 379 - const u32 urb_size = 112; 347 + const unsigned int desc_count = 1; 348 + const unsigned int urb_size = 1; 380 349 struct batch_chunk cmds, state; 381 - u32 interface_descriptor; 350 + u32 descriptors; 382 351 unsigned int i; 383 352 384 - batch_init(&cmds, vma, start, 0, bv->cmd_size); 385 - batch_init(&state, vma, start, bv->state_start, bv->state_size); 353 + batch_init(&cmds, vma, start, 0, bv->state_start); 354 + batch_init(&state, vma, start, bv->state_start, SZ_4K); 386 355 387 - interface_descriptor = 388 - gen7_fill_interface_descriptor(&state, bv, 389 - IS_HASWELL(i915) ? 390 - &cb_kernel_hsw : 391 - &cb_kernel_ivb, 392 - desc_count); 393 - gen7_emit_pipeline_flush(&cmds); 356 + descriptors = gen7_fill_interface_descriptor(&state, bv, 357 + IS_HASWELL(i915) ? 358 + &cb_kernel_hsw : 359 + &cb_kernel_ivb, 360 + desc_count); 361 + 362 + gen7_emit_pipeline_invalidate(&cmds); 394 363 batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA); 395 364 batch_add(&cmds, MI_NOOP); 396 - gen7_emit_state_base_address(&cmds, interface_descriptor); 365 + gen7_emit_pipeline_invalidate(&cmds); 366 + 397 367 gen7_emit_pipeline_flush(&cmds); 368 + gen7_emit_state_base_address(&cmds, descriptors); 369 + gen7_emit_pipeline_invalidate(&cmds); 398 370 399 371 gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0); 372 + gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count); 400 373 401 - gen7_emit_interface_descriptor_load(&cmds, 402 - interface_descriptor, 403 - desc_count); 404 - 405 - for (i = 0; i < bv->max_primitives; i++) 374 + for (i = 0; i < num_primitives(bv); i++) 406 375 gen7_emit_media_object(&cmds, i); 407 376 408 377 batch_add(&cmds, MI_BATCH_BUFFER_END); ··· 416 385 417 386 batch_get_defaults(engine->i915, &bv); 418 387 if (!vma) 419 - return bv.max_size; 388 + 
return bv.size; 420 389 421 - GEM_BUG_ON(vma->obj->base.size < bv.max_size); 390 + GEM_BUG_ON(vma->obj->base.size < bv.size); 422 391 423 392 batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); 424 393 if (IS_ERR(batch)) 425 394 return PTR_ERR(batch); 426 395 427 - emit_batch(vma, memset(batch, 0, bv.max_size), &bv); 396 + emit_batch(vma, memset(batch, 0, bv.size), &bv); 428 397 429 398 i915_gem_object_flush_map(vma->obj); 430 399 __i915_gem_object_release_map(vma->obj);
+4 -2
drivers/gpu/drm/i915/gt/intel_ring_submission.c
··· 32 32 #include "gen6_ppgtt.h" 33 33 #include "gen7_renderclear.h" 34 34 #include "i915_drv.h" 35 + #include "i915_mitigations.h" 35 36 #include "intel_breadcrumbs.h" 36 37 #include "intel_context.h" 37 38 #include "intel_gt.h" ··· 887 886 GEM_BUG_ON(HAS_EXECLISTS(engine->i915)); 888 887 889 888 if (engine->wa_ctx.vma && ce != engine->kernel_context) { 890 - if (engine->wa_ctx.vma->private != ce) { 889 + if (engine->wa_ctx.vma->private != ce && 890 + i915_mitigate_clear_residuals()) { 891 891 ret = clear_residuals(rq); 892 892 if (ret) 893 893 return ret; ··· 1292 1290 1293 1291 GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma); 1294 1292 1295 - if (IS_HASWELL(engine->i915) && engine->class == RENDER_CLASS) { 1293 + if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) { 1296 1294 err = gen7_ctx_switch_bb_init(engine); 1297 1295 if (err) 1298 1296 goto err_ring_unpin;
+59 -26
drivers/gpu/drm/i915/gvt/display.c
··· 217 217 DDI_BUF_CTL_ENABLE); 218 218 vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE; 219 219 } 220 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= 221 + ~(PORTA_HOTPLUG_ENABLE | PORTA_HOTPLUG_STATUS_MASK); 222 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= 223 + ~(PORTB_HOTPLUG_ENABLE | PORTB_HOTPLUG_STATUS_MASK); 224 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= 225 + ~(PORTC_HOTPLUG_ENABLE | PORTC_HOTPLUG_STATUS_MASK); 226 + /* No hpd_invert set in vgpu vbt, need to clear invert mask */ 227 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= ~BXT_DDI_HPD_INVERT_MASK; 228 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= ~BXT_DE_PORT_HOTPLUG_MASK; 220 229 221 230 vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1)); 222 231 vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &= ··· 282 273 vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |= 283 274 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | 284 275 TRANS_DDI_FUNC_ENABLE); 276 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= 277 + PORTA_HOTPLUG_ENABLE; 285 278 vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= 286 279 GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); 287 280 } ··· 312 301 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | 313 302 (PORT_B << TRANS_DDI_PORT_SHIFT) | 314 303 TRANS_DDI_FUNC_ENABLE); 304 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= 305 + PORTB_HOTPLUG_ENABLE; 315 306 vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= 316 307 GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); 317 308 } ··· 342 329 (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | 343 330 (PORT_B << TRANS_DDI_PORT_SHIFT) | 344 331 TRANS_DDI_FUNC_ENABLE); 332 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= 333 + PORTC_HOTPLUG_ENABLE; 345 334 vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= 346 335 GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); 347 336 } ··· 676 661 PORTD_HOTPLUG_STATUS_MASK; 677 662 intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG); 678 663 } else if (IS_BROXTON(i915)) { 679 - if (connected) { 680 - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { 664 + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) 
{ 665 + if (connected) { 681 666 vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= 682 667 GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); 683 - } 684 - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { 685 - vgpu_vreg_t(vgpu, SFUSE_STRAP) |= 686 - SFUSE_STRAP_DDIB_DETECTED; 687 - vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= 688 - GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); 689 - } 690 - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { 691 - vgpu_vreg_t(vgpu, SFUSE_STRAP) |= 692 - SFUSE_STRAP_DDIC_DETECTED; 693 - vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= 694 - GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); 695 - } 696 - } else { 697 - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) { 668 + } else { 698 669 vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= 699 670 ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); 700 671 } 701 - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { 702 - vgpu_vreg_t(vgpu, SFUSE_STRAP) &= 703 - ~SFUSE_STRAP_DDIB_DETECTED; 672 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= 673 + GEN8_DE_PORT_HOTPLUG(HPD_PORT_A); 674 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= 675 + ~PORTA_HOTPLUG_STATUS_MASK; 676 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= 677 + PORTA_HOTPLUG_LONG_DETECT; 678 + intel_vgpu_trigger_virtual_event(vgpu, DP_A_HOTPLUG); 679 + } 680 + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { 681 + if (connected) { 682 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= 683 + GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); 684 + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= 685 + SFUSE_STRAP_DDIB_DETECTED; 686 + } else { 704 687 vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= 705 688 ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); 706 - } 707 - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { 708 689 vgpu_vreg_t(vgpu, SFUSE_STRAP) &= 709 - ~SFUSE_STRAP_DDIC_DETECTED; 690 + ~SFUSE_STRAP_DDIB_DETECTED; 691 + } 692 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= 693 + GEN8_DE_PORT_HOTPLUG(HPD_PORT_B); 694 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= 695 + ~PORTB_HOTPLUG_STATUS_MASK; 696 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= 697 + PORTB_HOTPLUG_LONG_DETECT; 698 + 
intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG); 699 + } 700 + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { 701 + if (connected) { 702 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |= 703 + GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); 704 + vgpu_vreg_t(vgpu, SFUSE_STRAP) |= 705 + SFUSE_STRAP_DDIC_DETECTED; 706 + } else { 710 707 vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &= 711 708 ~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); 709 + vgpu_vreg_t(vgpu, SFUSE_STRAP) &= 710 + ~SFUSE_STRAP_DDIC_DETECTED; 712 711 } 712 + vgpu_vreg_t(vgpu, GEN8_DE_PORT_IIR) |= 713 + GEN8_DE_PORT_HOTPLUG(HPD_PORT_C); 714 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) &= 715 + ~PORTC_HOTPLUG_STATUS_MASK; 716 + vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= 717 + PORTC_HOTPLUG_LONG_DETECT; 718 + intel_vgpu_trigger_virtual_event(vgpu, DP_C_HOTPLUG); 713 719 } 714 - vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |= 715 - PORTB_HOTPLUG_STATUS_MASK; 716 - intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG); 717 720 } 718 721 } 719 722
+2 -3
drivers/gpu/drm/i915/gvt/vgpu.c
··· 437 437 if (ret) 438 438 goto out_clean_sched_policy; 439 439 440 - if (IS_BROADWELL(dev_priv)) 440 + if (IS_BROADWELL(dev_priv) || IS_BROXTON(dev_priv)) 441 441 ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); 442 - /* FixMe: Re-enable APL/BXT once vfio_edid enabled */ 443 - else if (!IS_BROXTON(dev_priv)) 442 + else 444 443 ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); 445 444 if (ret) 446 445 goto out_clean_sched_policy;
+4
drivers/gpu/drm/i915/i915_drv.c
··· 1047 1047 1048 1048 void i915_driver_shutdown(struct drm_i915_private *i915) 1049 1049 { 1050 + disable_rpm_wakeref_asserts(&i915->runtime_pm); 1051 + 1050 1052 i915_gem_suspend(i915); 1051 1053 1052 1054 drm_kms_helper_poll_disable(&i915->drm); ··· 1062 1060 1063 1061 intel_suspend_encoders(i915); 1064 1062 intel_shutdown_encoders(i915); 1063 + 1064 + enable_rpm_wakeref_asserts(&i915->runtime_pm); 1065 1065 } 1066 1066 1067 1067 static bool suspend_to_idle(struct drm_i915_private *dev_priv)
+146
drivers/gpu/drm/i915/i915_mitigations.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #include <linux/kernel.h> 7 + #include <linux/moduleparam.h> 8 + #include <linux/slab.h> 9 + #include <linux/string.h> 10 + 11 + #include "i915_drv.h" 12 + #include "i915_mitigations.h" 13 + 14 + static unsigned long mitigations __read_mostly = ~0UL; 15 + 16 + enum { 17 + CLEAR_RESIDUALS = 0, 18 + }; 19 + 20 + static const char * const names[] = { 21 + [CLEAR_RESIDUALS] = "residuals", 22 + }; 23 + 24 + bool i915_mitigate_clear_residuals(void) 25 + { 26 + return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS); 27 + } 28 + 29 + static int mitigations_set(const char *val, const struct kernel_param *kp) 30 + { 31 + unsigned long new = ~0UL; 32 + char *str, *sep, *tok; 33 + bool first = true; 34 + int err = 0; 35 + 36 + BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations)); 37 + 38 + str = kstrdup(val, GFP_KERNEL); 39 + if (!str) 40 + return -ENOMEM; 41 + 42 + for (sep = str; (tok = strsep(&sep, ","));) { 43 + bool enable = true; 44 + int i; 45 + 46 + /* Be tolerant of leading/trailing whitespace */ 47 + tok = strim(tok); 48 + 49 + if (first) { 50 + first = false; 51 + 52 + if (!strcmp(tok, "auto")) 53 + continue; 54 + 55 + new = 0; 56 + if (!strcmp(tok, "off")) 57 + continue; 58 + } 59 + 60 + if (*tok == '!') { 61 + enable = !enable; 62 + tok++; 63 + } 64 + 65 + if (!strncmp(tok, "no", 2)) { 66 + enable = !enable; 67 + tok += 2; 68 + } 69 + 70 + if (*tok == '\0') 71 + continue; 72 + 73 + for (i = 0; i < ARRAY_SIZE(names); i++) { 74 + if (!strcmp(tok, names[i])) { 75 + if (enable) 76 + new |= BIT(i); 77 + else 78 + new &= ~BIT(i); 79 + break; 80 + } 81 + } 82 + if (i == ARRAY_SIZE(names)) { 83 + pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n", 84 + DRIVER_NAME, val, tok); 85 + err = -EINVAL; 86 + break; 87 + } 88 + } 89 + kfree(str); 90 + if (err) 91 + return err; 92 + 93 + WRITE_ONCE(mitigations, new); 94 + return 0; 95 + } 96 + 97 + static int 
mitigations_get(char *buffer, const struct kernel_param *kp) 98 + { 99 + unsigned long local = READ_ONCE(mitigations); 100 + int count, i; 101 + bool enable; 102 + 103 + if (!local) 104 + return scnprintf(buffer, PAGE_SIZE, "%s\n", "off"); 105 + 106 + if (local & BIT(BITS_PER_LONG - 1)) { 107 + count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto"); 108 + enable = false; 109 + } else { 110 + enable = true; 111 + count = 0; 112 + } 113 + 114 + for (i = 0; i < ARRAY_SIZE(names); i++) { 115 + if ((local & BIT(i)) != enable) 116 + continue; 117 + 118 + count += scnprintf(buffer + count, PAGE_SIZE - count, 119 + "%s%s,", enable ? "" : "!", names[i]); 120 + } 121 + 122 + buffer[count - 1] = '\n'; 123 + return count; 124 + } 125 + 126 + static const struct kernel_param_ops ops = { 127 + .set = mitigations_set, 128 + .get = mitigations_get, 129 + }; 130 + 131 + module_param_cb_unsafe(mitigations, &ops, NULL, 0600); 132 + MODULE_PARM_DESC(mitigations, 133 + "Selectively enable security mitigations for all Intel® GPUs in the system.\n" 134 + "\n" 135 + " auto -- enables all mitigations required for the platform [default]\n" 136 + " off -- disables all mitigations\n" 137 + "\n" 138 + "Individual mitigations can be enabled by passing a comma-separated string,\n" 139 + "e.g. mitigations=residuals to enable only clearing residuals or\n" 140 + "mitigations=auto,noresiduals to disable only the clear residual mitigation.\n" 141 + "Either '!' or 'no' may be used to switch from enabling the mitigation to\n" 142 + "disabling it.\n" 143 + "\n" 144 + "Active mitigations for Ivybridge, Baytrail, Haswell:\n" 145 + " residuals -- clear all thread-local registers between contexts" 146 + );
+13
drivers/gpu/drm/i915/i915_mitigations.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #ifndef __I915_MITIGATIONS_H__ 7 + #define __I915_MITIGATIONS_H__ 8 + 9 + #include <linux/types.h> 10 + 11 + bool i915_mitigate_clear_residuals(void); 12 + 13 + #endif /* __I915_MITIGATIONS_H__ */
+2 -2
drivers/gpu/drm/nouveau/dispnv50/disp.c
··· 222 222 223 223 int 224 224 nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, 225 - const s32 *oclass, u8 head, void *data, u32 size, u64 syncbuf, 225 + const s32 *oclass, u8 head, void *data, u32 size, s64 syncbuf, 226 226 struct nv50_dmac *dmac) 227 227 { 228 228 struct nouveau_cli *cli = (void *)device->object.client; ··· 271 271 if (ret) 272 272 return ret; 273 273 274 - if (!syncbuf) 274 + if (syncbuf < 0) 275 275 return 0; 276 276 277 277 ret = nvif_object_ctor(&dmac->base.user, "kmsSyncCtxDma", NV50_DISP_HANDLE_SYNCBUF,
+1 -1
drivers/gpu/drm/nouveau/dispnv50/disp.h
··· 95 95 96 96 int nv50_dmac_create(struct nvif_device *device, struct nvif_object *disp, 97 97 const s32 *oclass, u8 head, void *data, u32 size, 98 - u64 syncbuf, struct nv50_dmac *dmac); 98 + s64 syncbuf, struct nv50_dmac *dmac); 99 99 void nv50_dmac_destroy(struct nv50_dmac *); 100 100 101 101 /*
+1 -1
drivers/gpu/drm/nouveau/dispnv50/wimmc37b.c
··· 76 76 int ret; 77 77 78 78 ret = nv50_dmac_create(&drm->client.device, &disp->disp->object, 79 - &oclass, 0, &args, sizeof(args), 0, 79 + &oclass, 0, &args, sizeof(args), -1, 80 80 &wndw->wimm); 81 81 if (ret) { 82 82 NV_ERROR(drm, "wimm%04x allocation failed: %d\n", oclass, ret);
+1
drivers/gpu/drm/nouveau/include/nvkm/subdev/i2c.h
··· 92 92 int gf117_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); 93 93 int gf119_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); 94 94 int gk104_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); 95 + int gk110_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); 95 96 int gm200_i2c_new(struct nvkm_device *, int, struct nvkm_i2c **); 96 97 97 98 static inline int
+6 -6
drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
··· 1815 1815 .fb = gk110_fb_new, 1816 1816 .fuse = gf100_fuse_new, 1817 1817 .gpio = gk104_gpio_new, 1818 - .i2c = gk104_i2c_new, 1818 + .i2c = gk110_i2c_new, 1819 1819 .ibus = gk104_ibus_new, 1820 1820 .iccsense = gf100_iccsense_new, 1821 1821 .imem = nv50_instmem_new, ··· 1853 1853 .fb = gk110_fb_new, 1854 1854 .fuse = gf100_fuse_new, 1855 1855 .gpio = gk104_gpio_new, 1856 - .i2c = gk104_i2c_new, 1856 + .i2c = gk110_i2c_new, 1857 1857 .ibus = gk104_ibus_new, 1858 1858 .iccsense = gf100_iccsense_new, 1859 1859 .imem = nv50_instmem_new, ··· 1891 1891 .fb = gk110_fb_new, 1892 1892 .fuse = gf100_fuse_new, 1893 1893 .gpio = gk104_gpio_new, 1894 - .i2c = gk104_i2c_new, 1894 + .i2c = gk110_i2c_new, 1895 1895 .ibus = gk104_ibus_new, 1896 1896 .iccsense = gf100_iccsense_new, 1897 1897 .imem = nv50_instmem_new, ··· 1929 1929 .fb = gk110_fb_new, 1930 1930 .fuse = gf100_fuse_new, 1931 1931 .gpio = gk104_gpio_new, 1932 - .i2c = gk104_i2c_new, 1932 + .i2c = gk110_i2c_new, 1933 1933 .ibus = gk104_ibus_new, 1934 1934 .iccsense = gf100_iccsense_new, 1935 1935 .imem = nv50_instmem_new, ··· 1967 1967 .fb = gm107_fb_new, 1968 1968 .fuse = gm107_fuse_new, 1969 1969 .gpio = gk104_gpio_new, 1970 - .i2c = gk104_i2c_new, 1970 + .i2c = gk110_i2c_new, 1971 1971 .ibus = gk104_ibus_new, 1972 1972 .iccsense = gf100_iccsense_new, 1973 1973 .imem = nv50_instmem_new, ··· 2003 2003 .fb = gm107_fb_new, 2004 2004 .fuse = gm107_fuse_new, 2005 2005 .gpio = gk104_gpio_new, 2006 - .i2c = gk104_i2c_new, 2006 + .i2c = gk110_i2c_new, 2007 2007 .ibus = gk104_ibus_new, 2008 2008 .iccsense = gf100_iccsense_new, 2009 2009 .imem = nv50_instmem_new,
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
··· 75 75 nvkm_debug(subdev, "%08x: type %02x, %d bytes\n", 76 76 image.base, image.type, image.size); 77 77 78 - if (!shadow_fetch(bios, mthd, image.size)) { 78 + if (!shadow_fetch(bios, mthd, image.base + image.size)) { 79 79 nvkm_debug(subdev, "%08x: fetch failed\n", image.base); 80 80 return 0; 81 81 }
+1
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/Kbuild
··· 7 7 nvkm-y += nvkm/subdev/i2c/gf117.o 8 8 nvkm-y += nvkm/subdev/i2c/gf119.o 9 9 nvkm-y += nvkm/subdev/i2c/gk104.o 10 + nvkm-y += nvkm/subdev/i2c/gk110.o 10 11 nvkm-y += nvkm/subdev/i2c/gm200.o 11 12 12 13 nvkm-y += nvkm/subdev/i2c/pad.o
+7
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/aux.h
··· 3 3 #define __NVKM_I2C_AUX_H__ 4 4 #include "pad.h" 5 5 6 + static inline void 7 + nvkm_i2c_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable) 8 + { 9 + if (i2c->func->aux_autodpcd) 10 + i2c->func->aux_autodpcd(i2c, aux, false); 11 + } 12 + 6 13 struct nvkm_i2c_aux_func { 7 14 bool address_only; 8 15 int (*xfer)(struct nvkm_i2c_aux *, bool retry, u8 type,
+7 -3
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxg94.c
··· 77 77 u8 type, u32 addr, u8 *data, u8 *size) 78 78 { 79 79 struct g94_i2c_aux *aux = g94_i2c_aux(obj); 80 - struct nvkm_device *device = aux->base.pad->i2c->subdev.device; 80 + struct nvkm_i2c *i2c = aux->base.pad->i2c; 81 + struct nvkm_device *device = i2c->subdev.device; 81 82 const u32 base = aux->ch * 0x50; 82 83 u32 ctrl, stat, timeout, retries = 0; 83 84 u32 xbuf[4] = {}; ··· 96 95 ret = -ENXIO; 97 96 goto out; 98 97 } 98 + 99 + nvkm_i2c_aux_autodpcd(i2c, aux->ch, false); 99 100 100 101 if (!(type & 1)) { 101 102 memcpy(xbuf, data, *size); ··· 131 128 if (!timeout--) { 132 129 AUX_ERR(&aux->base, "timeout %08x", ctrl); 133 130 ret = -EIO; 134 - goto out; 131 + goto out_err; 135 132 } 136 133 } while (ctrl & 0x00010000); 137 134 ret = 0; ··· 157 154 memcpy(data, xbuf, *size); 158 155 *size = stat & 0x0000001f; 159 156 } 160 - 157 + out_err: 158 + nvkm_i2c_aux_autodpcd(i2c, aux->ch, true); 161 159 out: 162 160 g94_i2c_aux_fini(aux); 163 161 return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+11 -6
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/auxgm200.c
··· 33 33 gm200_i2c_aux_fini(struct gm200_i2c_aux *aux) 34 34 { 35 35 struct nvkm_device *device = aux->base.pad->i2c->subdev.device; 36 - nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00310000, 0x00000000); 36 + nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00710000, 0x00000000); 37 37 } 38 38 39 39 static int ··· 54 54 AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl); 55 55 return -EBUSY; 56 56 } 57 - } while (ctrl & 0x03010000); 57 + } while (ctrl & 0x07010000); 58 58 59 59 /* set some magic, and wait up to 1ms for it to appear */ 60 - nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq); 60 + nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00700000, ureq); 61 61 timeout = 1000; 62 62 do { 63 63 ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50)); ··· 67 67 gm200_i2c_aux_fini(aux); 68 68 return -EBUSY; 69 69 } 70 - } while ((ctrl & 0x03000000) != urep); 70 + } while ((ctrl & 0x07000000) != urep); 71 71 72 72 return 0; 73 73 } ··· 77 77 u8 type, u32 addr, u8 *data, u8 *size) 78 78 { 79 79 struct gm200_i2c_aux *aux = gm200_i2c_aux(obj); 80 - struct nvkm_device *device = aux->base.pad->i2c->subdev.device; 80 + struct nvkm_i2c *i2c = aux->base.pad->i2c; 81 + struct nvkm_device *device = i2c->subdev.device; 81 82 const u32 base = aux->ch * 0x50; 82 83 u32 ctrl, stat, timeout, retries = 0; 83 84 u32 xbuf[4] = {}; ··· 96 95 ret = -ENXIO; 97 96 goto out; 98 97 } 98 + 99 + nvkm_i2c_aux_autodpcd(i2c, aux->ch, false); 99 100 100 101 if (!(type & 1)) { 101 102 memcpy(xbuf, data, *size); ··· 131 128 if (!timeout--) { 132 129 AUX_ERR(&aux->base, "timeout %08x", ctrl); 133 130 ret = -EIO; 134 - goto out; 131 + goto out_err; 135 132 } 136 133 } while (ctrl & 0x00010000); 137 134 ret = 0; ··· 158 155 *size = stat & 0x0000001f; 159 156 } 160 157 158 + out_err: 159 + nvkm_i2c_aux_autodpcd(i2c, aux->ch, true); 161 160 out: 162 161 gm200_i2c_aux_fini(aux); 163 162 return ret < 0 ? ret : (stat & 0x000f0000) >> 16;
+45
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gk110.c
··· 1 + /* 2 + * Copyright 2021 Red Hat Inc. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice shall be included in 12 + * all copies or substantial portions of the Software. 13 + * 14 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 15 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 16 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 17 + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 18 + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 19 + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 20 + * OTHER DEALINGS IN THE SOFTWARE. 21 + */ 22 + #include "priv.h" 23 + #include "pad.h" 24 + 25 + static void 26 + gk110_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable) 27 + { 28 + nvkm_mask(i2c->subdev.device, 0x00e4f8 + (aux * 0x50), 0x00010000, enable << 16); 29 + } 30 + 31 + static const struct nvkm_i2c_func 32 + gk110_i2c = { 33 + .pad_x_new = gf119_i2c_pad_x_new, 34 + .pad_s_new = gf119_i2c_pad_s_new, 35 + .aux = 4, 36 + .aux_stat = gk104_aux_stat, 37 + .aux_mask = gk104_aux_mask, 38 + .aux_autodpcd = gk110_aux_autodpcd, 39 + }; 40 + 41 + int 42 + gk110_i2c_new(struct nvkm_device *device, int index, struct nvkm_i2c **pi2c) 43 + { 44 + return nvkm_i2c_new_(&gk110_i2c, device, index, pi2c); 45 + }
+7
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/gm200.c
··· 24 24 #include "priv.h" 25 25 #include "pad.h" 26 26 27 + static void 28 + gm200_aux_autodpcd(struct nvkm_i2c *i2c, int aux, bool enable) 29 + { 30 + nvkm_mask(i2c->subdev.device, 0x00d968 + (aux * 0x50), 0x00010000, enable << 16); 31 + } 32 + 27 33 static const struct nvkm_i2c_func 28 34 gm200_i2c = { 29 35 .pad_x_new = gf119_i2c_pad_x_new, ··· 37 31 .aux = 8, 38 32 .aux_stat = gk104_aux_stat, 39 33 .aux_mask = gk104_aux_mask, 34 + .aux_autodpcd = gm200_aux_autodpcd, 40 35 }; 41 36 42 37 int
+1 -1
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/pad.h
··· 1 1 /* SPDX-License-Identifier: MIT */ 2 2 #ifndef __NVKM_I2C_PAD_H__ 3 3 #define __NVKM_I2C_PAD_H__ 4 - #include <subdev/i2c.h> 4 + #include "priv.h" 5 5 6 6 struct nvkm_i2c_pad { 7 7 const struct nvkm_i2c_pad_func *func;
+4
drivers/gpu/drm/nouveau/nvkm/subdev/i2c/priv.h
··· 23 23 /* mask on/off interrupt types for a given set of auxch 24 24 */ 25 25 void (*aux_mask)(struct nvkm_i2c *, u32, u32, u32); 26 + 27 + /* enable/disable HW-initiated DPCD reads 28 + */ 29 + void (*aux_autodpcd)(struct nvkm_i2c *, int aux, bool enable); 26 30 }; 27 31 28 32 void g94_aux_stat(struct nvkm_i2c *, u32 *, u32 *, u32 *, u32 *);
+7 -3
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gf100.c
··· 22 22 * Authors: Ben Skeggs 23 23 */ 24 24 #include "priv.h" 25 + #include <subdev/timer.h> 25 26 26 27 static void 27 28 gf100_ibus_intr_hub(struct nvkm_subdev *ibus, int i) ··· 32 31 u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0400)); 33 32 u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0400)); 34 33 nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat); 35 - nvkm_mask(device, 0x122128 + (i * 0x0400), 0x00000200, 0x00000000); 36 34 } 37 35 38 36 static void ··· 42 42 u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0400)); 43 43 u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0400)); 44 44 nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat); 45 - nvkm_mask(device, 0x124128 + (i * 0x0400), 0x00000200, 0x00000000); 46 45 } 47 46 48 47 static void ··· 52 53 u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0400)); 53 54 u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0400)); 54 55 nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat); 55 - nvkm_mask(device, 0x128128 + (i * 0x0400), 0x00000200, 0x00000000); 56 56 } 57 57 58 58 void ··· 88 90 intr1 &= ~stat; 89 91 } 90 92 } 93 + 94 + nvkm_mask(device, 0x121c4c, 0x0000003f, 0x00000002); 95 + nvkm_msec(device, 2000, 96 + if (!(nvkm_rd32(device, 0x121c4c) & 0x0000003f)) 97 + break; 98 + ); 91 99 } 92 100 93 101 static int
+7 -3
drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk104.c
··· 22 22 * Authors: Ben Skeggs 23 23 */ 24 24 #include "priv.h" 25 + #include <subdev/timer.h> 25 26 26 27 static void 27 28 gk104_ibus_intr_hub(struct nvkm_subdev *ibus, int i) ··· 32 31 u32 data = nvkm_rd32(device, 0x122124 + (i * 0x0800)); 33 32 u32 stat = nvkm_rd32(device, 0x122128 + (i * 0x0800)); 34 33 nvkm_debug(ibus, "HUB%d: %06x %08x (%08x)\n", i, addr, data, stat); 35 - nvkm_mask(device, 0x122128 + (i * 0x0800), 0x00000200, 0x00000000); 36 34 } 37 35 38 36 static void ··· 42 42 u32 data = nvkm_rd32(device, 0x124124 + (i * 0x0800)); 43 43 u32 stat = nvkm_rd32(device, 0x124128 + (i * 0x0800)); 44 44 nvkm_debug(ibus, "ROP%d: %06x %08x (%08x)\n", i, addr, data, stat); 45 - nvkm_mask(device, 0x124128 + (i * 0x0800), 0x00000200, 0x00000000); 46 45 } 47 46 48 47 static void ··· 52 53 u32 data = nvkm_rd32(device, 0x128124 + (i * 0x0800)); 53 54 u32 stat = nvkm_rd32(device, 0x128128 + (i * 0x0800)); 54 55 nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat); 55 - nvkm_mask(device, 0x128128 + (i * 0x0800), 0x00000200, 0x00000000); 56 56 } 57 57 58 58 void ··· 88 90 intr1 &= ~stat; 89 91 } 90 92 } 93 + 94 + nvkm_mask(device, 0x12004c, 0x0000003f, 0x00000002); 95 + nvkm_msec(device, 2000, 96 + if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f)) 97 + break; 98 + ); 91 99 } 92 100 93 101 static int
+3 -3
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c
··· 316 316 { 317 317 struct nvkm_device *device = mmu->subdev.device; 318 318 struct nvkm_mm *mm = &device->fb->ram->vram; 319 - const u32 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL); 320 - const u32 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP); 321 - const u32 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED); 319 + const u64 sizeN = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NORMAL); 320 + const u64 sizeU = nvkm_mm_heap_size(mm, NVKM_RAM_MM_NOMAP); 321 + const u64 sizeM = nvkm_mm_heap_size(mm, NVKM_RAM_MM_MIXED); 322 322 u8 type = NVKM_MEM_KIND * !!mmu->func->kind; 323 323 u8 heap = NVKM_MEM_VRAM; 324 324 int heapM, heapN, heapU;
+11 -11
drivers/gpu/drm/ttm/ttm_pool.c
··· 66 66 static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER]; 67 67 static struct ttm_pool_type global_dma32_uncached[MAX_ORDER]; 68 68 69 - static spinlock_t shrinker_lock; 69 + static struct mutex shrinker_lock; 70 70 static struct list_head shrinker_list; 71 71 static struct shrinker mm_shrinker; 72 72 ··· 190 190 size_t size = (1ULL << order) * PAGE_SIZE; 191 191 192 192 addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL); 193 - if (dma_mapping_error(pool->dev, **dma_addr)) 193 + if (dma_mapping_error(pool->dev, addr)) 194 194 return -EFAULT; 195 195 } 196 196 ··· 249 249 spin_lock_init(&pt->lock); 250 250 INIT_LIST_HEAD(&pt->pages); 251 251 252 - spin_lock(&shrinker_lock); 252 + mutex_lock(&shrinker_lock); 253 253 list_add_tail(&pt->shrinker_list, &shrinker_list); 254 - spin_unlock(&shrinker_lock); 254 + mutex_unlock(&shrinker_lock); 255 255 } 256 256 257 257 /* Remove a pool_type from the global shrinker list and free all pages */ ··· 259 259 { 260 260 struct page *p, *tmp; 261 261 262 - spin_lock(&shrinker_lock); 262 + mutex_lock(&shrinker_lock); 263 263 list_del(&pt->shrinker_list); 264 - spin_unlock(&shrinker_lock); 264 + mutex_unlock(&shrinker_lock); 265 265 266 266 list_for_each_entry_safe(p, tmp, &pt->pages, lru) 267 267 ttm_pool_free_page(pt->pool, pt->caching, pt->order, p); ··· 302 302 unsigned int num_freed; 303 303 struct page *p; 304 304 305 - spin_lock(&shrinker_lock); 305 + mutex_lock(&shrinker_lock); 306 306 pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list); 307 307 308 308 p = ttm_pool_type_take(pt); ··· 314 314 } 315 315 316 316 list_move_tail(&pt->shrinker_list, &shrinker_list); 317 - spin_unlock(&shrinker_lock); 317 + mutex_unlock(&shrinker_lock); 318 318 319 319 return num_freed; 320 320 } ··· 564 564 { 565 565 unsigned int i; 566 566 567 - spin_lock(&shrinker_lock); 567 + mutex_lock(&shrinker_lock); 568 568 569 569 seq_puts(m, "\t "); 570 570 for (i = 0; i < MAX_ORDER; ++i) ··· 600 600 
seq_printf(m, "\ntotal\t: %8lu of %8lu\n", 601 601 atomic_long_read(&allocated_pages), page_pool_size); 602 602 603 - spin_unlock(&shrinker_lock); 603 + mutex_unlock(&shrinker_lock); 604 604 605 605 return 0; 606 606 } ··· 644 644 if (!page_pool_size) 645 645 page_pool_size = num_pages; 646 646 647 - spin_lock_init(&shrinker_lock); 647 + mutex_init(&shrinker_lock); 648 648 INIT_LIST_HEAD(&shrinker_list); 649 649 650 650 for (i = 0; i < MAX_ORDER; ++i) {