Merge tag 'drm-fixes-2025-12-27' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
"Post overeating fixes, only msm for this week has anything, so quiet
as expected.

msm:
- GPU:
- Fix crash on a7xx GPUs not supporting IFPC
- Fix perfcntr use with IFPC
- Concurrent binning fix
- DPU:
- Fixed DSC and SSPP fetching issues
  - Switched to scnprintf instead of snprintf (see the sketch below)
- Added missing NULL checks in pingpong code"
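
A quick illustration of the scnprintf change called out above — a userspace
sketch, where my_scnprintf() is a stand-in for the kernel helper and none of
this is driver code: snprintf() returns the length that *would* have been
written, so accumulating its return value as a buffer offset can walk past
the end of the buffer; scnprintf() returns only what was actually stored.

    #include <stdarg.h>
    #include <stdio.h>

    /* Stand-in for the kernel's scnprintf(): clamp the return value to
     * the number of characters actually stored (excluding the NUL). */
    static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
        va_list args;
        int i;

        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);

        if (i >= (int)size)
            i = size ? (int)size - 1 : 0;
        return i;
    }

    int main(void)
    {
        char buf[8];
        size_t off = 0;

        /* snprintf() would return 12 for the first call and push 'off'
         * past the buffer; the clamped variant cannot. */
        off += my_scnprintf(buf + off, sizeof(buf) - off, "%s", "hello world!");
        off += my_scnprintf(buf + off, sizeof(buf) - off, "%s", "!!!");
        printf("off=%zu buf=\"%s\"\n", off, buf);  /* off=7 buf="hello w" */
        return 0;
    }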

* tag 'drm-fixes-2025-12-27' of https://gitlab.freedesktop.org/drm/kernel: (27 commits)
drm/msm: Replace unsafe snprintf usage with scnprintf
drm/msm/dpu: Add missing NULL pointer check for pingpong interface
Revert "drm/msm/dpu: Enable quad-pipe for DSC and dual-DSI case"
Revert "drm/msm/dpu: support plane splitting in quad-pipe case"
drm/msm: msm_iommu.c: fix all kernel-doc warnings
drm/msm: msm_gpu.h: fix all kernel-doc warnings
drm/msm: msm_gem_vma.c: fix all kernel-doc warnings
drm/msm: msm_fence.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_wb.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_vbif.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_top.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_sspp.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_pingpong.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_merge3d.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_lm.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_intf.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_dspp.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_dsc.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_cwb.h: fix all kernel-doc warnings
drm/msm/dpu: dpu_hw_ctl.h: fix all kernel-doc warnings
...

+348 -373
+12 -1
drivers/gpu/drm/msm/adreno/a6xx_catalog.c
···
1376 1376      REG_A6XX_UCHE_MODE_CNTL,
1377 1377      REG_A6XX_RB_NC_MODE_CNTL,
1378 1378      REG_A6XX_RB_CMP_DBG_ECO_CNTL,
1379    -      REG_A7XX_GRAS_NC_MODE_CNTL,
1380 1379      REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE_ENABLE,
1381 1380      REG_A6XX_UCHE_GBIF_GX_CONFIG,
1382 1381      REG_A6XX_UCHE_CLIENT_PF,
···
1391 1392      REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(2),
1392 1393      REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(3),
1393 1394      REG_A6XX_TPL1_BICUBIC_WEIGHTS_TABLE(4),
     1395 +    REG_A6XX_RBBM_PERFCTR_CNTL,
1394 1396      REG_A6XX_TPL1_NC_MODE_CNTL,
1395 1397      REG_A6XX_SP_NC_MODE_CNTL,
1396 1398      REG_A6XX_CP_DBG_ECO_CNTL,
···
1448 1448  
1449 1449  DECLARE_ADRENO_REGLIST_LIST(a750_ifpc_reglist);
1450 1450  
     1451 + static const struct adreno_reglist_pipe a7xx_dyn_pwrup_reglist_regs[] = {
     1452 +     { REG_A7XX_GRAS_NC_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
     1453 + };
     1454 +
     1455 + DECLARE_ADRENO_REGLIST_PIPE_LIST(a7xx_dyn_pwrup_reglist);
     1456 +
1451 1457  static const struct adreno_info a7xx_gpus[] = {
1452 1458      {
1453 1459          .chip_ids = ADRENO_CHIP_IDS(0x07000200),
···
1497 1491          .hwcg = a730_hwcg,
1498 1492          .protect = &a730_protect,
1499 1493          .pwrup_reglist = &a7xx_pwrup_reglist,
     1494 +        .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
1500 1495          .gbif_cx = a640_gbif,
1501 1496          .gmu_cgc_mode = 0x00020000,
1502 1497      },
···
1520 1513          .hwcg = a740_hwcg,
1521 1514          .protect = &a730_protect,
1522 1515          .pwrup_reglist = &a7xx_pwrup_reglist,
     1516 +        .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
1523 1517          .gbif_cx = a640_gbif,
1524 1518          .gmu_chipid = 0x7020100,
1525 1519          .gmu_cgc_mode = 0x00020202,
···
1555 1547          .hwcg = a740_hwcg,
1556 1548          .protect = &a730_protect,
1557 1549          .pwrup_reglist = &a7xx_pwrup_reglist,
     1550 +        .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
1558 1551          .ifpc_reglist = &a750_ifpc_reglist,
1559 1552          .gbif_cx = a640_gbif,
1560 1553          .gmu_chipid = 0x7050001,
···
1598 1589          .a6xx = &(const struct a6xx_info) {
1599 1590              .protect = &a730_protect,
1600 1591              .pwrup_reglist = &a7xx_pwrup_reglist,
     1592 +            .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
1601 1593              .ifpc_reglist = &a750_ifpc_reglist,
1602 1594              .gbif_cx = a640_gbif,
1603 1595              .gmu_chipid = 0x7090100,
···
1633 1623          .hwcg = a740_hwcg,
1634 1624          .protect = &a730_protect,
1635 1625          .pwrup_reglist = &a7xx_pwrup_reglist,
     1626 +        .dyn_pwrup_reglist = &a7xx_dyn_pwrup_reglist,
1636 1627          .gbif_cx = a640_gbif,
1637 1628          .gmu_chipid = 0x70f0000,
1638 1629          .gmu_cgc_mode = 0x00020222,
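
For context, a pared-down sketch of what the new DECLARE_ADRENO_REGLIST_PIPE_LIST()
line above expands to. The struct field names and the register offset here are
assumptions for illustration; the real definitions live in adreno_gpu.h and the
generated register headers.

    #define BIT(n) (1UL << (n))
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    enum { PIPE_NONE, PIPE_BR, PIPE_BV };   /* assumed ordering */

    struct adreno_reglist_pipe {
        unsigned int offset;                /* register offset (assumed field) */
        unsigned int value;                 /* value to program (assumed field) */
        unsigned int pipe;                  /* bitmask of pipes (assumed field) */
    };

    struct adreno_reglist_pipe_list {
        const struct adreno_reglist_pipe *regs;
        unsigned int count;
    };

    #define REG_A7XX_GRAS_NC_MODE_CNTL 0x8602   /* placeholder offset */

    static const struct adreno_reglist_pipe a7xx_dyn_pwrup_reglist_regs[] = {
        { REG_A7XX_GRAS_NC_MODE_CNTL, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
    };

    /* ...and DECLARE_ADRENO_REGLIST_PIPE_LIST(a7xx_dyn_pwrup_reglist)
     * then pairs the array with its length: */
    static const struct adreno_reglist_pipe_list a7xx_dyn_pwrup_reglist = {
        .regs  = a7xx_dyn_pwrup_reglist_regs,
        .count = ARRAY_SIZE(a7xx_dyn_pwrup_reglist_regs),
    };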
+40 -12
drivers/gpu/drm/msm/adreno/a6xx_gpu.c
···
 849  849           min_acc_len_64b << 3 |
 850  850           hbb_lo << 1 | ubwc_mode);
 851  851  
 852     -      if (adreno_is_a7xx(adreno_gpu))
 853     -          gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
 854     -                    FIELD_PREP(GENMASK(8, 5), hbb_lo));
      852 +      if (adreno_is_a7xx(adreno_gpu)) {
      853 +          for (u32 pipe_id = PIPE_BR; pipe_id <= PIPE_BV; pipe_id++) {
      854 +              gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
      855 +                        A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id));
      856 +              gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
      857 +                        FIELD_PREP(GENMASK(8, 5), hbb_lo));
      858 +          }
      859 +          gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
      860 +                    A7XX_CP_APERTURE_CNTL_HOST_PIPE(PIPE_NONE));
      861 +      }
 855  862  
 856  863      gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
 857  864                min_acc_len_64b << 23 | hbb_lo << 21);
···
 872  865      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
 873  866      struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
 874  867      const struct adreno_reglist_list *reglist;
      868 +    const struct adreno_reglist_pipe_list *dyn_pwrup_reglist;
 875  869      void *ptr = a6xx_gpu->pwrup_reglist_ptr;
 876  870      struct cpu_gpu_lock *lock = ptr;
 877  871      u32 *dest = (u32 *)&lock->regs[0];
      872 +    u32 dyn_pwrup_reglist_count = 0;
 878  873      int i;
 879  874  
 880  875      lock->gpu_req = lock->cpu_req = lock->turn = 0;
 881  876  
 882  877      reglist = adreno_gpu->info->a6xx->ifpc_reglist;
 883     -      lock->ifpc_list_len = reglist->count;
      878 +    if (reglist) {
      879 +        lock->ifpc_list_len = reglist->count;
 884  880  
 885     -      /*
 886     -       * For each entry in each of the lists, write the offset and the current
 887     -       * register value into the GPU buffer
 888     -       */
 889     -      for (i = 0; i < reglist->count; i++) {
 890     -          *dest++ = reglist->regs[i];
 891     -          *dest++ = gpu_read(gpu, reglist->regs[i]);
      881 +        /*
      882 +         * For each entry in each of the lists, write the offset and the current
      883 +         * register value into the GPU buffer
      884 +         */
      885 +        for (i = 0; i < reglist->count; i++) {
      886 +            *dest++ = reglist->regs[i];
      887 +            *dest++ = gpu_read(gpu, reglist->regs[i]);
      888 +        }
 892  889      }
 893  890  
 894  891      reglist = adreno_gpu->info->a6xx->pwrup_reglist;
···
 918  907       * (<aperture, shifted 12 bits> <address> <data>), and the length is
 919  908       * stored as number for triplets in dynamic_list_len.
 920  909       */
 921     -      lock->dynamic_list_len = 0;
      910 +    dyn_pwrup_reglist = adreno_gpu->info->a6xx->dyn_pwrup_reglist;
      911 +    if (dyn_pwrup_reglist) {
      912 +        for (u32 pipe_id = PIPE_BR; pipe_id <= PIPE_BV; pipe_id++) {
      913 +            gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
      914 +                      A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id));
      915 +            for (i = 0; i < dyn_pwrup_reglist->count; i++) {
      916 +                if ((dyn_pwrup_reglist->regs[i].pipe & BIT(pipe_id)) == 0)
      917 +                    continue;
      918 +                *dest++ = A7XX_CP_APERTURE_CNTL_HOST_PIPE(pipe_id);
      919 +                *dest++ = dyn_pwrup_reglist->regs[i].offset;
      920 +                *dest++ = gpu_read(gpu, dyn_pwrup_reglist->regs[i].offset);
      921 +                dyn_pwrup_reglist_count++;
      922 +            }
      923 +        }
      924 +        gpu_write(gpu, REG_A7XX_CP_APERTURE_CNTL_HOST,
      925 +                  A7XX_CP_APERTURE_CNTL_HOST_PIPE(PIPE_NONE));
      926 +    }
      927 +    lock->dynamic_list_len = dyn_pwrup_reglist_count;
 922  928  }
 923  929  
 924  930  static int a7xx_preempt_start(struct msm_gpu *gpu)
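
The dynamic-list producer above emits one <aperture, address, value> triplet per
register per pipe. A runnable distillation follows; the aperture encoding and
gpu_read() are faked stand-ins, and only the buffer layout mirrors the hunk.

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
    enum { PIPE_NONE, PIPE_BR, PIPE_BV };

    /* assumed encoding: pipe id shifted into the aperture field */
    #define APERTURE_PIPE(p) ((uint32_t)(p) << 12)

    struct reg_pipe { uint32_t offset, value, pipe; };

    static uint32_t fake_gpu_read(uint32_t offset)  /* stand-in for gpu_read() */
    {
        return 0xa5a50000u | offset;
    }

    int main(void)
    {
        const struct reg_pipe regs[] = {
            { 0x8602, 0, BIT(PIPE_BV) | BIT(PIPE_BR) },
        };
        uint32_t buf[32], *dest = buf, count = 0;

        for (uint32_t pipe = PIPE_BR; pipe <= PIPE_BV; pipe++)
            for (unsigned int i = 0; i < ARRAY_SIZE(regs); i++) {
                if (!(regs[i].pipe & BIT(pipe)))
                    continue;
                *dest++ = APERTURE_PIPE(pipe);            /* aperture */
                *dest++ = regs[i].offset;                 /* address  */
                *dest++ = fake_gpu_read(regs[i].offset);  /* value    */
                count++;
            }
        /* dynamic_list_len counts triplets, not u32 words */
        printf("dynamic_list_len = %u (%u words)\n", count, count * 3);
        return 0;
    }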
+1
drivers/gpu/drm/msm/adreno/a6xx_gpu.h
···
  45   45      const struct adreno_reglist *hwcg;
  46   46      const struct adreno_protect *protect;
  47   47      const struct adreno_reglist_list *pwrup_reglist;
       48 +    const struct adreno_reglist_pipe_list *dyn_pwrup_reglist;
  48   49      const struct adreno_reglist_list *ifpc_reglist;
  49   50      const struct adreno_reglist *gbif_cx;
  50   51      const struct adreno_reglist_pipe *nonctxt_reglist;
+2 -2
drivers/gpu/drm/msm/adreno/a6xx_preempt.c
···
 454  454              gpu->vm, &a6xx_gpu->preempt_postamble_bo,
 455  455              &a6xx_gpu->preempt_postamble_iova);
 456  456  
 457     -      preempt_prepare_postamble(a6xx_gpu);
 458     -  
 459  457      if (IS_ERR(a6xx_gpu->preempt_postamble_ptr))
 460  458          goto fail;
      459 +  
      460 +    preempt_prepare_postamble(a6xx_gpu);
 461  461  
 462  462      timer_setup(&a6xx_gpu->preempt_timer, a6xx_preempt_timer, 0);
 463  463  
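
The two-line move above is the classic ERR_PTR ordering fix: the allocation can
hand back an encoded error, so nothing may touch the mapping before the IS_ERR()
check. A minimal userspace model of the pattern — the IS_ERR machinery is a
simplified copy of the kernel's and the allocator is stubbed:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095
    #define ERR_PTR(err) ((void *)(long)(err))
    #define PTR_ERR(ptr) ((long)(ptr))
    #define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    static char postamble[64];

    static void *kernel_new(int fail)   /* stand-in allocator */
    {
        return fail ? ERR_PTR(-ENOMEM) : postamble;
    }

    int main(void)
    {
        void *ptr = kernel_new(1);

        /* check first -- preparing the postamble through 'ptr' before
         * this test is exactly the crash class being fixed */
        if (IS_ERR(ptr)) {
            fprintf(stderr, "alloc failed: %ld\n", PTR_ERR(ptr));
            return 1;
        }
        return 0;
    }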
+13
drivers/gpu/drm/msm/adreno/adreno_gpu.h
···
 188  188      .count = ARRAY_SIZE(name ## _regs), \
 189  189  };
 190  190  
      191 + struct adreno_reglist_pipe_list {
      192 +     /** @reg: List of register **/
      193 +     const struct adreno_reglist_pipe *regs;
      194 +     /** @count: Number of registers in the list **/
      195 +     u32 count;
      196 + };
      197 +
      198 + #define DECLARE_ADRENO_REGLIST_PIPE_LIST(name) \
      199 + static const struct adreno_reglist_pipe_list name = { \
      200 +     .regs = name ## _regs, \
      201 +     .count = ARRAY_SIZE(name ## _regs), \
      202 + };
      203 +
 191  204  struct adreno_gpu {
 192  205      struct msm_gpu base;
 193  206      const struct adreno_info *info;
+7 -31
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
···
 200  200                       struct dpu_crtc_state *crtc_state)
 201  201  {
 202  202      struct dpu_crtc_mixer *m;
 203     -      u32 crcs[CRTC_QUAD_MIXERS];
      203 +    u32 crcs[CRTC_DUAL_MIXERS];
 204  204  
 205  205      int rc = 0;
 206  206      int i;
···
1328 1328      struct drm_display_mode *mode = &crtc_state->adjusted_mode;
1329 1329      struct msm_display_topology topology = {0};
1330 1330      struct drm_encoder *drm_enc;
1331    -      u32 num_rt_intf;
1332 1331  
1333 1332      drm_for_each_encoder_mask(drm_enc, crtc->dev, crtc_state->encoder_mask)
1334 1333          dpu_encoder_update_topology(drm_enc, &topology, crtc_state->state,
···
1341 1342       * Dual display
1342 1343       * 2 LM, 2 INTF ( Split display using 2 interfaces)
1343 1344       *
1344    -       * If DSC is enabled, try to use 4:4:2 topology if there is enough
1345    -       * resource. Otherwise, use 2:2:2 topology.
1346    -       *
1347 1345       * Single display
1348 1346       * 1 LM, 1 INTF
1349 1347       * 2 LM, 1 INTF (stream merge to support high resolution interfaces)
1350 1348       *
1351    -       * If DSC is enabled, use 2:2:1 topology
      1349 +     * If DSC is enabled, use 2 LMs for 2:2:1 topology
1352 1350       *
1353 1351       * Add dspps to the reservation requirements if ctm is requested
1354 1352       *
···
1357 1361       * (mode->hdisplay > MAX_HDISPLAY_SPLIT) check.
1358 1362       */
1359 1363  
1360    -      num_rt_intf = topology.num_intf;
1361    -      if (topology.cwb_enabled)
1362    -          num_rt_intf--;
1363    -  
1364    -      if (topology.num_dsc) {
1365    -          if (dpu_kms->catalog->dsc_count >= num_rt_intf * 2)
1366    -              topology.num_dsc = num_rt_intf * 2;
1367    -          else
1368    -              topology.num_dsc = num_rt_intf;
1369    -          topology.num_lm = topology.num_dsc;
1370    -      } else if (num_rt_intf == 2) {
      1364 +    if (topology.num_intf == 2 && !topology.cwb_enabled)
1371 1365          topology.num_lm = 2;
1372    -      } else if (dpu_kms->catalog->caps->has_3d_merge) {
      1366 +    else if (topology.num_dsc == 2)
      1367 +        topology.num_lm = 2;
      1368 +    else if (dpu_kms->catalog->caps->has_3d_merge)
1373 1369          topology.num_lm = (mode->hdisplay > MAX_HDISPLAY_SPLIT) ? 2 : 1;
1374    -      } else {
      1370 +    else
1375 1371          topology.num_lm = 1;
1376    -      }
1377 1372  
1378 1373      if (crtc_state->ctm)
1379 1374          topology.num_dspp = topology.num_lm;
···
1605 1618      }
1606 1619  
1607 1620      return 0;
1608    -  }
1609    -  
1610    -  /**
1611    -   * dpu_crtc_get_num_lm - Get mixer number in this CRTC pipeline
1612    -   * @state: Pointer to drm crtc state object
1613    -   */
1614    -  unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state)
1615    -  {
1616    -      struct dpu_crtc_state *cstate = to_dpu_crtc_state(state);
1617    -  
1618    -      return cstate->num_mixers;
1619 1621  }
1620 1622  
1621 1623  #ifdef CONFIG_DEBUG_FS
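
Distilled, the layer-mixer selection that survives the two reverts above is a
small decision ladder. The following is a hypothetical free function, not the
driver's; it only mirrors the new else-if chain in the hunk:

    static unsigned int pick_num_lm(unsigned int num_intf, int cwb_enabled,
                                    unsigned int num_dsc, int has_3d_merge,
                                    int hdisplay, int max_hdisplay_split)
    {
        if (num_intf == 2 && !cwb_enabled)
            return 2;               /* split display: 2 LM, 2 INTF */
        if (num_dsc == 2)
            return 2;               /* DSC 2:2:1 topology */
        if (has_3d_merge)
            return hdisplay > max_hdisplay_split ? 2 : 1;
        return 1;
    }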
+3 -5
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
···
 210  210  
 211  211      bool bw_control;
 212  212      bool bw_split_vote;
 213     -      struct drm_rect lm_bounds[CRTC_QUAD_MIXERS];
      213 +    struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
 214  214  
 215  215      uint64_t input_fence_timeout_ns;
 216  216  
···
 218  218  
 219  219      /* HW Resources reserved for the crtc */
 220  220      u32 num_mixers;
 221     -      struct dpu_crtc_mixer mixers[CRTC_QUAD_MIXERS];
      221 +    struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
 222  222  
 223  223      u32 num_ctls;
 224     -      struct dpu_hw_ctl *hw_ctls[CRTC_QUAD_MIXERS];
      224 +    struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
 225  225  
 226  226      enum dpu_crtc_crc_source crc_source;
 227  227      int crc_frame_skip_count;
···
 266  266  }
 267  267  
 268  268  void dpu_crtc_frame_event_cb(struct drm_crtc *crtc, u32 event);
 269     -  
 270     -  unsigned int dpu_crtc_get_num_lm(const struct drm_crtc_state *state);
 271  269  
 272  270  #endif /* _DPU_CRTC_H_ */
+20 -9
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
···
  55   55  #define MAX_PHYS_ENCODERS_PER_VIRTUAL \
  56   56      (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
  57   57  
  58      -  #define MAX_CHANNELS_PER_ENC 4
        58 + #define MAX_CHANNELS_PER_ENC 2
  59   59  #define MAX_CWB_PER_ENC 2
  60   60  
  61   61  #define IDLE_SHORT_TIMEOUT 1
···
 661  661      struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
 662  662      struct msm_drm_private *priv = dpu_enc->base.dev->dev_private;
 663  663      struct msm_display_info *disp_info = &dpu_enc->disp_info;
      664 +    struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
 664  665      struct drm_connector *connector;
 665  666      struct drm_connector_state *conn_state;
 666  667      struct drm_framebuffer *fb;
···
 675  674  
 676  675      dsc = dpu_encoder_get_dsc_config(drm_enc);
 677  676  
 678     -      /*
 679     -       * Set DSC number as 1 to mark the enabled status, will be adjusted
 680     -       * in dpu_crtc_get_topology()
 681     -       */
 682     -      if (dsc)
 683     -          topology->num_dsc = 1;
      677 +    /* We only support 2 DSC mode (with 2 LM and 1 INTF) */
      678 +    if (dsc) {
      679 +        /*
      680 +         * Use 2 DSC encoders, 2 layer mixers and 1 or 2 interfaces
      681 +         * when Display Stream Compression (DSC) is enabled,
      682 +         * and when enough DSC blocks are available.
      683 +         * This is power-optimal and can drive up to (including) 4k
      684 +         * screens.
      685 +         */
      686 +        WARN(topology->num_intf > 2,
      687 +             "DSC topology cannot support more than 2 interfaces\n");
      688 +        if (topology->num_intf >= 2 || dpu_kms->catalog->dsc_count >= 2)
      689 +            topology->num_dsc = 2;
      690 +        else
      691 +            topology->num_dsc = 1;
      692 +    }
 684  693  
 685  694      connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
 686  695      if (!connector)
···
2180 2169  {
2181 2170      int i, num_lm;
2182 2171      struct dpu_global_state *global_state;
2183    -      struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
2184    -      struct dpu_hw_mixer *hw_mixer[MAX_CHANNELS_PER_ENC];
      2172 +    struct dpu_hw_blk *hw_lm[2];
      2173 +    struct dpu_hw_mixer *hw_mixer[2];
2185 2174      struct dpu_hw_ctl *ctl = phys_enc->hw_ctl;
2186 2175  
2187 2176      /* reset all mixers for this encoder */
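
The restored DSC sizing above condenses to one predicate. A sketch with
hypothetical names, not the driver function:

    /* Two DSC encoders whenever a second interface or a second DSC block
     * exists; otherwise a single encoder. */
    static unsigned int pick_num_dsc(int dsc_enabled, unsigned int num_intf,
                                     unsigned int catalog_dsc_count)
    {
        if (!dsc_enabled)
            return 0;
        return (num_intf >= 2 || catalog_dsc_count >= 2) ? 2 : 1;
    }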
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
···
 302  302  
 303  303      /* Use merge_3d unless DSC MERGE topology is used */
 304  304      if (phys_enc->split_role == ENC_ROLE_SOLO &&
 305     -          (dpu_cstate->num_mixers != 1) &&
      305 +        dpu_cstate->num_mixers == CRTC_DUAL_MIXERS &&
 306  306          !dpu_encoder_use_dsc_merge(phys_enc->parent))
 307  307          return BLEND_3D_H_ROW_INT;
 308  308  
+4 -6
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_wb.c
···
 247  247      if (hw_cdm)
 248  248          intf_cfg.cdm = hw_cdm->idx;
 249  249  
 250     -      if (phys_enc->hw_pp->merge_3d && phys_enc->hw_pp->merge_3d->ops.setup_3d_mode)
 251     -          phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
 252     -                  mode_3d);
      250 +    if (hw_pp && hw_pp->merge_3d && hw_pp->merge_3d->ops.setup_3d_mode)
      251 +        hw_pp->merge_3d->ops.setup_3d_mode(hw_pp->merge_3d, mode_3d);
 253  252  
 254  253      /* setup which pp blk will connect to this wb */
 255     -      if (hw_pp && phys_enc->hw_wb->ops.bind_pingpong_blk)
 256     -          phys_enc->hw_wb->ops.bind_pingpong_blk(phys_enc->hw_wb,
 257     -                  phys_enc->hw_pp->idx);
      254 +    if (hw_pp && hw_wb->ops.bind_pingpong_blk)
      255 +        hw_wb->ops.bind_pingpong_blk(hw_wb, hw_pp->idx);
 258  256  
 259  257      phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
 260  258      } else if (phys_enc->hw_ctl && phys_enc->hw_ctl->ops.setup_intf_cfg) {
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
···
  24   24  #define DPU_MAX_IMG_WIDTH 0x3fff
  25   25  #define DPU_MAX_IMG_HEIGHT 0x3fff
  26   26  
  27      -  #define CRTC_QUAD_MIXERS 4
        27 + #define CRTC_DUAL_MIXERS 2
  28   28  
  29   29  #define MAX_XIN_COUNT 16
  30   30  
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
···
  89   89   */
  90   90  struct dpu_hw_cdm_ops {
  91   91      /**
  92      -       * Enable the CDM module
        92 +     * @enable: Enable the CDM module
  93   93       * @cdm Pointer to chroma down context
  94   94       */
  95   95      int (*enable)(struct dpu_hw_cdm *cdm, struct dpu_hw_cdm_cfg *cfg);
  96   96  
  97   97      /**
  98      -       * Enable/disable the connection with pingpong
        98 +     * @bind_pingpong_blk: Enable/disable the connection with pingpong
  99   99       * @cdm Pointer to chroma down context
 100  100       * @pp pingpong block id.
 101  101       */
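
All of the "fix all kernel-doc warnings" commits in this pull follow the same
mechanical rule these cdm hunks show: inside a struct body, each member comment
must start with "@member:", or scripts/kernel-doc reports it. An illustrative
before/after, not taken from the patch:

    struct example_ops {
        /**
         * Enable the block           (warns: no @member tag)
         */
        int (*enable)(void *ctx);
    };

    struct example_ops_fixed {
        /**
         * @enable: Enable the block  (matched to the member below)
         */
        int (*enable)(void *ctx);
    };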
+53 -31
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
···
  12   12  #include "dpu_hw_sspp.h"
  13   13  
  14   14  /**
  15      -   * dpu_ctl_mode_sel: Interface mode selection
  16      -   * DPU_CTL_MODE_SEL_VID: Video mode interface
  17      -   * DPU_CTL_MODE_SEL_CMD: Command mode interface
        15 +  * enum dpu_ctl_mode_sel: Interface mode selection
        16 +  * @DPU_CTL_MODE_SEL_VID: Video mode interface
        17 +  * @DPU_CTL_MODE_SEL_CMD: Command mode interface
  18   18   */
  19   19  enum dpu_ctl_mode_sel {
  20   20      DPU_CTL_MODE_SEL_VID = 0,
···
  37   37   * struct dpu_hw_intf_cfg :Describes how the DPU writes data to output interface
  38   38   * @intf : Interface id
  39   39   * @intf_master: Master interface id in the dual pipe topology
        40 +  * @wb: Writeback mode
  40   41   * @mode_3d: 3d mux configuration
  41   42   * @merge_3d: 3d merge block used
  42   43   * @intf_mode_sel: Interface mode, cmd / vid
···
  65   64   */
  66   65  struct dpu_hw_ctl_ops {
  67   66      /**
  68      -       * kickoff hw operation for Sw controlled interfaces
        67 +     * @trigger_start: kickoff hw operation for Sw controlled interfaces
  69   68       * DSI cmd mode and WB interface are SW controlled
  70   69       * @ctx : ctl path ctx pointer
  71   70       */
  72   71      void (*trigger_start)(struct dpu_hw_ctl *ctx);
  73   72  
  74   73      /**
  75      -       * check if the ctl is started
        74 +     * @is_started: check if the ctl is started
  76   75       * @ctx : ctl path ctx pointer
  77   76       * @Return: true if started, false if stopped
  78   77       */
  79   78      bool (*is_started)(struct dpu_hw_ctl *ctx);
  80   79  
  81   80      /**
  82      -       * kickoff prepare is in progress hw operation for sw
        81 +     * @trigger_pending: kickoff prepare is in progress hw operation for sw
  83   82       * controlled interfaces: DSI cmd mode and WB interface
  84   83       * are SW controlled
  85   84       * @ctx : ctl path ctx pointer
···
  87   86      void (*trigger_pending)(struct dpu_hw_ctl *ctx);
  88   87  
  89   88      /**
  90      -       * Clear the value of the cached pending_flush_mask
        89 +     * @clear_pending_flush: Clear the value of the cached pending_flush_mask
  91   90       * No effect on hardware.
  92   91       * Required to be implemented.
  93   92       * @ctx : ctl path ctx pointer
···
  95   94      void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
  96   95  
  97   96      /**
  98      -       * Query the value of the cached pending_flush_mask
        97 +     * @get_pending_flush: Query the value of the cached pending_flush_mask
  99   98       * No effect on hardware
 100   99       * @ctx : ctl path ctx pointer
 101  100       */
 102  101      u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
 103  102  
 104  103      /**
 105     -       * OR in the given flushbits to the cached pending_flush_mask
      104 +     * @update_pending_flush: OR in the given flushbits to the cached
      105 +     * pending_flush_mask.
 106  106       * No effect on hardware
 107  107       * @ctx : ctl path ctx pointer
 108  108       * @flushbits : module flushmask
···
 112  110                       u32 flushbits);
 113  111  
 114  112      /**
 115     -       * OR in the given flushbits to the cached pending_(wb_)flush_mask
      113 +     * @update_pending_flush_wb: OR in the given flushbits to the
      114 +     * cached pending_(wb_)flush_mask.
 116  115       * No effect on hardware
 117  116       * @ctx : ctl path ctx pointer
 118  117       * @blk : writeback block index
···
 122  119                       enum dpu_wb blk);
 123  120  
 124  121      /**
 125     -       * OR in the given flushbits to the cached pending_(cwb_)flush_mask
      122 +     * @update_pending_flush_cwb: OR in the given flushbits to the
      123 +     * cached pending_(cwb_)flush_mask.
 126  124       * No effect on hardware
 127  125       * @ctx : ctl path ctx pointer
 128  126       * @blk : concurrent writeback block index
···
 132  128                       enum dpu_cwb blk);
 133  129  
 134  130      /**
 135     -       * OR in the given flushbits to the cached pending_(intf_)flush_mask
      131 +     * @update_pending_flush_intf: OR in the given flushbits to the
      132 +     * cached pending_(intf_)flush_mask.
 136  133       * No effect on hardware
 137  134       * @ctx : ctl path ctx pointer
 138  135       * @blk : interface block index
···
 142  137                       enum dpu_intf blk);
 143  138  
 144  139      /**
 145     -       * OR in the given flushbits to the cached pending_(periph_)flush_mask
      140 +     * @update_pending_flush_periph: OR in the given flushbits to the
      141 +     * cached pending_(periph_)flush_mask.
 146  142       * No effect on hardware
 147  143       * @ctx : ctl path ctx pointer
 148  144       * @blk : interface block index
···
 152  146                       enum dpu_intf blk);
 153  147  
 154  148      /**
 155     -       * OR in the given flushbits to the cached pending_(merge_3d_)flush_mask
      149 +     * @update_pending_flush_merge_3d: OR in the given flushbits to the
      150 +     * cached pending_(merge_3d_)flush_mask.
 156  151       * No effect on hardware
 157  152       * @ctx : ctl path ctx pointer
 158  153       * @blk : interface block index
···
 162  155                       enum dpu_merge_3d blk);
 163  156  
 164  157      /**
 165     -       * OR in the given flushbits to the cached pending_flush_mask
      158 +     * @update_pending_flush_sspp: OR in the given flushbits to the
      159 +     * cached pending_flush_mask.
 166  160       * No effect on hardware
 167  161       * @ctx : ctl path ctx pointer
 168  162       * @blk : SSPP block index
···
 172  164                       enum dpu_sspp blk);
 173  165  
 174  166      /**
 175     -       * OR in the given flushbits to the cached pending_flush_mask
      167 +     * @update_pending_flush_mixer: OR in the given flushbits to the
      168 +     * cached pending_flush_mask.
 176  169       * No effect on hardware
 177  170       * @ctx : ctl path ctx pointer
 178  171       * @blk : LM block index
···
 182  173                       enum dpu_lm blk);
 183  174  
 184  175      /**
 185     -       * OR in the given flushbits to the cached pending_flush_mask
      176 +     * @update_pending_flush_dspp: OR in the given flushbits to the
      177 +     * cached pending_flush_mask.
 186  178       * No effect on hardware
 187  179       * @ctx : ctl path ctx pointer
 188  180       * @blk : DSPP block index
···
 193  183                       enum dpu_dspp blk, u32 dspp_sub_blk);
 194  184  
 195  185      /**
 196     -       * OR in the given flushbits to the cached pending_(dsc_)flush_mask
      186 +     * @update_pending_flush_dsc: OR in the given flushbits to the
      187 +     * cached pending_(dsc_)flush_mask.
 197  188       * No effect on hardware
 198  189       * @ctx: ctl path ctx pointer
 199  190       * @blk: interface block index
···
 203  192                       enum dpu_dsc blk);
 204  193  
 205  194      /**
 206     -       * OR in the given flushbits to the cached pending_(cdm_)flush_mask
      195 +     * @update_pending_flush_cdm: OR in the given flushbits to the
      196 +     * cached pending_(cdm_)flush_mask.
 207  197       * No effect on hardware
 208  198       * @ctx: ctl path ctx pointer
 209  199       * @cdm_num: idx of cdm to be flushed
···
 212  200      void (*update_pending_flush_cdm)(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num);
 213  201  
 214  202      /**
 215     -       * Write the value of the pending_flush_mask to hardware
      203 +     * @trigger_flush: Write the value of the pending_flush_mask to hardware
 216  204       * @ctx : ctl path ctx pointer
 217  205       */
 218  206      void (*trigger_flush)(struct dpu_hw_ctl *ctx);
 219  207  
 220  208      /**
 221     -       * Read the value of the flush register
      209 +     * @get_flush_register: Read the value of the flush register
 222  210       * @ctx : ctl path ctx pointer
 223  211       * @Return: value of the ctl flush register.
 224  212       */
 225  213      u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
 226  214  
 227  215      /**
 228     -       * Setup ctl_path interface config
      216 +     * @setup_intf_cfg: Setup ctl_path interface config
 229  217       * @ctx
 230  218       * @cfg : interface config structure pointer
 231  219       */
···
 233  221                 struct dpu_hw_intf_cfg *cfg);
 234  222  
 235  223      /**
 236     -       * reset ctl_path interface config
      224 +     * @reset_intf_cfg: reset ctl_path interface config
 237  225       * @ctx : ctl path ctx pointer
 238  226       * @cfg : interface config structure pointer
 239  227       */
 240  228      void (*reset_intf_cfg)(struct dpu_hw_ctl *ctx,
 241  229                 struct dpu_hw_intf_cfg *cfg);
 242  230  
      231 +    /**
      232 +     * @reset: reset function for this ctl type
      233 +     */
 243  234      int (*reset)(struct dpu_hw_ctl *c);
 244  235  
 245     -      /*
 246     -       * wait_reset_status - checks ctl reset status
      236 +    /**
      237 +     * @wait_reset_status: checks ctl reset status
 247  238       * @ctx : ctl path ctx pointer
 248  239       *
 249  240       * This function checks the ctl reset status bit.
···
 257  242      int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
 258  243  
 259  244      /**
 260     -       * Set all blend stages to disabled
      245 +     * @clear_all_blendstages: Set all blend stages to disabled
 261  246       * @ctx : ctl path ctx pointer
 262  247       */
 263  248      void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
 264  249  
 265  250      /**
 266     -       * Configure layer mixer to pipe configuration
      251 +     * @setup_blendstage: Configure layer mixer to pipe configuration
 267  252       * @ctx : ctl path ctx pointer
 268  253       * @lm : layer mixer enumeration
 269  254       * @cfg : blend stage configuration
···
 271  256      void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
 272  257          enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
 273  258  
      259 +    /**
      260 +     * @set_active_fetch_pipes: Set active pipes attached to this CTL
      261 +     * @ctx: ctl path ctx pointer
      262 +     * @active_pipes: bitmap of enum dpu_sspp
      263 +     */
 274  264      void (*set_active_fetch_pipes)(struct dpu_hw_ctl *ctx,
 275  265                         unsigned long *fetch_active);
 276  266  
 277  267      /**
 278     -       * Set active pipes attached to this CTL
      268 +     * @set_active_pipes: Set active pipes attached to this CTL
 279  269       * @ctx: ctl path ctx pointer
 280  270       * @active_pipes: bitmap of enum dpu_sspp
 281  271       */
···
 288  268                   unsigned long *active_pipes);
 289  269  
 290  270      /**
 291     -       * Set active layer mixers attached to this CTL
      271 +     * @set_active_lms: Set active layer mixers attached to this CTL
 292  272       * @ctx: ctl path ctx pointer
 293  273       * @active_lms: bitmap of enum dpu_lm
 294  274       */
 295  275      void (*set_active_lms)(struct dpu_hw_ctl *ctx,
 296  276                 unsigned long *active_lms);
 297     -  
 298  277  };
···
 308  289   * @pending_intf_flush_mask: pending INTF flush
 309  290   * @pending_wb_flush_mask: pending WB flush
 310  291   * @pending_cwb_flush_mask: pending CWB flush
      292 +  * @pending_periph_flush_mask: pending PERIPH flush
      293 +  * @pending_merge_3d_flush_mask: pending MERGE 3D flush
      294 +  * @pending_dspp_flush_mask: pending DSPP flush
 311  295   * @pending_dsc_flush_mask: pending DSC flush
 312  296   * @pending_cdm_flush_mask: pending CDM flush
 313  297   * @mdss_ver: MDSS revision information
···
 342  320  };
 343  321  
 344  322  /**
 345     -   * dpu_hw_ctl - convert base object dpu_hw_base to container
      323 +  * to_dpu_hw_ctl - convert base object dpu_hw_base to container
 346  324   * @hw: Pointer to base hardware block
 347  325   * return: Pointer to hardware block container
 348  326   */
+1 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cwb.h
···
  28   28  };
  29   29  
  30   30  /**
  31      -   *
  32   31   * struct dpu_hw_cwb_ops : Interface to the cwb hw driver functions
  33   32   * @config_cwb: configure CWB mux
  34   33   */
···
  53   54  };
  54   55  
  55   56  /**
  56      -   * dpu_hw_cwb - convert base object dpu_hw_base to container
        57 +  * to_dpu_hw_cwb - convert base object dpu_hw_base to container
  57   58   * @hw: Pointer to base hardware block
  58   59   * return: Pointer to hardware block container
  59   60   */
+7 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dsc.h
···
  21   21   */
  22   22  struct dpu_hw_dsc_ops {
  23   23      /**
  24      -       * dsc_disable - disable dsc
        24 +     * @dsc_disable: disable dsc
  25   25       * @hw_dsc: Pointer to dsc context
  26   26       */
  27   27      void (*dsc_disable)(struct dpu_hw_dsc *hw_dsc);
  28   28  
  29   29      /**
  30      -       * dsc_config - configures dsc encoder
        30 +     * @dsc_config: configures dsc encoder
  31   31       * @hw_dsc: Pointer to dsc context
  32   32       * @dsc: panel dsc parameters
  33   33       * @mode: dsc topology mode to be set
···
  39   39                 u32 initial_lines);
  40   40  
  41   41      /**
  42      -       * dsc_config_thresh - programs panel thresholds
        42 +     * @dsc_config_thresh: programs panel thresholds
  43   43       * @hw_dsc: Pointer to dsc context
  44   44       * @dsc: panel dsc parameters
  45   45       */
  46   46      void (*dsc_config_thresh)(struct dpu_hw_dsc *hw_dsc,
  47   47                    struct drm_dsc_config *dsc);
  48   48  
        49 +    /**
        50 +     * @dsc_bind_pingpong_blk: binds pixel output from a DSC block
        51 +     * to a pingpong block
        52 +     */
  49   53      void (*dsc_bind_pingpong_blk)(struct dpu_hw_dsc *hw_dsc,
  50   54                        enum dpu_pingpong pp);
  51   55  };
+3 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.h
···
  22   22  };
  23   23  
  24   24  /**
  25      -   * struct dpu_hw_pcc - pcc feature structure
        25 +  * struct dpu_hw_pcc_cfg - pcc feature structure
  26   26   * @r: red coefficients.
  27   27   * @g: green coefficients.
  28   28   * @b: blue coefficients.
···
  40   40   */
  41   41  struct dpu_hw_dspp_ops {
  42   42      /**
  43      -       * setup_pcc - setup dspp pcc
        43 +     * @setup_pcc: setup_pcc - setup dspp pcc
  44   44       * @ctx: Pointer to dspp context
  45   45       * @cfg: Pointer to configuration
  46   46       */
···
  69   69  };
  70   70  
  71   71  /**
  72      -   * dpu_hw_dspp - convert base object dpu_hw_base to container
        72 +  * to_dpu_hw_dspp - convert base object dpu_hw_base to container
  73   73   * @hw: Pointer to base hardware block
  74   74   * return: Pointer to hardware block container
  75   75   */
+7 -13
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
···
  57   57  /**
  58   58   * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
  59   59   * Assumption is these functions will be called after clocks are enabled
  60      -   * @ setup_timing_gen : programs the timing engine
  61      -   * @ setup_prog_fetch : enables/disables the programmable fetch logic
  62      -   * @ enable_timing: enable/disable timing engine
  63      -   * @ get_status: returns if timing engine is enabled or not
  64      -   * @ get_line_count: reads current vertical line counter
        60 +  * @setup_timing_gen : programs the timing engine
        61 +  * @setup_prg_fetch : enables/disables the programmable fetch logic
        62 +  * @enable_timing: enable/disable timing engine
        63 +  * @get_status: returns if timing engine is enabled or not
        64 +  * @get_line_count: reads current vertical line counter
  65   65   * @bind_pingpong_blk: enable/disable the connection with pingpong which will
  66   66   *                    feed pixels to this interface
  67   67   * @setup_misr: enable/disable MISR
···
  70   70   *                    pointer and programs the tear check configuration
  71   71   * @disable_tearcheck: Disables tearcheck block
  72   72   * @connect_external_te: Read, modify, write to either set or clear listening to external TE
  73      -   *                      Return: 1 if TE was originally connected, 0 if not, or -ERROR
  74      -   * @get_vsync_info: Provides the programmed and current line_count
  75      -   * @setup_autorefresh: Configure and enable the autorefresh config
  76      -   * @get_autorefresh: Retrieve autorefresh config from hardware
  77      -   *                   Return: 0 on success, -ETIMEDOUT on timeout
        73 +  *                      Returns 1 if TE was originally connected, 0 if not, or -ERROR
  78   74   * @vsync_sel: Select vsync signal for tear-effect configuration
        75 +  * @disable_autorefresh: Disable autorefresh if enabled
  79   76   * @program_intf_cmd_cfg: Program the DPU to interface datapath for command mode
  80   77   */
  81   78  struct dpu_hw_intf_ops {
···
 106  109  
 107  110      void (*vsync_sel)(struct dpu_hw_intf *intf, enum dpu_vsync_source vsync_source);
 108  111  
 109     -      /**
 110     -       * Disable autorefresh if enabled
 111     -       */
 112  112      void (*disable_autorefresh)(struct dpu_hw_intf *intf, uint32_t encoder_id, u16 vdisplay);
 113  113  
 114  114      void (*program_intf_cmd_cfg)(struct dpu_hw_intf *intf,
+11 -12
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
···
  25   25  };
  26   26  
  27   27  /**
  28      -   *
  29   28   * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
  30   29   * Assumption is these functions will be called after clocks are enabled
  31   30   */
  32   31  struct dpu_hw_lm_ops {
  33      -      /*
  34      -       * Sets up mixer output width and height
        32 +    /**
        33 +     * @setup_mixer_out: Sets up mixer output width and height
  35   34       * and border color if enabled
  36   35       */
  37   36      void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
  38   37          struct dpu_hw_mixer_cfg *cfg);
  39   38  
  40      -      /*
  41      -       * Alpha blending configuration
        39 +    /**
        40 +     * @setup_blend_config: Alpha blending configuration
  42   41       * for the specified stage
  43   42       */
  44   43      void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
  45   44          uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
  46   45  
  47      -      /*
  48      -       * Alpha color component selection from either fg or bg
        46 +    /**
        47 +     * @setup_alpha_out: Alpha color component selection from either fg or bg
  49   48       */
  50   49      void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
  51   50  
  52   51      /**
  53      -       * Clear layer mixer to pipe configuration
        52 +     * @clear_all_blendstages: Clear layer mixer to pipe configuration
  54   53       * @ctx : mixer ctx pointer
  55   54       * Returns: 0 on success or -error
  56   55       */
  57   56      int (*clear_all_blendstages)(struct dpu_hw_mixer *ctx);
  58   57  
  59   58      /**
  60      -       * Configure layer mixer to pipe configuration
        59 +     * @setup_blendstage: Configure layer mixer to pipe configuration
  61   60       * @ctx : mixer ctx pointer
  62   61       * @lm : layer mixer enumeration
  63   62       * @stage_cfg : blend stage configuration
···
  66   67          struct dpu_hw_stage_cfg *stage_cfg);
  67   68  
  68   69      /**
  69      -       * setup_border_color : enable/disable border color
        70 +     * @setup_border_color : enable/disable border color
  70   71       */
  71   72      void (*setup_border_color)(struct dpu_hw_mixer *ctx,
  72   73          struct dpu_mdss_color *color,
  73   74          u8 border_en);
  74   75  
  75   76      /**
  76      -       * setup_misr: Enable/disable MISR
        77 +     * @setup_misr: Enable/disable MISR
  77   78       */
  78   79      void (*setup_misr)(struct dpu_hw_mixer *ctx);
  79   80  
  80   81      /**
  81      -       * collect_misr: Read MISR signature
        82 +     * @collect_misr: Read MISR signature
  82   83       */
  83   84      int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value);
  84   85  };
+1 -1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
···
  34   34  #define DPU_MAX_PLANES 4
  35   35  #endif
  36   36  
  37      -  #define STAGES_PER_PLANE 2
        37 + #define STAGES_PER_PLANE 1
  38   38  #define PIPES_PER_STAGE 2
  39   39  #define PIPES_PER_PLANE (PIPES_PER_STAGE * STAGES_PER_PLANE)
  40   40  #ifndef DPU_MAX_DE_CURVES
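
Arithmetic note on the hunk above: with STAGES_PER_PLANE back to 1, the derived
limit becomes PIPES_PER_PLANE = PIPES_PER_STAGE * STAGES_PER_PLANE = 2 * 1 = 2,
i.e. a plane is again limited to a single SSPP pair, matching the
pipe_cfg[0]/pipe_cfg[1] pairs in the dpu_plane.c revert below.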
-1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_merge3d.h
···
  12   12  struct dpu_hw_merge_3d;
  13   13  
  14   14  /**
  15      -   *
  16   15   * struct dpu_hw_merge_3d_ops : Interface to the merge_3d Hw driver functions
  17   16   * Assumption is these functions will be called after clocks are enabled
  18   17   * @setup_3d_mode : enable 3D merge
+10 -10
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
···
  34   34  };
  35   35  
  36   36  /**
  37      -   *
  38   37   * struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
  39   38   * Assumption is these functions will be called after clocks are enabled
  40   39   * @enable_tearcheck: program and enable tear check block
···
  43   44   */
  44   45  struct dpu_hw_pingpong_ops {
  45   46      /**
  46      -       * enables vysnc generation and sets up init value of
        47 +     * @enable_tearcheck: enables vysnc generation and sets up init value of
  47   48       * read pointer and programs the tear check cofiguration
  48   49       */
  49   50      int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
  50   51          struct dpu_hw_tear_check *cfg);
  51   52  
  52   53      /**
  53      -       * disables tear check block
        54 +     * @disable_tearcheck: disables tear check block
  54   55       */
  55   56      int (*disable_tearcheck)(struct dpu_hw_pingpong *pp);
  56   57  
  57   58      /**
  58      -       * read, modify, write to either set or clear listening to external TE
        59 +     * @connect_external_te: read, modify, write to either set or clear
        60 +     * listening to external TE
  59   61       * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
  60   62       */
  61   63      int (*connect_external_te)(struct dpu_hw_pingpong *pp,
  62   64                     bool enable_external_te);
  63   65  
  64   66      /**
  65      -       * Obtain current vertical line counter
        67 +     * @get_line_count: Obtain current vertical line counter
  66   68       */
  67   69      u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
  68   70  
  69   71      /**
  70      -       * Disable autorefresh if enabled
        72 +     * @disable_autorefresh: Disable autorefresh if enabled
  71   73       */
  72   74      void (*disable_autorefresh)(struct dpu_hw_pingpong *pp, uint32_t encoder_id, u16 vdisplay);
  73   75  
  74   76      /**
  75      -       * Setup dither matix for pingpong block
        77 +     * @setup_dither: Setup dither matix for pingpong block
  76   78       */
  77   79      void (*setup_dither)(struct dpu_hw_pingpong *pp,
  78   80                   struct dpu_hw_dither_cfg *cfg);
  79   81      /**
  80      -       * Enable DSC
        82 +     * @enable_dsc: Enable DSC
  81   83       */
  82   84      int (*enable_dsc)(struct dpu_hw_pingpong *pp);
  83   85  
  84   86      /**
  85      -       * Disable DSC
        87 +     * @disable_dsc: Disable DSC
  86   88       */
  87   89      void (*disable_dsc)(struct dpu_hw_pingpong *pp);
  88   90  
  89   91      /**
  90      -       * Setup DSC
        92 +     * @setup_dsc: Setup DSC
  91   93       */
  92   94      int (*setup_dsc)(struct dpu_hw_pingpong *pp);
  93   95  };
+24 -23
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
···
  14   14  
  15   15  #define DPU_SSPP_MAX_PITCH_SIZE 0xffff
  16   16  
  17      -  /**
        17 + /*
  18   18   * Flags
  19   19   */
  20   20  #define DPU_SSPP_FLIP_LR BIT(0)
···
  23   23  #define DPU_SSPP_ROT_90 BIT(3)
  24   24  #define DPU_SSPP_SOLID_FILL BIT(4)
  25   25  
  26      -  /**
        26 + /*
  27   27   * Component indices
  28   28   */
  29   29  enum {
···
  36   36  };
  37   37  
  38   38  /**
  39      -   * DPU_SSPP_RECT_SOLO - multirect disabled
  40      -   * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
  41      -   * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
        39 +  * enum dpu_sspp_multirect_index - multirect mode
        40 +  * @DPU_SSPP_RECT_SOLO: multirect disabled
        41 +  * @DPU_SSPP_RECT_0: rect0 of a multirect pipe
        42 +  * @DPU_SSPP_RECT_1: rect1 of a multirect pipe
  42   43   *
  43   44   * Note: HW supports multirect with either RECT0 or
  44   45   * RECT1. Considering no benefit of such configs over
···
 144  143   * struct dpu_sw_pipe_cfg : software pipe configuration
 145  144   * @src_rect: src ROI, caller takes into account the different operations
 146  145   *            such as decimation, flip etc to program this field
 147     -   * @dest_rect: destination ROI.
      146 +  * @dst_rect: destination ROI.
 148  147   * @rotation: simplified drm rotation hint
 149  148   */
 150  149  struct dpu_sw_pipe_cfg {
···
 166  165  /**
 167  166   * struct dpu_sw_pipe - software pipe description
 168  167   * @sspp: backing SSPP pipe
 169     -   * @index: index of the rectangle of SSPP
 170     -   * @mode: parallel or time multiplex multirect mode
      168 +  * @multirect_index: index of the rectangle of SSPP
      169 +  * @multirect_mode: parallel or time multiplex multirect mode
 171  170   */
 172  171  struct dpu_sw_pipe {
 173  172      struct dpu_hw_sspp *sspp;
···
 182  181   */
 183  182  struct dpu_hw_sspp_ops {
 184  183      /**
 185     -       * setup_format - setup pixel format cropping rectangle, flip
      184 +     * @setup_format: setup pixel format cropping rectangle, flip
 186  185       * @pipe: Pointer to software pipe context
 187  186       * @cfg: Pointer to pipe config structure
 188  187       * @flags: Extra flags for format config
···
 191  190                   const struct msm_format *fmt, u32 flags);
 192  191  
 193  192      /**
 194     -       * setup_rects - setup pipe ROI rectangles
      193 +     * @setup_rects: setup pipe ROI rectangles
 195  194       * @pipe: Pointer to software pipe context
 196  195       * @cfg: Pointer to pipe config structure
 197  196       */
···
 199  198                  struct dpu_sw_pipe_cfg *cfg);
 200  199  
 201  200      /**
 202     -       * setup_pe - setup pipe pixel extension
      201 +     * @setup_pe: setup pipe pixel extension
 203  202       * @ctx: Pointer to pipe context
 204  203       * @pe_ext: Pointer to pixel ext settings
 205  204       */
···
 207  206               struct dpu_hw_pixel_ext *pe_ext);
 208  207  
 209  208      /**
 210     -       * setup_sourceaddress - setup pipe source addresses
      209 +     * @setup_sourceaddress: setup pipe source addresses
 211  210       * @pipe: Pointer to software pipe context
 212  211       * @layout: format layout information for programming buffer to hardware
 213  212       */
···
 215  214                      struct dpu_hw_fmt_layout *layout);
 216  215  
 217  216      /**
 218     -       * setup_csc - setup color space coversion
      217 +     * @setup_csc: setup color space coversion
 219  218       * @ctx: Pointer to pipe context
 220  219       * @data: Pointer to config structure
 221  220       */
 222  221      void (*setup_csc)(struct dpu_hw_sspp *ctx, const struct dpu_csc_cfg *data);
 223  222  
 224  223      /**
 225     -       * setup_solidfill - enable/disable colorfill
      224 +     * @setup_solidfill: enable/disable colorfill
 226  225       * @pipe: Pointer to software pipe context
 227  226       * @const_color: Fill color value
 228  227       * @flags: Pipe flags
···
 230  229      void (*setup_solidfill)(struct dpu_sw_pipe *pipe, u32 color);
 231  230  
 232  231      /**
 233     -       * setup_multirect - setup multirect configuration
      232 +     * @setup_multirect: setup multirect configuration
 234  233       * @pipe: Pointer to software pipe context
 235  234       */
 236  235  
 237  236      void (*setup_multirect)(struct dpu_sw_pipe *pipe);
 238  237  
 239  238      /**
 240     -       * setup_sharpening - setup sharpening
      239 +     * @setup_sharpening: setup sharpening
 241  240       * @ctx: Pointer to pipe context
 242  241       * @cfg: Pointer to config structure
 243  242       */
 244  243      void (*setup_sharpening)(struct dpu_hw_sspp *ctx,
 245  244                   struct dpu_hw_sharp_cfg *cfg);
 246  245  
 247     -  
 248  246      /**
 249     -       * setup_qos_lut - setup QoS LUTs
      247 +     * @setup_qos_lut: setup QoS LUTs
 250  248       * @ctx: Pointer to pipe context
 251  249       * @cfg: LUT configuration
 252  250       */
···
 253  253                    struct dpu_hw_qos_cfg *cfg);
 254  254  
 255  255      /**
 256     -       * setup_qos_ctrl - setup QoS control
      256 +     * @setup_qos_ctrl: setup QoS control
 257  257       * @ctx: Pointer to pipe context
 258  258       * @danger_safe_en: flags controlling enabling of danger/safe QoS/LUT
 259  259       */
···
 261  261                     bool danger_safe_en);
 262  262  
 263  263      /**
 264     -       * setup_clk_force_ctrl - setup clock force control
      264 +     * @setup_clk_force_ctrl: setup clock force control
 265  265       * @ctx: Pointer to pipe context
 266  266       * @enable: enable clock force if true
 267  267       */
···
 269  269                       bool enable);
 270  270  
 271  271      /**
 272     -       * setup_histogram - setup histograms
      272 +     * @setup_histogram: setup histograms
 273  273       * @ctx: Pointer to pipe context
 274  274       * @cfg: Pointer to histogram configuration
 275  275       */
···
 277  277                  void *cfg);
 278  278  
 279  279      /**
 280     -       * setup_scaler - setup scaler
      280 +     * @setup_scaler: setup scaler
 281  281       * @scaler3_cfg: Pointer to scaler configuration
 282  282       * @format: pixel format parameters
 283  283       */
···
 286  286                   const struct msm_format *format);
 287  287  
 288  288      /**
 289     -       * setup_cdp - setup client driven prefetch
      289 +     * @setup_cdp: setup client driven prefetch
 290  290       * @pipe: Pointer to software pipe context
 291  291       * @fmt: format used by the sw pipe
 292  292       * @enable: whether the CDP should be enabled for this pipe
···
 303  303   * @ubwc: UBWC configuration data
 304  304   * @idx: pipe index
 305  305   * @cap: pointer to layer_cfg
      306 +  * @mdss_ver: MDSS version info to use for feature checks
 306  307   * @ops: pointer to operations possible for this pipe
 307  308   */
 308  309  struct dpu_hw_sspp {
+10 -11
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
···
  77   77  /**
  78   78   * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
  79   79   * Assumption is these functions will be called after clocks are enabled.
  80      -   * @setup_split_pipe : Programs the pipe control registers
  81      -   * @setup_pp_split : Programs the pp split control registers
  82      -   * @setup_traffic_shaper : programs traffic shaper control
  83   80   */
  84   81  struct dpu_hw_mdp_ops {
  85      -      /** setup_split_pipe() : Registers are not double buffered, thisk
        82 +    /**
        83 +     * @setup_split_pipe : Programs the pipe control registers.
        84 +     * Registers are not double buffered, this
  86   85       * function should be called before timing control enable
  87   86       * @mdp : mdp top context driver
  88   87       * @cfg : upper and lower part of pipe configuration
···
  90   91              struct split_pipe_cfg *p);
  91   92  
  92   93      /**
  93      -       * setup_traffic_shaper() : Setup traffic shaper control
        94 +     * @setup_traffic_shaper : programs traffic shaper control.
  94   95       * @mdp : mdp top context driver
  95   96       * @cfg : traffic shaper configuration
  96   97       */
···
  98   99                   struct traffic_shaper_cfg *cfg);
  99  100  
 100  101      /**
 101     -       * setup_clk_force_ctrl - set clock force control
      102 +     * @setup_clk_force_ctrl: set clock force control
 102  103       * @mdp: mdp top context driver
 103  104       * @clk_ctrl: clock to be controlled
 104  105       * @enable: force on enable
···
 108  109          enum dpu_clk_ctrl_type clk_ctrl, bool enable);
 109  110  
 110  111      /**
 111     -       * get_danger_status - get danger status
      112 +     * @get_danger_status: get danger status
 112  113       * @mdp: mdp top context driver
 113  114       * @status: Pointer to danger safe status
 114  115       */
···
 116  117                struct dpu_danger_safe_status *status);
 117  118  
 118  119      /**
 119     -       * setup_vsync_source - setup vsync source configuration details
      120 +     * @setup_vsync_source: setup vsync source configuration details
 120  121       * @mdp: mdp top context driver
 121  122       * @cfg: vsync source selection configuration
 122  123       */
···
 124  125                 struct dpu_vsync_source_cfg *cfg);
 125  126  
 126  127      /**
 127     -       * get_safe_status - get safe status
      128 +     * @get_safe_status: get safe status
 128  129       * @mdp: mdp top context driver
 129  130       * @status: Pointer to danger safe status
 130  131       */
···
 132  133              struct dpu_danger_safe_status *status);
 133  134  
 134  135      /**
 135     -       * dp_phy_intf_sel - configure intf to phy mapping
      136 +     * @dp_phy_intf_sel: configure intf to phy mapping
 136  137       * @mdp: mdp top context driver
 137  138       * @phys: list of phys the DP interfaces should be connected to. 0 disables the INTF.
 138  139       */
 139  140      void (*dp_phy_intf_sel)(struct dpu_hw_mdp *mdp, enum dpu_dp_phy_sel phys[2]);
 140  141  
 141  142      /**
 142     -       * intf_audio_select - select the external interface for audio
      143 +     * @intf_audio_select: select the external interface for audio
 143  144       * @mdp: mdp top context driver
 144  145       */
 145  146      void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+8 -8
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
···
  17   17   */
  18   18  struct dpu_hw_vbif_ops {
  19   19      /**
  20      -       * set_limit_conf - set transaction limit config
        20 +     * @set_limit_conf: set transaction limit config
  21   21       * @vbif: vbif context driver
  22   22       * @xin_id: client interface identifier
  23   23       * @rd: true for read limit; false for write limit
···
  27   27          u32 xin_id, bool rd, u32 limit);
  28   28  
  29   29      /**
  30      -       * get_limit_conf - get transaction limit config
        30 +     * @get_limit_conf: get transaction limit config
  31   31       * @vbif: vbif context driver
  32   32       * @xin_id: client interface identifier
  33   33       * @rd: true for read limit; false for write limit
···
  37   37          u32 xin_id, bool rd);
  38   38  
  39   39      /**
  40      -       * set_halt_ctrl - set halt control
        40 +     * @set_halt_ctrl: set halt control
  41   41       * @vbif: vbif context driver
  42   42       * @xin_id: client interface identifier
  43   43       * @enable: halt control enable
···
  46   46          u32 xin_id, bool enable);
  47   47  
  48   48      /**
  49      -       * get_halt_ctrl - get halt control
        49 +     * @get_halt_ctrl: get halt control
  50   50       * @vbif: vbif context driver
  51   51       * @xin_id: client interface identifier
  52   52       * @return: halt control enable
···
  55   55          u32 xin_id);
  56   56  
  57   57      /**
  58      -       * set_qos_remap - set QoS priority remap
        58 +     * @set_qos_remap: set QoS priority remap
  59   59       * @vbif: vbif context driver
  60   60       * @xin_id: client interface identifier
  61   61       * @level: priority level
···
  65   65          u32 xin_id, u32 level, u32 remap_level);
  66   66  
  67   67      /**
  68      -       * set_mem_type - set memory type
        68 +     * @set_mem_type: set memory type
  69   69       * @vbif: vbif context driver
  70   70       * @xin_id: client interface identifier
  71   71       * @value: memory type value
···
  74   74          u32 xin_id, u32 value);
  75   75  
  76   76      /**
  77      -       * clear_errors - clear any vbif errors
        77 +     * @clear_errors: clear any vbif errors
  78   78       * This function clears any detected pending/source errors
  79   79       * on the VBIF interface, and optionally returns the detected
  80   80       * error mask(s).
···
  86   86          u32 *pnd_errors, u32 *src_errors);
  87   87  
  88   88      /**
  89      -       * set_write_gather_en - set write_gather enable
        89 +     * @set_write_gather_en: set write_gather enable
  90   90       * @vbif: vbif context driver
  91   91       * @xin_id: client interface identifier
  92   92       */
+2 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.h
···
  22   22  };
  23   23  
  24   24  /**
  25      -   *
  26   25   * struct dpu_hw_wb_ops : Interface to the wb hw driver functions
  27   26   * Assumption is these functions will be called after clocks are enabled
  28   27   * @setup_outaddress: setup output address from the writeback job
  29   28   * @setup_outformat: setup output format of writeback block from writeback job
        29 +  * @setup_roi: setup ROI (Region of Interest) parameters
  30   30   * @setup_qos_lut: setup qos LUT for writeback block based on input
  31   31   * @setup_cdp: setup chroma down prefetch block for writeback block
  32   32   * @setup_clk_force_ctrl: setup clock force control
···
  61   61   * struct dpu_hw_wb : WB driver object
  62   62   * @hw: block hardware details
  63   63   * @idx: hardware index number within type
  64      -   * @wb_hw_caps: hardware capabilities
        64 +  * @caps: hardware capabilities
  65   65   * @ops: function pointers
  66   66   */
  67   67  struct dpu_hw_wb {
+41 -98
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
···
 826  826      struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
 827  827      struct dpu_sw_pipe_cfg *pipe_cfg;
 828  828      struct dpu_sw_pipe_cfg *r_pipe_cfg;
 829     -      struct dpu_sw_pipe_cfg init_pipe_cfg;
 830  829      struct drm_rect fb_rect = { 0 };
 831     -      const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
 832  830      uint32_t max_linewidth;
 833     -      u32 num_lm;
 834     -      int stage_id, num_stages;
 835  831  
 836  832      min_scale = FRAC_16_16(1, MAX_UPSCALE_RATIO);
 837  833      max_scale = MAX_DOWNSCALE_RATIO << 16;
···
 850  854          return -EINVAL;
 851  855      }
 852  856  
 853     -      num_lm = dpu_crtc_get_num_lm(crtc_state);
 854     -  
      857 +    /* move the assignment here, to ease handling to another pairs later */
      858 +    pipe_cfg = &pstate->pipe_cfg[0];
      859 +    r_pipe_cfg = &pstate->pipe_cfg[1];
 855  860      /* state->src is 16.16, src_rect is not */
 856     -      drm_rect_fp_to_int(&init_pipe_cfg.src_rect, &new_plane_state->src);
      861 +    drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
      862 +  
      863 +    pipe_cfg->dst_rect = new_plane_state->dst;
 857  864  
 858  865      fb_rect.x2 = new_plane_state->fb->width;
 859  866      fb_rect.y2 = new_plane_state->fb->height;
···
 881  882  
 882  883      max_linewidth = pdpu->catalog->caps->max_linewidth;
 883  884  
 884     -      drm_rect_rotate(&init_pipe_cfg.src_rect,
      885 +    drm_rect_rotate(&pipe_cfg->src_rect,
 885  886              new_plane_state->fb->width, new_plane_state->fb->height,
 886  887              new_plane_state->rotation);
 887  888  
 888     -      /*
 889     -       * We have 1 mixer pair cfg for 1:1:1 and 2:2:1 topology, 2 mixer pair
 890     -       * configs for left and right half screen in case of 4:4:2 topology.
 891     -       * But we may have 2 rect to split wide plane that exceeds limit with 1
 892     -       * config for 2:2:1. So need to handle both wide plane splitting, and
 893     -       * two halves of screen splitting for quad-pipe case. Check dest
 894     -       * rectangle left/right clipping first, then check wide rectangle
 895     -       * splitting in every half next.
 896     -       */
 897     -      num_stages = (num_lm + 1) / 2;
 898     -      /* iterate mixer configs for this plane, to separate left/right with the id */
 899     -      for (stage_id = 0; stage_id < num_stages; stage_id++) {
 900     -          struct drm_rect mixer_rect = {
 901     -              .x1 = stage_id * mode->hdisplay / num_stages,
 902     -              .y1 = 0,
 903     -              .x2 = (stage_id + 1) * mode->hdisplay / num_stages,
 904     -              .y2 = mode->vdisplay
 905     -          };
 906     -          int cfg_idx = stage_id * PIPES_PER_STAGE;
 907     -  
 908     -          pipe_cfg = &pstate->pipe_cfg[cfg_idx];
 909     -          r_pipe_cfg = &pstate->pipe_cfg[cfg_idx + 1];
 910     -  
 911     -          drm_rect_fp_to_int(&pipe_cfg->src_rect, &new_plane_state->src);
 912     -          pipe_cfg->dst_rect = new_plane_state->dst;
 913     -  
 914     -          DPU_DEBUG_PLANE(pdpu, "checking src " DRM_RECT_FMT
 915     -                  " vs clip window " DRM_RECT_FMT "\n",
 916     -                  DRM_RECT_ARG(&pipe_cfg->src_rect),
 917     -                  DRM_RECT_ARG(&mixer_rect));
 918     -  
 919     -          /*
 920     -           * If this plane does not fall into mixer rect, check next
 921     -           * mixer rect.
 922     -           */
 923     -          if (!drm_rect_clip_scaled(&pipe_cfg->src_rect,
 924     -                        &pipe_cfg->dst_rect,
 925     -                        &mixer_rect)) {
 926     -              memset(pipe_cfg, 0, 2 * sizeof(struct dpu_sw_pipe_cfg));
 927     -  
 928     -              continue;
      889 +    if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
      890 +        _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
      891 +        if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
      892 +            DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
      893 +                    DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
      894 +            return -E2BIG;
 929  895          }
 930  896  
 931     -          pipe_cfg->dst_rect.x1 -= mixer_rect.x1;
 932     -          pipe_cfg->dst_rect.x2 -= mixer_rect.x1;
 933     -  
 934     -          DPU_DEBUG_PLANE(pdpu, "Got clip src:" DRM_RECT_FMT " dst: " DRM_RECT_FMT "\n",
 935     -                  DRM_RECT_ARG(&pipe_cfg->src_rect), DRM_RECT_ARG(&pipe_cfg->dst_rect));
 936     -  
 937     -          /* Split wide rect into 2 rect */
 938     -          if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
 939     -              _dpu_plane_calc_clk(mode, pipe_cfg) > max_mdp_clk_rate) {
 940     -  
 941     -              if (drm_rect_width(&pipe_cfg->src_rect) > 2 * max_linewidth) {
 942     -                  DPU_DEBUG_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
 943     -                          DRM_RECT_ARG(&pipe_cfg->src_rect), max_linewidth);
 944     -                  return -E2BIG;
 945     -              }
 946     -  
 947     -              memcpy(r_pipe_cfg, pipe_cfg, sizeof(struct dpu_sw_pipe_cfg));
 948     -              pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
 949     -              pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
 950     -              r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
 951     -              r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
 952     -              DPU_DEBUG_PLANE(pdpu, "Split wide plane into:"
 953     -                      DRM_RECT_FMT " and " DRM_RECT_FMT "\n",
 954     -                      DRM_RECT_ARG(&pipe_cfg->src_rect),
 955     -                      DRM_RECT_ARG(&r_pipe_cfg->src_rect));
 956     -          } else {
 957     -              memset(r_pipe_cfg, 0, sizeof(struct dpu_sw_pipe_cfg));
 958     -          }
 959     -  
 960     -          drm_rect_rotate_inv(&pipe_cfg->src_rect,
 961     -                      new_plane_state->fb->width,
 962     -                      new_plane_state->fb->height,
 963     -                      new_plane_state->rotation);
 964     -  
 965     -          if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
 966     -              drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
 967     -                          new_plane_state->fb->width,
 968     -                          new_plane_state->fb->height,
 969     -                          new_plane_state->rotation);
      897 +        *r_pipe_cfg = *pipe_cfg;
      898 +        pipe_cfg->src_rect.x2 = (pipe_cfg->src_rect.x1 + pipe_cfg->src_rect.x2) >> 1;
      899 +        pipe_cfg->dst_rect.x2 = (pipe_cfg->dst_rect.x1 + pipe_cfg->dst_rect.x2) >> 1;
      900 +        r_pipe_cfg->src_rect.x1 = pipe_cfg->src_rect.x2;
      901 +        r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
      902 +    } else {
      903 +        memset(r_pipe_cfg, 0, sizeof(*r_pipe_cfg));
 970  904      }
      905 +  
      906 +    drm_rect_rotate_inv(&pipe_cfg->src_rect,
      907 +                new_plane_state->fb->width, new_plane_state->fb->height,
      908 +                new_plane_state->rotation);
      909 +    if (drm_rect_width(&r_pipe_cfg->src_rect) != 0)
      910 +        drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
      911 +                    new_plane_state->fb->width, new_plane_state->fb->height,
      912 +                    new_plane_state->rotation);
 971  913  
 972  914      pstate->needs_qos_remap = drm_atomic_crtc_needs_modeset(crtc_state);
···
 985 1045          drm_atomic_get_new_plane_state(state, plane);
 986 1046      struct dpu_plane *pdpu = to_dpu_plane(plane);
 987 1047      struct dpu_plane_state *pstate = to_dpu_plane_state(new_plane_state);
 988     -      struct dpu_sw_pipe *pipe;
 989     -      struct dpu_sw_pipe_cfg *pipe_cfg;
 990     -      int ret = 0, i;
     1048 +    struct dpu_sw_pipe *pipe = &pstate->pipe[0];
     1049 +    struct dpu_sw_pipe *r_pipe = &pstate->pipe[1];
     1050 +    struct dpu_sw_pipe_cfg *pipe_cfg = &pstate->pipe_cfg[0];
     1051 +    struct dpu_sw_pipe_cfg *r_pipe_cfg = &pstate->pipe_cfg[1];
     1052 +    int ret = 0;
 991 1053  
 992     -      for (i = 0; i < PIPES_PER_PLANE; i++) {
 993     -          pipe = &pstate->pipe[i];
 994     -          pipe_cfg = &pstate->pipe_cfg[i];
 995     -          if (!drm_rect_width(&pipe_cfg->src_rect))
 996     -              continue;
 997     -          DPU_DEBUG_PLANE(pdpu, "pipe %d is in use, validate it\n", i);
 998     -          ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
     1054 +    ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg,
     1055 +                      &crtc_state->adjusted_mode,
     1056 +                      new_plane_state);
     1057 +    if (ret)
     1058 +        return ret;
     1059 +  
     1060 +    if (drm_rect_width(&r_pipe_cfg->src_rect) != 0) {
     1061 +        ret = dpu_plane_atomic_check_pipe(pdpu, r_pipe, r_pipe_cfg,
 999 1062                            &crtc_state->adjusted_mode,
1000 1063                            new_plane_state);
1001 1064          if (ret)
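
A runnable distillation of the restored wide-plane split above, using plain
structs instead of drm_rect; only the midpoint math mirrors the hunk:

    #include <stdio.h>

    struct rect { int x1, y1, x2, y2; };

    /* When the source is wider than max_linewidth (but no more than twice
     * it), split src and dst at their midpoints across the two rects of a
     * single plane. */
    static void split_wide(struct rect *src, struct rect *dst,
                           struct rect *r_src, struct rect *r_dst)
    {
        *r_src = *src;
        *r_dst = *dst;
        src->x2 = (src->x1 + src->x2) >> 1;
        dst->x2 = (dst->x1 + dst->x2) >> 1;
        r_src->x1 = src->x2;
        r_dst->x1 = dst->x2;
    }

    int main(void)
    {
        struct rect src = { 0, 0, 5120, 1440 }, dst = src, r_src, r_dst;

        split_wide(&src, &dst, &r_src, &r_dst);
        printf("left src ends at %d, right src starts at %d\n",
               src.x2, r_src.x1);   /* 2560 / 2560 */
        return 0;
    }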
+5 -1
drivers/gpu/drm/msm/disp/mdp_format.h
···
  24   24  #define MSM_FORMAT_FLAG_UNPACK_TIGHT BIT(MSM_FORMAT_FLAG_UNPACK_TIGHT_BIT)
  25   25  #define MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB BIT(MSM_FORMAT_FLAG_UNPACK_ALIGN_MSB_BIT)
  26   26  
  27      -  /**
        27 + /*
  28   28   * DPU HW,Component order color map
  29   29   */
  30   30  enum {
···
  37   37  /**
  38   38   * struct msm_format: defines the format configuration
  39   39   * @pixel_format: format fourcc
        40 +  * @bpc_g_y: element bit widths: BPC for G or Y
        41 +  * @bpc_b_cb: element bit widths: BPC for B or Cb
        42 +  * @bpc_r_cr: element bit widths: BPC for R or Cr
        43 +  * @bpc_a: element bit widths: BPC for the alpha channel
  40   44   * @element: element color ordering
  41   45   * @fetch_type: how the color components are packed in pixel format
  42   46   * @chroma_sample: chroma sub-samplng type
+1 -1
drivers/gpu/drm/msm/dp/dp_debug.h
···
  12   12  #if defined(CONFIG_DEBUG_FS)
  13   13  
  14   14  /**
  15      -   * msm_dp_debug_get() - configure and get the DisplayPlot debug module data
        15 +  * msm_dp_debug_init() - configure and get the DisplayPlot debug module data
  16   16   *
  17   17   * @dev: device instance of the caller
  18   18   * @panel: instance of panel module
+1
drivers/gpu/drm/msm/dp/dp_drm.c
···
  18   18  /**
  19   19   * msm_dp_bridge_detect - callback to determine if connector is connected
  20   20   * @bridge: Pointer to drm bridge structure
        21 +  * @connector: Pointer to drm connector structure
  21   22   * Returns: Bridge's 'is connected' status
  22   23   */
  23   24  static enum drm_connector_status
+5 -4
drivers/gpu/drm/msm/dp/dp_link.h
···
  80   80  };
  81   81  
  82   82  /**
  83      -   * mdss_dp_test_bit_depth_to_bpp() - convert test bit depth to bpp
        83 +  * msm_dp_link_bit_depth_to_bpp() - convert test bit depth to bpp
  84   84   * @tbd: test bit depth
  85   85   *
  86      -   * Returns the bits per pixel (bpp) to be used corresponding to the
  87      -   * git bit depth value. This function assumes that bit depth has
        86 +  * Returns: the bits per pixel (bpp) to be used corresponding to the
        87 +  * bit depth value. This function assumes that bit depth has
  88   88   * already been validated.
  89   89   */
  90   90  static inline u32 msm_dp_link_bit_depth_to_bpp(u32 tbd)
···
 120  120  
 121  121  /**
 122  122   * msm_dp_link_get() - get the functionalities of dp test module
 123     -   *
      123 +  * @dev: kernel device structure
      124 +  * @aux: DisplayPort AUX channel
 124  125   *
 125  126   * return: a pointer to msm_dp_link struct
 126  127   */
+4 -4
drivers/gpu/drm/msm/dp/dp_panel.h
··· 63 63 64 64 /** 65 65 * is_link_rate_valid() - validates the link rate 66 - * @lane_rate: link rate requested by the sink 66 + * @bw_code: link rate requested by the sink 67 67 * 68 - * Returns true if the requested link rate is supported. 68 + * Returns: true if the requested link rate is supported. 69 69 */ 70 70 static inline bool is_link_rate_valid(u32 bw_code) 71 71 { ··· 76 76 } 77 77 78 78 /** 79 - * msm_dp_link_is_lane_count_valid() - validates the lane count 79 + * is_lane_count_valid() - validates the lane count 80 80 * @lane_count: lane count requested by the sink 81 81 * 82 - * Returns true if the requested lane count is supported. 82 + * Returns: true if the requested lane count is supported. 83 83 */ 84 84 static inline bool is_lane_count_valid(u32 lane_count) 85 85 {
+19 -17
drivers/gpu/drm/msm/msm_fence.h
··· 16 16 * incrementing fence seqno at the end of each submit 17 17 */ 18 18 struct msm_fence_context { 19 + /** @dev: the drm device */ 19 20 struct drm_device *dev; 20 - /** name: human readable name for fence timeline */ 21 + /** @name: human readable name for fence timeline */ 21 22 char name[32]; 22 - /** context: see dma_fence_context_alloc() */ 23 + /** @context: see dma_fence_context_alloc() */ 23 24 unsigned context; 24 - /** index: similar to context, but local to msm_fence_context's */ 25 + /** @index: similar to context, but local to msm_fence_context's */ 25 26 unsigned index; 26 - 27 27 /** 28 - * last_fence: 29 - * 28 + * @last_fence: 30 29 * Last assigned fence, incremented each time a fence is created 31 30 * on this fence context. If last_fence == completed_fence, 32 31 * there is no remaining pending work 33 32 */ 34 33 uint32_t last_fence; 35 - 36 34 /** 37 - * completed_fence: 38 - * 35 + * @completed_fence: 39 36 * The last completed fence, updated from the CPU after interrupt 40 37 * from GPU 41 38 */ 42 39 uint32_t completed_fence; 43 - 44 40 /** 45 - * fenceptr: 46 - * 41 + * @fenceptr: 47 42 * The address that the GPU directly writes with completed fence 48 43 * seqno. This can be ahead of completed_fence. We can peek at 49 44 * this to see if a fence has already signaled but the CPU hasn't ··· 46 51 */ 47 52 volatile uint32_t *fenceptr; 48 53 54 + /** 55 + * @spinlock: fence context spinlock 56 + */ 49 57 spinlock_t spinlock; 50 58 51 59 /* ··· 57 59 * don't queue, so maybe that is ok 58 60 */ 59 61 60 - /** next_deadline: Time of next deadline */ 62 + /** @next_deadline: Time of next deadline */ 61 63 ktime_t next_deadline; 62 - 63 64 /** 64 - * next_deadline_fence: 65 - * 65 + * @next_deadline_fence: 66 66 * Fence value for next pending deadline. The deadline timer is 67 67 * canceled when this fence is signaled. 68 68 */ 69 69 uint32_t next_deadline_fence; 70 - 70 + /** 71 + * @deadline_timer: tracks nearest deadline of a fence timeline and 72 + * expires just before it. 73 + */ 71 74 struct hrtimer deadline_timer; 75 + /** 76 + * @deadline_work: work to do after deadline_timer expires 77 + */ 72 78 struct kthread_work deadline_work; 73 79 }; 74 80
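The @fenceptr description is the heart of this timeline: the GPU writes the last completed seqno there, and it may run ahead of @completed_fence. A rough sketch of the check those comments imply, assuming the usual signed-difference idiom (an illustration, not a copy of msm_fence.c):

/* A seqno has signaled once it is at or behind the GPU-written value;
 * the signed subtraction keeps the comparison valid across wraparound.
 */
static bool fence_done(struct msm_fence_context *fctx, uint32_t seqno)
{
        return (int32_t)(*fctx->fenceptr - seqno) >= 0;
}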
+4 -1
drivers/gpu/drm/msm/msm_gem_vma.c
··· 65 65 }; 66 66 67 67 /** 68 - * struct msm_vma_op - A MAP or UNMAP operation 68 + * struct msm_vm_op - A MAP or UNMAP operation 69 69 */ 70 70 struct msm_vm_op { 71 71 /** @op: The operation type */ ··· 798 798 * synchronous operations are supported. In a user managed VM, userspace 799 799 * handles virtual address allocation, and both async and sync operations 800 800 * are supported. 801 + * 802 + * Returns: pointer to the created &struct drm_gpuvm on success 803 + * or an ERR_PTR(-errno) on failure. 801 804 */ 802 805 struct drm_gpuvm * 803 806 msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
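With the return convention documented, callers follow the usual ERR_PTR pattern; a usage sketch (the va_start/va_size/managed arguments past the three parameters shown in the hunk are assumptions):

struct drm_gpuvm *vm = msm_gem_vm_create(drm, mmu, "gpu",
                                         va_start, va_size, true);

if (IS_ERR(vm))
        return PTR_ERR(vm);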
+18 -50
drivers/gpu/drm/msm/msm_gpu.h
··· 116 116 * struct msm_gpu_devfreq - devfreq related state 117 117 */ 118 118 struct msm_gpu_devfreq { 119 - /** devfreq: devfreq instance */ 119 + /** @devfreq: devfreq instance */ 120 120 struct devfreq *devfreq; 121 - 122 - /** lock: lock for "suspended", "busy_cycles", and "time" */ 121 + /** @lock: lock for "suspended", "busy_cycles", and "time" */ 123 122 struct mutex lock; 124 - 125 123 /** 126 - * idle_freq: 127 - * 124 + * @idle_freq: 128 125 * Shadow frequency used while the GPU is idle. From the PoV of 129 126 * the devfreq governor, we are continuing to sample busyness and 130 127 * adjust frequency while the GPU is idle, but we use this shadow ··· 129 132 * it is inactive. 130 133 */ 131 134 unsigned long idle_freq; 132 - 133 135 /** 134 - * boost_constraint: 135 - * 136 + * @boost_freq: 136 137 * A PM QoS constraint to boost min freq for a period of time 137 138 * until the boost expires. 138 139 */ 139 140 struct dev_pm_qos_request boost_freq; 140 - 141 141 /** 142 - * busy_cycles: Last busy counter value, for calculating elapsed busy 142 + * @busy_cycles: Last busy counter value, for calculating elapsed busy 143 143 * cycles since last sampling period. 144 144 */ 145 145 u64 busy_cycles; 146 - 147 - /** time: Time of last sampling period. */ 146 + /** @time: Time of last sampling period. */ 148 147 ktime_t time; 149 - 150 - /** idle_time: Time of last transition to idle: */ 148 + /** @idle_time: Time of last transition to idle. */ 151 149 ktime_t idle_time; 152 - 153 150 /** 154 - * idle_work: 155 - * 151 + * @idle_work: 156 152 * Used to delay clamping to idle freq on active->idle transition. 157 153 */ 158 154 struct msm_hrtimer_work idle_work; 159 - 160 155 /** 161 - * boost_work: 162 - * 156 + * @boost_work: 163 157 * Used to reset the boost_constraint after the boost period has 164 158 * elapsed 165 159 */ 166 160 struct msm_hrtimer_work boost_work; 167 161 168 - /** suspended: tracks if we're suspended */ 162 + /** @suspended: tracks if we're suspended */ 169 163 bool suspended; 170 164 }; 171 165 ··· 346 358 struct msm_context { 347 359 /** @queuelock: synchronizes access to submitqueues list */ 348 360 rwlock_t queuelock; 349 - 350 361 /** @submitqueues: list of &msm_gpu_submitqueue created by userspace */ 351 362 struct list_head submitqueues; 352 - 353 363 /** 354 364 * @queueid: 355 - * 356 365 * Counter incremented each time a submitqueue is created, used to 357 366 * assign &msm_gpu_submitqueue.id 358 367 */ 359 368 int queueid; 360 - 361 369 /** 362 370 * @closed: The device file associated with this context has been closed. 363 - * 364 371 * Once the device is closed, any submits that have not been written 365 372 * to the ring buffer are no-op'd. 366 373 */ 367 374 bool closed; 368 - 369 375 /** 370 376 * @userspace_managed_vm: 371 - * 372 377 * Has userspace opted-in to userspace managed VM (ie. VM_BIND) via 373 378 * MSM_PARAM_EN_VM_BIND? 374 379 */ 375 380 bool userspace_managed_vm; 376 - 377 381 /** 378 382 * @vm: 379 - * 380 383 * The per-process GPU address-space. Do not access directly, use 381 384 * msm_context_vm(). 382 385 */ 383 386 struct drm_gpuvm *vm; 384 - 385 - /** @kref: the reference count */ 387 + /** @ref: the reference count */ 386 388 struct kref ref; 387 - 388 389 /** 389 390 * @seqno: 390 - * 391 391 * A unique per-process sequence number. Used to detect context 392 392 * switches, without relying on keeping a, potentially dangling, 393 393 * pointer to the previous context. 
394 394 */ 395 395 int seqno; 396 - 397 396 /** 398 397 * @sysprof: 399 - * 400 398 * The value of MSM_PARAM_SYSPROF set by userspace. This is 401 399 * intended to be used by system profiling tools like Mesa's 402 400 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN. ··· 397 423 * file is closed. 398 424 */ 399 425 int sysprof; 400 - 401 426 /** 402 427 * @comm: Overridden task comm, see MSM_PARAM_COMM 403 428 * 404 429 * Accessed under msm_gpu::lock 405 430 */ 406 431 char *comm; 407 - 408 432 /** 409 433 * @cmdline: Overridden task cmdline, see MSM_PARAM_CMDLINE 410 434 * 411 435 * Accessed under msm_gpu::lock 412 436 */ 413 437 char *cmdline; 414 - 415 438 /** 416 - * @elapsed: 417 - * 439 + * @elapsed_ns: 418 440 * The total (cumulative) elapsed time GPU was busy with rendering 419 441 * from this context in ns. 420 442 */ 421 443 uint64_t elapsed_ns; 422 - 423 444 /** 424 445 * @cycles: 425 - * 426 446 * The total (cumulative) GPU cycles elapsed attributed to this 427 447 * context. 428 448 */ 429 449 uint64_t cycles; 430 - 431 450 /** 432 451 * @entities: 433 - * 434 452 * Table of per-priority-level sched entities used by submitqueues 435 453 * associated with this &drm_file. Because some userspace apps 436 454 * make assumptions about rendering from multiple gl contexts ··· 432 466 * level. 433 467 */ 434 468 struct drm_sched_entity *entities[NR_SCHED_PRIORITIES * MSM_GPU_MAX_RINGS]; 435 - 436 469 /** 437 470 * @ctx_mem: 438 - * 439 471 * Total amount of memory of GEM buffers with handles attached for 440 472 * this context. 441 473 */ ··· 443 479 struct drm_gpuvm *msm_context_vm(struct drm_device *dev, struct msm_context *ctx); 444 480 445 481 /** 446 - * msm_context_is_vm_bind() - has userspace opted in to VM_BIND? 482 + * msm_context_is_vmbind() - has userspace opted in to VM_BIND? 447 483 * 448 484 * @ctx: the drm_file context 449 485 * ··· 451 487 * do sparse binding including having multiple, potentially partial, 452 488 * mappings in the VM. Therefore certain legacy uabi (ie. GET_IOVA, 453 489 * SET_IOVA) are rejected because they don't have a sensible meaning. 490 + * 491 + * Returns: %true if userspace is managing the VM, %false otherwise. 454 492 */ 455 493 static inline bool 456 494 msm_context_is_vmbind(struct msm_context *ctx) ··· 484 518 * This allows generations without preemption (nr_rings==1) to have some 485 519 * amount of prioritization, and provides more priority levels for gens 486 520 * that do have preemption. 521 + * 522 + * Returns: %0 on success, %-errno on error. 487 523 */ 488 524 static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio, 489 525 unsigned *ring_nr, enum drm_sched_priority *sched_prio) ··· 509 541 } 510 542 511 543 /** 512 - * struct msm_gpu_submitqueues - Userspace created context. 544 + * struct msm_gpu_submitqueue - Userspace created context. 513 545 * 514 546 * A submitqueue is associated with a gl context or vk queue (or equiv) 515 547 * in userspace.
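The msm_gpu_convert_priority() comment now spells out the %0 / %-errno contract. A rough sketch of the unpacking it describes (the in-tree implementation differs in detail, e.g. it inverts the scheduler priority):

static int convert_priority(unsigned int nr_rings, int prio,
                            unsigned int *ring_nr, unsigned int *sched_prio)
{
        unsigned int rn = prio / NR_SCHED_PRIORITIES;   /* which ring */
        unsigned int sp = prio % NR_SCHED_PRIORITIES;   /* priority within it */

        if (rn >= nr_rings)
                return -EINVAL;         /* the documented %-errno case */

        *ring_nr = rn;
        *sched_prio = sp;
        return 0;
}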
+2 -2
drivers/gpu/drm/msm/msm_iommu.c
··· 364 364 } 365 365 366 366 /** 367 - * alloc_pt() - Custom page table allocator 367 + * msm_iommu_pagetable_alloc_pt() - Custom page table allocator 368 368 * @cookie: Cookie passed at page table allocation time. 369 369 * @size: Size of the page table. This size should be fixed, 370 370 * and determined at creation time based on the granule size. ··· 416 416 417 417 418 418 /** 419 - * free_pt() - Custom page table free function 419 + * msm_iommu_pagetable_free_pt() - Custom page table free function 420 420 * @cookie: Cookie passed at page table allocation time. 421 421 * @data: Page table to free. 422 422 * @size: Size of the page table. This size should be fixed,
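Both renames follow the same kernel-doc rule: the name in a /** comment must match the symbol it documents, otherwise kernel-doc warns about a missing prototype. A minimal matched pair for illustration (toy names):

/**
 * my_alloc_pt() - toy allocator; the comment name matches the definition
 * @size: number of bytes to allocate
 *
 * Returns: pointer to the allocation, or NULL on failure.
 */
static void *my_alloc_pt(size_t size)
{
        return kmalloc(size, GFP_KERNEL);
}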
+5 -5
drivers/gpu/drm/msm/msm_perf.c
··· 65 65 66 66 if ((perf->cnt++ % 32) == 0) { 67 67 /* Header line: */ 68 - n = snprintf(ptr, rem, "%%BUSY"); 68 + n = scnprintf(ptr, rem, "%%BUSY"); 69 69 ptr += n; 70 70 rem -= n; 71 71 72 72 for (i = 0; i < gpu->num_perfcntrs; i++) { 73 73 const struct msm_gpu_perfcntr *perfcntr = &gpu->perfcntrs[i]; 74 - n = snprintf(ptr, rem, "\t%s", perfcntr->name); 74 + n = scnprintf(ptr, rem, "\t%s", perfcntr->name); 75 75 ptr += n; 76 76 rem -= n; 77 77 } ··· 93 93 return ret; 94 94 95 95 val = totaltime ? 1000 * activetime / totaltime : 0; 96 - n = snprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10); 96 + n = scnprintf(ptr, rem, "%3d.%d%%", val / 10, val % 10); 97 97 ptr += n; 98 98 rem -= n; 99 99 100 100 for (i = 0; i < ret; i++) { 101 101 /* cycle counters (I think).. convert to MHz.. */ 102 102 val = cntrs[i] / 10000; 103 - n = snprintf(ptr, rem, "\t%5d.%02d", 103 + n = scnprintf(ptr, rem, "\t%5d.%02d", 104 104 val / 100, val % 100); 105 105 ptr += n; 106 106 rem -= n; 107 107 } 108 108 } 109 109 110 - n = snprintf(ptr, rem, "\n"); 110 + n = scnprintf(ptr, rem, "\n"); 111 111 ptr += n; 112 112 rem -= n; 113 113
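The swap matters because snprintf() returns the length the full string would have needed, so after truncation the ptr/rem bookkeeping above can walk past the end of the buffer; scnprintf() returns the bytes actually stored. A small demonstration with a 4-byte buffer:

char buf[4];
int n;

n = snprintf(buf, sizeof(buf), "abcdef");       /* n == 6: rem -= n underflows */
n = scnprintf(buf, sizeof(buf), "abcdef");      /* n == 3: matches what was written */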