Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge https://gitlab.freedesktop.org/drm/msm into drm-next

On the display side, cleanups and fixes to enabled modifiers
(QCOM_COMPRESSED). And otherwise mostly misc fixes all around.

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Rob Clark <robdclark@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/CAF6AEGuZ5uBKpf=fHvKpTiD10nychuEY8rnE+HeRz0QMvtY5_A@mail.gmail.com

+399 -873
+59
Documentation/devicetree/bindings/display/msm/gmu.txt
··· 1 + Qualcomm adreno/snapdragon GMU (Graphics management unit) 2 + 3 + The GMU is a programmable power controller for the GPU. The CPU controls the 4 + GMU which in turn handles power controls for the GPU. 5 + 6 + Required properties: 7 + - compatible: "qcom,adreno-gmu-XYZ.W", "qcom,adreno-gmu" 8 + for example: "qcom,adreno-gmu-630.2", "qcom,adreno-gmu" 9 + Note that you need to list the less specific "qcom,adreno-gmu" 10 + for generic matches and the more specific identifier to identify 11 + the specific device. 12 + - reg: Physical base address and length of the GMU registers. 13 + - reg-names: Matching names for the register regions 14 + * "gmu" 15 + * "gmu_pdc" 16 + * "gmu_pdc_seq" 17 + - interrupts: The interrupt signals from the GMU. 18 + - interrupt-names: Matching names for the interrupts 19 + * "hfi" 20 + * "gmu" 21 + - clocks: phandles to the device clocks 22 + - clock-names: Matching names for the clocks 23 + * "gmu" 24 + * "cxo" 25 + * "axi" 26 + * "memnoc" 27 + - power-domains: should be <&clock_gpucc GPU_CX_GDSC> 28 + - iommus: phandle to the adreno iommu 29 + - operating-points-v2: phandle to the OPP operating points 30 + 31 + Example: 32 + 33 + / { 34 + ... 35 + 36 + gmu: gmu@506a000 { 37 + compatible="qcom,adreno-gmu-630.2", "qcom,adreno-gmu"; 38 + 39 + reg = <0x506a000 0x30000>, 40 + <0xb280000 0x10000>, 41 + <0xb480000 0x10000>; 42 + reg-names = "gmu", "gmu_pdc", "gmu_pdc_seq"; 43 + 44 + interrupts = <GIC_SPI 304 IRQ_TYPE_LEVEL_HIGH>, 45 + <GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>; 46 + interrupt-names = "hfi", "gmu"; 47 + 48 + clocks = <&gpucc GPU_CC_CX_GMU_CLK>, 49 + <&gpucc GPU_CC_CXO_CLK>, 50 + <&gcc GCC_DDRSS_GPU_AXI_CLK>, 51 + <&gcc GCC_GPU_MEMNOC_GFX_CLK>; 52 + clock-names = "gmu", "cxo", "axi", "memnoc"; 53 + 54 + power-domains = <&gpucc GPU_CX_GDSC>; 55 + iommus = <&adreno_smmu 5>; 56 + 57 + operating-points-v2 = <&gmu_opp_table>; 58 + }; 59 + };
+39 -3
Documentation/devicetree/bindings/display/msm/gpu.txt
··· 10 10 If "amd,imageon" is used, there should be no top level msm device. 11 11 - reg: Physical base address and length of the controller's registers. 12 12 - interrupts: The interrupt signal from the gpu. 13 - - clocks: device clocks 13 + - clocks: device clocks (if applicable) 14 14 See ../clocks/clock-bindings.txt for details. 15 - - clock-names: the following clocks are required: 15 + - clock-names: the following clocks are required by a3xx, a4xx and a5xx 16 + cores: 16 17 * "core" 17 18 * "iface" 18 19 * "mem_iface" 20 + For GMU attached devices the GPU clocks are not used and are not required. The 21 + following devices should not list clocks: 22 + - qcom,adreno-630.2 23 + - iommus: optional phandle to an adreno iommu instance 24 + - operating-points-v2: optional phandle to the OPP operating points 25 + - qcom,gmu: For GMU attached devices a phandle to the GMU device that will 26 + control the power for the GPU. Applicable targets: 27 + - qcom,adreno-630.2 19 28 20 - Example: 29 + Example 3xx/4xx/a5xx: 21 30 22 31 / { 23 32 ... ··· 44 35 <&mmcc GFX3D_CLK>, 45 36 <&mmcc GFX3D_AHB_CLK>, 46 37 <&mmcc MMSS_IMEM_AHB_CLK>; 38 + }; 39 + }; 40 + 41 + Example a6xx (with GMU): 42 + 43 + / { 44 + ... 45 + 46 + gpu@5000000 { 47 + compatible = "qcom,adreno-630.2", "qcom,adreno"; 48 + #stream-id-cells = <16>; 49 + 50 + reg = <0x5000000 0x40000>, <0x509e000 0x10>; 51 + reg-names = "kgsl_3d0_reg_memory", "cx_mem"; 52 + 53 + /* 54 + * Look ma, no clocks! The GPU clocks and power are 55 + * controlled entirely by the GMU 56 + */ 57 + 58 + interrupts = <GIC_SPI 300 IRQ_TYPE_LEVEL_HIGH>; 59 + 60 + iommus = <&adreno_smmu 0>; 61 + 62 + operating-points-v2 = <&gpu_opp_table>; 63 + 64 + qcom,gmu = <&gmu>; 47 65 }; 48 66 };
+2 -1
MAINTAINERS
··· 4851 4851 4852 4852 DRM DRIVER FOR MSM ADRENO GPU 4853 4853 M: Rob Clark <robdclark@gmail.com> 4854 + M: Sean Paul <sean@poorly.run> 4854 4855 L: linux-arm-msm@vger.kernel.org 4855 4856 L: dri-devel@lists.freedesktop.org 4856 4857 L: freedreno@lists.freedesktop.org 4857 - T: git git://people.freedesktop.org/~robclark/linux 4858 + T: git https://gitlab.freedesktop.org/drm/msm.git 4858 4859 S: Maintained 4859 4860 F: drivers/gpu/drm/msm/ 4860 4861 F: include/uapi/drm/msm_drm.h
+2 -7
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
··· 465 465 return; 466 466 } 467 467 468 - mixer->encoder = enc; 469 - 470 468 cstate->num_mixers++; 471 469 DPU_DEBUG("setup mixer %d: lm %d\n", 472 470 i, mixer->hw_lm->idx - LM_0); ··· 716 718 * may delay and flush at an irq event (e.g. ppdone) 717 719 */ 718 720 drm_for_each_encoder_mask(encoder, crtc->dev, 719 - crtc->state->encoder_mask) { 720 - struct dpu_encoder_kickoff_params params = { 0 }; 721 - dpu_encoder_prepare_for_kickoff(encoder, &params, async); 722 - } 723 - 721 + crtc->state->encoder_mask) 722 + dpu_encoder_prepare_for_kickoff(encoder, async); 724 723 725 724 if (!async) { 726 725 /* wait for frame_event_done completion */
-2
drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
··· 84 84 * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC 85 85 * @hw_lm: LM HW Driver context 86 86 * @lm_ctl: CTL Path HW driver context 87 - * @encoder: Encoder attached to this lm & ctl 88 87 * @mixer_op_mode: mixer blending operation mode 89 88 * @flush_mask: mixer flush mask for ctl, mixer and pipe 90 89 */ 91 90 struct dpu_crtc_mixer { 92 91 struct dpu_hw_mixer *hw_lm; 93 92 struct dpu_hw_ctl *lm_ctl; 94 - struct drm_encoder *encoder; 95 93 u32 mixer_op_mode; 96 94 u32 flush_mask; 97 95 };
+10 -22
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
··· 205 205 bool idle_pc_supported; 206 206 struct mutex rc_lock; 207 207 enum dpu_enc_rc_states rc_state; 208 - struct kthread_delayed_work delayed_off_work; 208 + struct delayed_work delayed_off_work; 209 209 struct kthread_work vsync_event_work; 210 210 struct msm_display_topology topology; 211 211 bool mode_set_complete; ··· 742 742 { 743 743 struct dpu_encoder_virt *dpu_enc; 744 744 struct msm_drm_private *priv; 745 - struct msm_drm_thread *disp_thread; 746 745 bool is_vid_mode = false; 747 746 748 747 if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private || ··· 753 754 priv = drm_enc->dev->dev_private; 754 755 is_vid_mode = dpu_enc->disp_info.capabilities & 755 756 MSM_DISPLAY_CAP_VID_MODE; 756 - 757 - if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) { 758 - DPU_ERROR("invalid crtc index\n"); 759 - return -EINVAL; 760 - } 761 - disp_thread = &priv->disp_thread[drm_enc->crtc->index]; 762 757 763 758 /* 764 759 * when idle_pc is not supported, process only KICKOFF, STOP and MODESET ··· 770 777 switch (sw_event) { 771 778 case DPU_ENC_RC_EVENT_KICKOFF: 772 779 /* cancel delayed off work, if any */ 773 - if (kthread_cancel_delayed_work_sync( 774 - &dpu_enc->delayed_off_work)) 780 + if (cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) 775 781 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", 776 782 sw_event); 777 783 ··· 829 837 return 0; 830 838 } 831 839 832 - kthread_queue_delayed_work( 833 - &disp_thread->worker, 834 - &dpu_enc->delayed_off_work, 835 - msecs_to_jiffies(dpu_enc->idle_timeout)); 840 + queue_delayed_work(priv->wq, &dpu_enc->delayed_off_work, 841 + msecs_to_jiffies(dpu_enc->idle_timeout)); 836 842 837 843 trace_dpu_enc_rc(DRMID(drm_enc), sw_event, 838 844 dpu_enc->idle_pc_supported, dpu_enc->rc_state, ··· 839 849 840 850 case DPU_ENC_RC_EVENT_PRE_STOP: 841 851 /* cancel delayed off work, if any */ 842 - if (kthread_cancel_delayed_work_sync( 843 - &dpu_enc->delayed_off_work)) 852 + if 
(cancel_delayed_work_sync(&dpu_enc->delayed_off_work)) 844 853 DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n", 845 854 sw_event); 846 855 ··· 1357 1368 } 1358 1369 } 1359 1370 1360 - static void dpu_encoder_off_work(struct kthread_work *work) 1371 + static void dpu_encoder_off_work(struct work_struct *work) 1361 1372 { 1362 1373 struct dpu_encoder_virt *dpu_enc = container_of(work, 1363 1374 struct dpu_encoder_virt, delayed_off_work.work); ··· 1745 1756 nsecs_to_jiffies(ktime_to_ns(wakeup_time))); 1746 1757 } 1747 1758 1748 - void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, 1749 - struct dpu_encoder_kickoff_params *params, bool async) 1759 + void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc, bool async) 1750 1760 { 1751 1761 struct dpu_encoder_virt *dpu_enc; 1752 1762 struct dpu_encoder_phys *phys; 1753 1763 bool needs_hw_reset = false; 1754 1764 unsigned int i; 1755 1765 1756 - if (!drm_enc || !params) { 1766 + if (!drm_enc) { 1757 1767 DPU_ERROR("invalid args\n"); 1758 1768 return; 1759 1769 } ··· 1766 1778 phys = dpu_enc->phys_encs[i]; 1767 1779 if (phys) { 1768 1780 if (phys->ops.prepare_for_kickoff) 1769 - phys->ops.prepare_for_kickoff(phys, params); 1781 + phys->ops.prepare_for_kickoff(phys); 1770 1782 if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET) 1771 1783 needs_hw_reset = true; 1772 1784 } ··· 2181 2193 2182 2194 2183 2195 mutex_init(&dpu_enc->rc_lock); 2184 - kthread_init_delayed_work(&dpu_enc->delayed_off_work, 2196 + INIT_DELAYED_WORK(&dpu_enc->delayed_off_work, 2185 2197 dpu_encoder_off_work); 2186 2198 dpu_enc->idle_timeout = IDLE_TIMEOUT; 2187 2199
+1 -12
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
··· 38 38 }; 39 39 40 40 /** 41 - * dpu_encoder_kickoff_params - info encoder requires at kickoff 42 - * @affected_displays: bitmask, bit set means the ROI of the commit lies within 43 - * the bounds of the physical display at the bit index 44 - */ 45 - struct dpu_encoder_kickoff_params { 46 - unsigned long affected_displays; 47 - }; 48 - 49 - /** 50 41 * dpu_encoder_get_hw_resources - Populate table of required hardware resources 51 42 * @encoder: encoder pointer 52 43 * @hw_res: resource table to populate with encoder required resources ··· 79 88 * Immediately: if no previous commit is outstanding. 80 89 * Delayed: Block until next trigger can be issued. 81 90 * @encoder: encoder pointer 82 - * @params: kickoff time parameters 83 91 * @async: true if this is an asynchronous commit 84 92 */ 85 - void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder, 86 - struct dpu_encoder_kickoff_params *params, bool async); 93 + void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder, bool async); 87 94 88 95 /** 89 96 * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+1 -2
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
··· 144 144 int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc); 145 145 int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc); 146 146 int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc); 147 - void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc, 148 - struct dpu_encoder_kickoff_params *params); 147 + void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc); 149 148 void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc); 150 149 void (*trigger_start)(struct dpu_encoder_phys *phys_enc); 151 150 bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+2 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
··· 594 594 } 595 595 596 596 static void dpu_encoder_phys_cmd_prepare_for_kickoff( 597 - struct dpu_encoder_phys *phys_enc, 598 - struct dpu_encoder_kickoff_params *params) 597 + struct dpu_encoder_phys *phys_enc) 599 598 { 600 599 struct dpu_encoder_phys_cmd *cmd_enc = 601 600 to_dpu_encoder_phys_cmd(phys_enc); ··· 692 693 693 694 /* required for both controllers */ 694 695 if (!rc && cmd_enc->serialize_wait4pp) 695 - dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL); 696 + dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc); 696 697 697 698 return rc; 698 699 }
+2 -3
drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
··· 587 587 } 588 588 589 589 static void dpu_encoder_phys_vid_prepare_for_kickoff( 590 - struct dpu_encoder_phys *phys_enc, 591 - struct dpu_encoder_kickoff_params *params) 590 + struct dpu_encoder_phys *phys_enc) 592 591 { 593 592 struct dpu_encoder_phys_vid *vid_enc; 594 593 struct dpu_hw_ctl *ctl; 595 594 int rc; 596 595 597 - if (!phys_enc || !params) { 596 + if (!phys_enc) { 598 597 DPU_ERROR("invalid encoder/parameters\n"); 599 598 return; 600 599 }
+2 -35
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
··· 263 263 264 264 INTERLEAVED_RGB_FMT(RGB565, 265 265 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, 266 - C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, 266 + C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, 267 267 false, 2, 0, 268 268 DPU_FETCH_LINEAR, 1), 269 269 270 270 INTERLEAVED_RGB_FMT(BGR565, 271 271 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT, 272 - C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3, 272 + C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3, 273 273 false, 2, 0, 274 274 DPU_FETCH_LINEAR, 1), 275 275 ··· 1136 1136 if (fmt) 1137 1137 return &fmt->base; 1138 1138 return NULL; 1139 - } 1140 - 1141 - uint32_t dpu_populate_formats( 1142 - const struct dpu_format_extended *format_list, 1143 - uint32_t *pixel_formats, 1144 - uint64_t *pixel_modifiers, 1145 - uint32_t pixel_formats_max) 1146 - { 1147 - uint32_t i, fourcc_format; 1148 - 1149 - if (!format_list || !pixel_formats) 1150 - return 0; 1151 - 1152 - for (i = 0, fourcc_format = 0; 1153 - format_list->fourcc_format && i < pixel_formats_max; 1154 - ++format_list) { 1155 - /* verify if listed format is in dpu_format_map? */ 1156 - 1157 - /* optionally return modified formats */ 1158 - if (pixel_modifiers) { 1159 - /* assume same modifier for all fb planes */ 1160 - pixel_formats[i] = format_list->fourcc_format; 1161 - pixel_modifiers[i++] = format_list->modifier; 1162 - } else { 1163 - /* assume base formats grouped together */ 1164 - if (fourcc_format != format_list->fourcc_format) { 1165 - fourcc_format = format_list->fourcc_format; 1166 - pixel_formats[i++] = fourcc_format; 1167 - } 1168 - } 1169 - } 1170 - 1171 - return i; 1172 1139 }
-14
drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
··· 41 41 const uint64_t modifiers); 42 42 43 43 /** 44 - * dpu_populate_formats - populate the given array with fourcc codes supported 45 - * @format_list: pointer to list of possible formats 46 - * @pixel_formats: array to populate with fourcc codes 47 - * @pixel_modifiers: array to populate with drm modifiers, can be NULL 48 - * @pixel_formats_max: length of pixel formats array 49 - * Return: number of elements populated 50 - */ 51 - uint32_t dpu_populate_formats( 52 - const struct dpu_format_extended *format_list, 53 - uint32_t *pixel_formats, 54 - uint64_t *pixel_modifiers, 55 - uint32_t pixel_formats_max); 56 - 57 - /** 58 44 * dpu_format_check_modified_format - validate format and buffers for 59 45 * dpu non-standard, i.e. modified format 60 46 * @kms: kms driver
+4
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
··· 151 151 .id = DPU_SSPP_CSC_10BIT, \ 152 152 .base = 0x1a00, .len = 0x100,}, \ 153 153 .format_list = plane_formats_yuv, \ 154 + .num_formats = ARRAY_SIZE(plane_formats_yuv), \ 154 155 .virt_format_list = plane_formats, \ 156 + .virt_num_formats = ARRAY_SIZE(plane_formats), \ 155 157 } 156 158 157 159 #define _DMA_SBLK(num, sdma_pri) \ ··· 165 163 .src_blk = {.name = STRCAT("sspp_src_", num), \ 166 164 .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \ 167 165 .format_list = plane_formats, \ 166 + .num_formats = ARRAY_SIZE(plane_formats), \ 168 167 .virt_format_list = plane_formats, \ 168 + .virt_num_formats = ARRAY_SIZE(plane_formats), \ 169 169 } 170 170 171 171 static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+6 -13
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
··· 252 252 }; 253 253 254 254 /** 255 - * struct dpu_format_extended - define dpu specific pixel format+modifier 256 - * @fourcc_format: Base FOURCC pixel format code 257 - * @modifier: 64-bit drm format modifier, same modifier must be applied to all 258 - * framebuffer planes 259 - */ 260 - struct dpu_format_extended { 261 - uint32_t fourcc_format; 262 - uint64_t modifier; 263 - }; 264 - 265 - /** 266 255 * enum dpu_qos_lut_usage - define QoS LUT use cases 267 256 */ 268 257 enum dpu_qos_lut_usage { ··· 337 348 * @pcc_blk: 338 349 * @igc_blk: 339 350 * @format_list: Pointer to list of supported formats 351 + * @num_formats: Number of supported formats 340 352 * @virt_format_list: Pointer to list of supported formats for virtual planes 353 + * @virt_num_formats: Number of supported formats for virtual planes 341 354 */ 342 355 struct dpu_sspp_sub_blks { 343 356 const struct dpu_sspp_blks_common *common; ··· 357 366 struct dpu_pp_blk pcc_blk; 358 367 struct dpu_pp_blk igc_blk; 359 368 360 - const struct dpu_format_extended *format_list; 361 - const struct dpu_format_extended *virt_format_list; 369 + const u32 *format_list; 370 + u32 num_formats; 371 + const u32 *virt_format_list; 372 + u32 virt_num_formats; 362 373 }; 363 374 364 375 /**
+72 -148
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
··· 12 12 13 13 #include "dpu_hw_mdss.h" 14 14 15 - static const struct dpu_format_extended plane_formats[] = { 16 - {DRM_FORMAT_ARGB8888, 0}, 17 - {DRM_FORMAT_ABGR8888, 0}, 18 - {DRM_FORMAT_RGBA8888, 0}, 19 - {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 20 - {DRM_FORMAT_BGRA8888, 0}, 21 - {DRM_FORMAT_XRGB8888, 0}, 22 - {DRM_FORMAT_RGBX8888, 0}, 23 - {DRM_FORMAT_BGRX8888, 0}, 24 - {DRM_FORMAT_XBGR8888, 0}, 25 - {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 26 - {DRM_FORMAT_RGB888, 0}, 27 - {DRM_FORMAT_BGR888, 0}, 28 - {DRM_FORMAT_RGB565, 0}, 29 - {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 30 - {DRM_FORMAT_BGR565, 0}, 31 - {DRM_FORMAT_ARGB1555, 0}, 32 - {DRM_FORMAT_ABGR1555, 0}, 33 - {DRM_FORMAT_RGBA5551, 0}, 34 - {DRM_FORMAT_BGRA5551, 0}, 35 - {DRM_FORMAT_XRGB1555, 0}, 36 - {DRM_FORMAT_XBGR1555, 0}, 37 - {DRM_FORMAT_RGBX5551, 0}, 38 - {DRM_FORMAT_BGRX5551, 0}, 39 - {DRM_FORMAT_ARGB4444, 0}, 40 - {DRM_FORMAT_ABGR4444, 0}, 41 - {DRM_FORMAT_RGBA4444, 0}, 42 - {DRM_FORMAT_BGRA4444, 0}, 43 - {DRM_FORMAT_XRGB4444, 0}, 44 - {DRM_FORMAT_XBGR4444, 0}, 45 - {DRM_FORMAT_RGBX4444, 0}, 46 - {DRM_FORMAT_BGRX4444, 0}, 47 - {0, 0}, 15 + static const uint32_t qcom_compressed_supported_formats[] = { 16 + DRM_FORMAT_ABGR8888, 17 + DRM_FORMAT_XBGR8888, 18 + DRM_FORMAT_BGR565, 48 19 }; 49 20 50 - static const struct dpu_format_extended plane_formats_yuv[] = { 51 - {DRM_FORMAT_ARGB8888, 0}, 52 - {DRM_FORMAT_ABGR8888, 0}, 53 - {DRM_FORMAT_RGBA8888, 0}, 54 - {DRM_FORMAT_BGRX8888, 0}, 55 - {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 56 - {DRM_FORMAT_BGRA8888, 0}, 57 - {DRM_FORMAT_XRGB8888, 0}, 58 - {DRM_FORMAT_XBGR8888, 0}, 59 - {DRM_FORMAT_RGBX8888, 0}, 60 - {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 61 - {DRM_FORMAT_RGB888, 0}, 62 - {DRM_FORMAT_BGR888, 0}, 63 - {DRM_FORMAT_RGB565, 0}, 64 - {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 65 - {DRM_FORMAT_BGR565, 0}, 66 - {DRM_FORMAT_ARGB1555, 0}, 67 - {DRM_FORMAT_ABGR1555, 0}, 68 - 
{DRM_FORMAT_RGBA5551, 0}, 69 - {DRM_FORMAT_BGRA5551, 0}, 70 - {DRM_FORMAT_XRGB1555, 0}, 71 - {DRM_FORMAT_XBGR1555, 0}, 72 - {DRM_FORMAT_RGBX5551, 0}, 73 - {DRM_FORMAT_BGRX5551, 0}, 74 - {DRM_FORMAT_ARGB4444, 0}, 75 - {DRM_FORMAT_ABGR4444, 0}, 76 - {DRM_FORMAT_RGBA4444, 0}, 77 - {DRM_FORMAT_BGRA4444, 0}, 78 - {DRM_FORMAT_XRGB4444, 0}, 79 - {DRM_FORMAT_XBGR4444, 0}, 80 - {DRM_FORMAT_RGBX4444, 0}, 81 - {DRM_FORMAT_BGRX4444, 0}, 82 - 83 - {DRM_FORMAT_NV12, 0}, 84 - {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 85 - {DRM_FORMAT_NV21, 0}, 86 - {DRM_FORMAT_NV16, 0}, 87 - {DRM_FORMAT_NV61, 0}, 88 - {DRM_FORMAT_VYUY, 0}, 89 - {DRM_FORMAT_UYVY, 0}, 90 - {DRM_FORMAT_YUYV, 0}, 91 - {DRM_FORMAT_YVYU, 0}, 92 - {DRM_FORMAT_YUV420, 0}, 93 - {DRM_FORMAT_YVU420, 0}, 94 - {0, 0}, 21 + static const uint32_t plane_formats[] = { 22 + DRM_FORMAT_ARGB8888, 23 + DRM_FORMAT_ABGR8888, 24 + DRM_FORMAT_RGBA8888, 25 + DRM_FORMAT_BGRA8888, 26 + DRM_FORMAT_XRGB8888, 27 + DRM_FORMAT_RGBX8888, 28 + DRM_FORMAT_BGRX8888, 29 + DRM_FORMAT_XBGR8888, 30 + DRM_FORMAT_RGB888, 31 + DRM_FORMAT_BGR888, 32 + DRM_FORMAT_RGB565, 33 + DRM_FORMAT_BGR565, 34 + DRM_FORMAT_ARGB1555, 35 + DRM_FORMAT_ABGR1555, 36 + DRM_FORMAT_RGBA5551, 37 + DRM_FORMAT_BGRA5551, 38 + DRM_FORMAT_XRGB1555, 39 + DRM_FORMAT_XBGR1555, 40 + DRM_FORMAT_RGBX5551, 41 + DRM_FORMAT_BGRX5551, 42 + DRM_FORMAT_ARGB4444, 43 + DRM_FORMAT_ABGR4444, 44 + DRM_FORMAT_RGBA4444, 45 + DRM_FORMAT_BGRA4444, 46 + DRM_FORMAT_XRGB4444, 47 + DRM_FORMAT_XBGR4444, 48 + DRM_FORMAT_RGBX4444, 49 + DRM_FORMAT_BGRX4444, 95 50 }; 96 51 97 - static const struct dpu_format_extended cursor_formats[] = { 98 - {DRM_FORMAT_ARGB8888, 0}, 99 - {DRM_FORMAT_ABGR8888, 0}, 100 - {DRM_FORMAT_RGBA8888, 0}, 101 - {DRM_FORMAT_BGRA8888, 0}, 102 - {DRM_FORMAT_XRGB8888, 0}, 103 - {DRM_FORMAT_ARGB1555, 0}, 104 - {DRM_FORMAT_ABGR1555, 0}, 105 - {DRM_FORMAT_RGBA5551, 0}, 106 - {DRM_FORMAT_BGRA5551, 0}, 107 - {DRM_FORMAT_ARGB4444, 0}, 108 - {DRM_FORMAT_ABGR4444, 0}, 109 - 
{DRM_FORMAT_RGBA4444, 0}, 110 - {DRM_FORMAT_BGRA4444, 0}, 111 - {0, 0}, 112 - }; 52 + static const uint32_t plane_formats_yuv[] = { 53 + DRM_FORMAT_ARGB8888, 54 + DRM_FORMAT_ABGR8888, 55 + DRM_FORMAT_RGBA8888, 56 + DRM_FORMAT_BGRX8888, 57 + DRM_FORMAT_BGRA8888, 58 + DRM_FORMAT_XRGB8888, 59 + DRM_FORMAT_XBGR8888, 60 + DRM_FORMAT_RGBX8888, 61 + DRM_FORMAT_RGB888, 62 + DRM_FORMAT_BGR888, 63 + DRM_FORMAT_RGB565, 64 + DRM_FORMAT_BGR565, 65 + DRM_FORMAT_ARGB1555, 66 + DRM_FORMAT_ABGR1555, 67 + DRM_FORMAT_RGBA5551, 68 + DRM_FORMAT_BGRA5551, 69 + DRM_FORMAT_XRGB1555, 70 + DRM_FORMAT_XBGR1555, 71 + DRM_FORMAT_RGBX5551, 72 + DRM_FORMAT_BGRX5551, 73 + DRM_FORMAT_ARGB4444, 74 + DRM_FORMAT_ABGR4444, 75 + DRM_FORMAT_RGBA4444, 76 + DRM_FORMAT_BGRA4444, 77 + DRM_FORMAT_XRGB4444, 78 + DRM_FORMAT_XBGR4444, 79 + DRM_FORMAT_RGBX4444, 80 + DRM_FORMAT_BGRX4444, 113 81 114 - static const struct dpu_format_extended wb2_formats[] = { 115 - {DRM_FORMAT_RGB565, 0}, 116 - {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 117 - {DRM_FORMAT_RGB888, 0}, 118 - {DRM_FORMAT_ARGB8888, 0}, 119 - {DRM_FORMAT_RGBA8888, 0}, 120 - {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 121 - {DRM_FORMAT_XRGB8888, 0}, 122 - {DRM_FORMAT_RGBX8888, 0}, 123 - {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 124 - {DRM_FORMAT_ARGB1555, 0}, 125 - {DRM_FORMAT_RGBA5551, 0}, 126 - {DRM_FORMAT_XRGB1555, 0}, 127 - {DRM_FORMAT_RGBX5551, 0}, 128 - {DRM_FORMAT_ARGB4444, 0}, 129 - {DRM_FORMAT_RGBA4444, 0}, 130 - {DRM_FORMAT_RGBX4444, 0}, 131 - {DRM_FORMAT_XRGB4444, 0}, 132 - 133 - {DRM_FORMAT_BGR565, 0}, 134 - {DRM_FORMAT_BGR888, 0}, 135 - {DRM_FORMAT_ABGR8888, 0}, 136 - {DRM_FORMAT_BGRA8888, 0}, 137 - {DRM_FORMAT_BGRX8888, 0}, 138 - {DRM_FORMAT_XBGR8888, 0}, 139 - {DRM_FORMAT_ABGR1555, 0}, 140 - {DRM_FORMAT_BGRA5551, 0}, 141 - {DRM_FORMAT_XBGR1555, 0}, 142 - {DRM_FORMAT_BGRX5551, 0}, 143 - {DRM_FORMAT_ABGR4444, 0}, 144 - {DRM_FORMAT_BGRA4444, 0}, 145 - {DRM_FORMAT_BGRX4444, 0}, 146 - {DRM_FORMAT_XBGR4444, 0}, 
147 - 148 - {DRM_FORMAT_YUV420, 0}, 149 - {DRM_FORMAT_NV12, 0}, 150 - {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 151 - {DRM_FORMAT_NV16, 0}, 152 - {DRM_FORMAT_YUYV, 0}, 153 - 154 - {0, 0}, 155 - }; 156 - 157 - static const struct dpu_format_extended rgb_10bit_formats[] = { 158 - {DRM_FORMAT_BGRA1010102, 0}, 159 - {DRM_FORMAT_BGRX1010102, 0}, 160 - {DRM_FORMAT_RGBA1010102, 0}, 161 - {DRM_FORMAT_RGBX1010102, 0}, 162 - {DRM_FORMAT_ABGR2101010, 0}, 163 - {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 164 - {DRM_FORMAT_XBGR2101010, 0}, 165 - {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED}, 166 - {DRM_FORMAT_ARGB2101010, 0}, 167 - {DRM_FORMAT_XRGB2101010, 0}, 82 + DRM_FORMAT_NV12, 83 + DRM_FORMAT_NV21, 84 + DRM_FORMAT_NV16, 85 + DRM_FORMAT_NV61, 86 + DRM_FORMAT_VYUY, 87 + DRM_FORMAT_UYVY, 88 + DRM_FORMAT_YUYV, 89 + DRM_FORMAT_YVYU, 90 + DRM_FORMAT_YUV420, 91 + DRM_FORMAT_YVU420, 168 92 };
-44
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
··· 170 170 /** 171 171 * AD4 interrupt status bit definitions 172 172 */ 173 - #define DPU_INTR_BRIGHTPR_UPDATED BIT(4) 174 - #define DPU_INTR_DARKENH_UPDATED BIT(3) 175 - #define DPU_INTR_STREN_OUTROI_UPDATED BIT(2) 176 - #define DPU_INTR_STREN_INROI_UPDATED BIT(1) 177 173 #define DPU_INTR_BACKLIGHT_UPDATED BIT(0) 178 174 /** 179 175 * struct dpu_intr_reg - array of DPU register sets ··· 778 782 return -EINVAL; 779 783 } 780 784 781 - static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off, 782 - uint32_t mask) 783 - { 784 - if (!intr) 785 - return; 786 - 787 - DPU_REG_WRITE(&intr->hw, reg_off, mask); 788 - 789 - /* ensure register writes go through */ 790 - wmb(); 791 - } 792 - 793 785 static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr, 794 786 void (*cbfunc)(void *, int), 795 787 void *arg) ··· 988 1004 return 0; 989 1005 } 990 1006 991 - static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr, 992 - uint32_t *mask) 993 - { 994 - if (!intr || !mask) 995 - return -EINVAL; 996 - 997 - *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1 998 - | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP; 999 - 1000 - return 0; 1001 - } 1002 - 1003 1007 static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr) 1004 1008 { 1005 1009 int i; ··· 1037 1065 wmb(); 1038 1066 } 1039 1067 1040 - static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr, 1041 - int irq_idx) 1042 - { 1043 - unsigned long irq_flags; 1044 - 1045 - if (!intr) 1046 - return; 1047 - 1048 - spin_lock_irqsave(&intr->irq_lock, irq_flags); 1049 - dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx); 1050 - spin_unlock_irqrestore(&intr->irq_lock, irq_flags); 1051 - } 1052 - 1053 1068 static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr, 1054 1069 int irq_idx, bool clear) 1055 1070 { ··· 1072 1113 1073 1114 static void __setup_intr_ops(struct dpu_hw_intr_ops *ops) 1074 1115 { 1075 - ops->set_mask = dpu_hw_intr_set_mask; 1076 1116 
ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup; 1077 1117 ops->enable_irq = dpu_hw_intr_enable_irq; 1078 1118 ops->disable_irq = dpu_hw_intr_disable_irq; 1079 1119 ops->dispatch_irqs = dpu_hw_intr_dispatch_irq; 1080 1120 ops->clear_all_irqs = dpu_hw_intr_clear_irqs; 1081 1121 ops->disable_all_irqs = dpu_hw_intr_disable_irqs; 1082 - ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts; 1083 1122 ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses; 1084 - ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status; 1085 1123 ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock; 1086 1124 ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status; 1087 1125 }
-44
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
··· 20 20 #include "dpu_hw_util.h" 21 21 #include "dpu_hw_mdss.h" 22 22 23 - #define IRQ_SOURCE_MDP BIT(0) 24 - #define IRQ_SOURCE_DSI0 BIT(4) 25 - #define IRQ_SOURCE_DSI1 BIT(5) 26 - #define IRQ_SOURCE_HDMI BIT(8) 27 - #define IRQ_SOURCE_EDP BIT(12) 28 - #define IRQ_SOURCE_MHL BIT(16) 29 - 30 23 /** 31 24 * dpu_intr_type - HW Interrupt Type 32 25 * @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done ··· 88 95 * Interrupt operations. 89 96 */ 90 97 struct dpu_hw_intr_ops { 91 - /** 92 - * set_mask - Programs the given interrupt register with the 93 - * given interrupt mask. Register value will get overwritten. 94 - * @intr: HW interrupt handle 95 - * @reg_off: MDSS HW register offset 96 - * @irqmask: IRQ mask value 97 - */ 98 - void (*set_mask)( 99 - struct dpu_hw_intr *intr, 100 - uint32_t reg, 101 - uint32_t irqmask); 102 - 103 98 /** 104 99 * irq_idx_lookup - Lookup IRQ index on the HW interrupt type 105 100 * Used for all irq related ops ··· 158 177 struct dpu_hw_intr *intr); 159 178 160 179 /** 161 - * clear_interrupt_status - Clears HW interrupt status based on given 162 - * lookup IRQ index. 163 - * @intr: HW interrupt handle 164 - * @irq_idx: Lookup irq index return from irq_idx_lookup 165 - */ 166 - void (*clear_interrupt_status)( 167 - struct dpu_hw_intr *intr, 168 - int irq_idx); 169 - 170 - /** 171 180 * clear_intr_status_nolock() - clears the HW interrupts without lock 172 181 * @intr: HW interrupt handle 173 182 * @irq_idx: Lookup irq index return from irq_idx_lookup ··· 177 206 struct dpu_hw_intr *intr, 178 207 int irq_idx, 179 208 bool clear); 180 - 181 - /** 182 - * get_valid_interrupts - Gets a mask of all valid interrupt sources 183 - * within DPU. These are actually status bits 184 - * within interrupt registers that specify the 185 - * source of the interrupt in IRQs. For example, 186 - * valid interrupt sources can be MDP, DSI, 187 - * HDMI etc. 
188 - * @intr: HW interrupt handle 189 - * @mask: Returning the interrupt source MASK 190 - * @return: 0 for success, otherwise failure 191 - */ 192 - int (*get_valid_interrupts)( 193 - struct dpu_hw_intr *intr, 194 - uint32_t *mask); 195 209 }; 196 210 197 211 /**
-7
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
··· 258 258 VBIF_NRT = VBIF_1 259 259 }; 260 260 261 - enum dpu_iommu_domain { 262 - DPU_IOMMU_DOMAIN_UNSECURE, 263 - DPU_IOMMU_DOMAIN_SECURE, 264 - DPU_IOMMU_DOMAIN_MAX 265 - }; 266 - 267 261 /** 268 262 * DPU HW,Component order color map 269 263 */ ··· 352 358 * @alpha_enable: whether the format has an alpha channel 353 359 * @num_planes: number of planes (including meta data planes) 354 360 * @fetch_mode: linear, tiled, or ubwc hw fetch behavior 355 - * @is_yuv: is format a yuv variant 356 361 * @flag: usage bit flags 357 362 * @tile_width: format tile width 358 363 * @tile_height: format tile height
-1
drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
··· 18 18 #include "dpu_hw_mdss.h" 19 19 20 20 #define REG_MASK(n) ((BIT(n)) - 1) 21 - struct dpu_format_extended; 22 21 23 22 /* 24 23 * This is the common struct maintained by each sub block
+36 -29
drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
··· 405 405 } 406 406 } 407 407 408 - static void _dpu_kms_initialize_dsi(struct drm_device *dev, 408 + static int _dpu_kms_initialize_dsi(struct drm_device *dev, 409 409 struct msm_drm_private *priv, 410 410 struct dpu_kms *dpu_kms) 411 411 { 412 412 struct drm_encoder *encoder = NULL; 413 - int i, rc; 413 + int i, rc = 0; 414 + 415 + if (!(priv->dsi[0] || priv->dsi[1])) 416 + return rc; 414 417 415 418 /*TODO: Support two independent DSI connectors */ 416 419 encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_DSI); 417 - if (IS_ERR_OR_NULL(encoder)) { 420 + if (IS_ERR(encoder)) { 418 421 DPU_ERROR("encoder init failed for dsi display\n"); 419 - return; 422 + return PTR_ERR(encoder); 420 423 } 421 424 422 425 priv->encoders[priv->num_encoders++] = encoder; 423 426 424 427 for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) { 425 - if (!priv->dsi[i]) { 426 - DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i); 427 - return; 428 - } 428 + if (!priv->dsi[i]) 429 + continue; 429 430 430 431 rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder); 431 432 if (rc) { 432 433 DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n", 433 434 i, rc); 434 - continue; 435 + break; 435 436 } 436 437 } 438 + 439 + return rc; 437 440 } 438 441 439 442 /** ··· 447 444 * @dpu_kms: Pointer to dpu kms structure 448 445 * Returns: Zero on success 449 446 */ 450 - static void _dpu_kms_setup_displays(struct drm_device *dev, 447 + static int _dpu_kms_setup_displays(struct drm_device *dev, 451 448 struct msm_drm_private *priv, 452 449 struct dpu_kms *dpu_kms) 453 450 { 454 - _dpu_kms_initialize_dsi(dev, priv, dpu_kms); 455 - 456 451 /** 457 452 * Extend this function to initialize other 458 453 * types of displays 459 454 */ 455 + 456 + return _dpu_kms_initialize_dsi(dev, priv, dpu_kms); 460 457 } 461 458 462 459 static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms) ··· 519 516 * Create encoder and query display drivers to create 520 517 * bridges and connectors 521 518 */ 522 - 
_dpu_kms_setup_displays(dev, priv, dpu_kms); 519 + ret = _dpu_kms_setup_displays(dev, priv, dpu_kms); 520 + if (ret) 521 + goto fail; 523 522 524 523 max_crtc_count = min(catalog->mixer_count, priv->num_encoders); 525 524 ··· 631 626 if (dpu_kms->vbif[VBIF_RT]) 632 627 devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]); 633 628 dpu_kms->vbif[VBIF_RT] = NULL; 629 + 630 + if (dpu_kms->hw_mdp) 631 + dpu_hw_mdp_destroy(dpu_kms->hw_mdp); 632 + dpu_kms->hw_mdp = NULL; 634 633 635 634 if (dpu_kms->mmio) 636 635 devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio); ··· 886 877 goto power_error; 887 878 } 888 879 889 - rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio, 890 - dpu_kms->dev); 880 + rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio); 891 881 if (rc) { 892 882 DPU_ERROR("rm init failed: %d\n", rc); 893 883 goto power_error; ··· 894 886 895 887 dpu_kms->rm_init = true; 896 888 897 - dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm); 898 - if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) { 889 + dpu_kms->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, dpu_kms->mmio, 890 + dpu_kms->catalog); 891 + if (IS_ERR(dpu_kms->hw_mdp)) { 899 892 rc = PTR_ERR(dpu_kms->hw_mdp); 900 - if (!dpu_kms->hw_mdp) 901 - rc = -EINVAL; 902 893 DPU_ERROR("failed to get hw_mdp: %d\n", rc); 903 894 dpu_kms->hw_mdp = NULL; 904 895 goto power_error; ··· 933 926 goto hw_intr_init_err; 934 927 } 935 928 936 - /* 937 - * _dpu_kms_drm_obj_init should create the DRM related objects 938 - * i.e. CRTCs, planes, encoders, connectors and so forth 939 - */ 940 - rc = _dpu_kms_drm_obj_init(dpu_kms); 941 - if (rc) { 942 - DPU_ERROR("modeset init failed: %d\n", rc); 943 - goto drm_obj_init_err; 944 - } 945 - 946 929 dev->mode_config.min_width = 0; 947 930 dev->mode_config.min_height = 0; 948 931 ··· 948 951 * Support format modifiers for compression etc. 
949 952 */ 950 953 dev->mode_config.allow_fb_modifiers = true; 954 + 955 + /* 956 + * _dpu_kms_drm_obj_init should create the DRM related objects 957 + * i.e. CRTCs, planes, encoders, connectors and so forth 958 + */ 959 + rc = _dpu_kms_drm_obj_init(dpu_kms); 960 + if (rc) { 961 + DPU_ERROR("modeset init failed: %d\n", rc); 962 + goto drm_obj_init_err; 963 + } 951 964 952 965 dpu_vbif_init_memtypes(dpu_kms); 953 966
+21 -15
drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
··· 23 23 struct dpu_irq_controller irq_controller; 24 24 }; 25 25 26 - static irqreturn_t dpu_mdss_irq(int irq, void *arg) 26 + static void dpu_mdss_irq(struct irq_desc *desc) 27 27 { 28 - struct dpu_mdss *dpu_mdss = arg; 28 + struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc); 29 + struct irq_chip *chip = irq_desc_get_chip(desc); 29 30 u32 interrupts; 31 + 32 + chained_irq_enter(chip, desc); 30 33 31 34 interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS); 32 35 ··· 42 39 hwirq); 43 40 if (mapping == 0) { 44 41 DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq); 45 - return IRQ_NONE; 42 + break; 46 43 } 47 44 48 45 rc = generic_handle_irq(mapping); 49 46 if (rc < 0) { 50 47 DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n", 51 48 hwirq, mapping, rc); 52 - return IRQ_NONE; 49 + break; 53 50 } 54 51 55 52 interrupts &= ~(1 << hwirq); 56 53 } 57 54 58 - return IRQ_HANDLED; 55 + chained_irq_exit(chip, desc); 59 56 } 60 57 61 58 static void dpu_mdss_irq_mask(struct irq_data *irqd) ··· 86 83 .irq_unmask = dpu_mdss_irq_unmask, 87 84 }; 88 85 86 + static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key; 87 + 89 88 static int dpu_mdss_irqdomain_map(struct irq_domain *domain, 90 89 unsigned int irq, irq_hw_number_t hwirq) 91 90 { 92 91 struct dpu_mdss *dpu_mdss = domain->host_data; 93 - int ret; 94 92 93 + irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key); 95 94 irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq); 96 - ret = irq_set_chip_data(irq, dpu_mdss); 97 - 98 - return ret; 95 + return irq_set_chip_data(irq, dpu_mdss); 99 96 } 100 97 101 98 static const struct irq_domain_ops dpu_mdss_irqdomain_ops = { ··· 162 159 struct msm_drm_private *priv = dev->dev_private; 163 160 struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss); 164 161 struct dss_module_power *mp = &dpu_mdss->mp; 162 + int irq; 165 163 166 164 pm_runtime_suspend(dev->dev); 167 165 pm_runtime_disable(dev->dev); 168 166 
_dpu_mdss_irq_domain_fini(dpu_mdss); 169 - free_irq(platform_get_irq(pdev, 0), dpu_mdss); 167 + irq = platform_get_irq(pdev, 0); 168 + irq_set_chained_handler_and_data(irq, NULL, NULL); 170 169 msm_dss_put_clk(mp->clk_config, mp->num_clk); 171 170 devm_kfree(&pdev->dev, mp->clk_config); 172 171 ··· 192 187 struct dpu_mdss *dpu_mdss; 193 188 struct dss_module_power *mp; 194 189 int ret = 0; 190 + int irq; 195 191 196 192 dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL); 197 193 if (!dpu_mdss) ··· 225 219 if (ret) 226 220 goto irq_domain_error; 227 221 228 - ret = request_irq(platform_get_irq(pdev, 0), 229 - dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss); 230 - if (ret) { 231 - DPU_ERROR("failed to init irq: %d\n", ret); 222 + irq = platform_get_irq(pdev, 0); 223 + if (irq < 0) 232 224 goto irq_error; 233 - } 225 + 226 + irq_set_chained_handler_and_data(irq, dpu_mdss_irq, 227 + dpu_mdss); 234 228 235 229 pm_runtime_enable(dev->dev); 236 230
+34 -17
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
··· 95 95 96 96 enum dpu_sspp pipe; 97 97 uint32_t features; /* capabilities from catalog */ 98 - uint32_t nformats; 99 - uint32_t formats[64]; 100 98 101 99 struct dpu_hw_pipe *pipe_hw; 102 100 struct dpu_hw_pipe_cfg pipe_cfg; ··· 117 119 struct dpu_debugfs_regset32 debugfs_scaler; 118 120 struct dpu_debugfs_regset32 debugfs_csc; 119 121 bool debugfs_default_scale; 122 + }; 123 + 124 + static const uint64_t supported_format_modifiers[] = { 125 + DRM_FORMAT_MOD_QCOM_COMPRESSED, 126 + DRM_FORMAT_MOD_LINEAR, 127 + DRM_FORMAT_MOD_INVALID 120 128 }; 121 129 122 130 #define to_dpu_plane(x) container_of(x, struct dpu_plane, base) ··· 1414 1410 debugfs_remove_recursive(pdpu->debugfs_root); 1415 1411 } 1416 1412 1413 + static bool dpu_plane_format_mod_supported(struct drm_plane *plane, 1414 + uint32_t format, uint64_t modifier) 1415 + { 1416 + if (modifier == DRM_FORMAT_MOD_LINEAR) 1417 + return true; 1418 + 1419 + if (modifier == DRM_FORMAT_MOD_QCOM_COMPRESSED) { 1420 + int i; 1421 + for (i = 0; i < ARRAY_SIZE(qcom_compressed_supported_formats); i++) { 1422 + if (format == qcom_compressed_supported_formats[i]) 1423 + return true; 1424 + } 1425 + } 1426 + 1427 + return false; 1428 + } 1429 + 1417 1430 static const struct drm_plane_funcs dpu_plane_funcs = { 1418 1431 .update_plane = drm_atomic_helper_update_plane, 1419 1432 .disable_plane = drm_atomic_helper_disable_plane, ··· 1440 1419 .atomic_destroy_state = dpu_plane_destroy_state, 1441 1420 .late_register = dpu_plane_late_register, 1442 1421 .early_unregister = dpu_plane_early_unregister, 1422 + .format_mod_supported = dpu_plane_format_mod_supported, 1443 1423 }; 1444 1424 1445 1425 static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = { ··· 1466 1444 unsigned long possible_crtcs, u32 master_plane_id) 1467 1445 { 1468 1446 struct drm_plane *plane = NULL, *master_plane = NULL; 1469 - const struct dpu_format_extended *format_list; 1447 + const uint32_t *format_list; 1470 1448 struct dpu_plane *pdpu; 1471 
1449 struct msm_drm_private *priv = dev->dev_private; 1472 1450 struct dpu_kms *kms = to_dpu_kms(priv->kms); 1473 1451 int zpos_max = DPU_ZPOS_MAX; 1452 + uint32_t num_formats; 1474 1453 int ret = -EINVAL; 1475 1454 1476 1455 /* create and zero local structure */ ··· 1514 1491 goto clean_sspp; 1515 1492 } 1516 1493 1517 - if (!master_plane_id) 1518 - format_list = pdpu->pipe_sblk->format_list; 1519 - else 1494 + if (pdpu->is_virtual) { 1520 1495 format_list = pdpu->pipe_sblk->virt_format_list; 1521 - 1522 - pdpu->nformats = dpu_populate_formats(format_list, 1523 - pdpu->formats, 1524 - 0, 1525 - ARRAY_SIZE(pdpu->formats)); 1526 - 1527 - if (!pdpu->nformats) { 1528 - DPU_ERROR("[%u]no valid formats for plane\n", pipe); 1529 - goto clean_sspp; 1496 + num_formats = pdpu->pipe_sblk->virt_num_formats; 1497 + } 1498 + else { 1499 + format_list = pdpu->pipe_sblk->format_list; 1500 + num_formats = pdpu->pipe_sblk->num_formats; 1530 1501 } 1531 1502 1532 1503 ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs, 1533 - pdpu->formats, pdpu->nformats, 1534 - NULL, type, NULL); 1504 + format_list, num_formats, 1505 + supported_format_modifiers, type, NULL); 1535 1506 if (ret) 1536 1507 goto clean_sspp; 1537 1508
-27
drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
··· 28 28 /** 29 29 * struct dpu_plane_state: Define dpu extension of drm plane state object 30 30 * @base: base drm plane state object 31 - * @property_state: Local storage for msm_prop properties 32 - * @property_values: cached plane property values 33 31 * @aspace: pointer to address space for input/output buffers 34 - * @input_fence: dereferenced input fence pointer 35 32 * @stage: assigned by crtc blender 36 33 * @multirect_index: index of the rectangle of SSPP 37 34 * @multirect_mode: parallel or time multiplex multirect mode 38 35 * @pending: whether the current update is still pending 39 36 * @scaler3_cfg: configuration data for scaler3 40 37 * @pixel_ext: configuration data for pixel extensions 41 - * @scaler_check_state: indicates status of user provided pixel extension data 42 38 * @cdp_cfg: CDP configuration 43 39 */ 44 40 struct dpu_plane_state { 45 41 struct drm_plane_state base; 46 42 struct msm_gem_address_space *aspace; 47 - void *input_fence; 48 43 enum dpu_stage stage; 49 44 uint32_t multirect_index; 50 45 uint32_t multirect_mode; ··· 102 107 void dpu_plane_flush(struct drm_plane *plane); 103 108 104 109 /** 105 - * dpu_plane_kickoff - final plane operations before commit kickoff 106 - * @plane: Pointer to drm plane structure 107 - */ 108 - void dpu_plane_kickoff(struct drm_plane *plane); 109 - 110 - /** 111 110 * dpu_plane_set_error: enable/disable error condition 112 111 * @plane: pointer to drm_plane structure 113 112 */ ··· 136 147 void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state); 137 148 138 149 /** 139 - * dpu_plane_wait_input_fence - wait for input fence object 140 - * @plane: Pointer to DRM plane object 141 - * @wait_ms: Wait timeout value 142 - * Returns: Zero on success 143 - */ 144 - int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms); 145 - 146 - /** 147 150 * dpu_plane_color_fill - enables color fill on plane 148 151 * @plane: Pointer to DRM plane object 149 152 * @color: RGB fill color 
value, [23..16] Blue, [15..8] Green, [7..0] Red ··· 144 163 */ 145 164 int dpu_plane_color_fill(struct drm_plane *plane, 146 165 uint32_t color, uint32_t alpha); 147 - 148 - /** 149 - * dpu_plane_set_revalidate - sets revalidate flag which forces a full 150 - * validation of the plane properties in the next atomic check 151 - * @plane: Pointer to DRM plane object 152 - * @enable: Boolean to set/unset the flag 153 - */ 154 - void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable); 155 166 156 167 #endif /* _DPU_PLANE_H_ */
+46 -279
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
··· 21 21 #include "dpu_encoder.h" 22 22 #include "dpu_trace.h" 23 23 24 - #define RESERVED_BY_OTHER(h, r) \ 25 - ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id)) 24 + #define RESERVED_BY_OTHER(h, r) \ 25 + ((h)->enc_id && (h)->enc_id != r) 26 26 27 27 /** 28 28 * struct dpu_rm_requirements - Reservation requirements parameter bundle ··· 34 34 struct dpu_encoder_hw_resources hw_res; 35 35 }; 36 36 37 - /** 38 - * struct dpu_rm_rsvp - Use Case Reservation tagging structure 39 - * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain 40 - * By using as a tag, rather than lists of pointers to HW blocks used 41 - * we can avoid some list management since we don't know how many blocks 42 - * of each type a given use case may require. 43 - * @list: List head for list of all reservations 44 - * @seq: Global RSVP sequence number for debugging, especially for 45 - * differentiating differenct allocations for same encoder. 46 - * @enc_id: Reservations are tracked by Encoder DRM object ID. 47 - * CRTCs may be connected to multiple Encoders. 48 - * An encoder or connector id identifies the display path. 49 - */ 50 - struct dpu_rm_rsvp { 51 - struct list_head list; 52 - uint32_t seq; 53 - uint32_t enc_id; 54 - }; 55 37 56 38 /** 57 39 * struct dpu_rm_hw_blk - hardware block tracking list member 58 40 * @list: List head for list of all hardware blocks tracking items 59 - * @rsvp: Pointer to use case reservation if reserved by a client 60 - * @rsvp_nxt: Temporary pointer used during reservation to the incoming 61 - * request. Will be swapped into rsvp if proposal is accepted 62 - * @type: Type of hardware block this structure tracks 63 41 * @id: Hardware ID number, within it's own space, ie. 
LM_X 64 - * @catalog: Pointer to the hardware catalog entry for this block 42 + * @enc_id: Encoder id to which this blk is binded 65 43 * @hw: Pointer to the hardware register access object for this block 66 44 */ 67 45 struct dpu_rm_hw_blk { 68 46 struct list_head list; 69 - struct dpu_rm_rsvp *rsvp; 70 - struct dpu_rm_rsvp *rsvp_nxt; 71 - enum dpu_hw_blk_type type; 72 47 uint32_t id; 48 + uint32_t enc_id; 73 49 struct dpu_hw_blk *hw; 74 50 }; 75 - 76 - /** 77 - * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging 78 - */ 79 - enum dpu_rm_dbg_rsvp_stage { 80 - DPU_RM_STAGE_BEGIN, 81 - DPU_RM_STAGE_AFTER_CLEAR, 82 - DPU_RM_STAGE_AFTER_RSVPNEXT, 83 - DPU_RM_STAGE_FINAL 84 - }; 85 - 86 - static void _dpu_rm_print_rsvps( 87 - struct dpu_rm *rm, 88 - enum dpu_rm_dbg_rsvp_stage stage) 89 - { 90 - struct dpu_rm_rsvp *rsvp; 91 - struct dpu_rm_hw_blk *blk; 92 - enum dpu_hw_blk_type type; 93 - 94 - DPU_DEBUG("%d\n", stage); 95 - 96 - list_for_each_entry(rsvp, &rm->rsvps, list) { 97 - DRM_DEBUG_KMS("%d rsvp[s%ue%u]\n", stage, rsvp->seq, 98 - rsvp->enc_id); 99 - } 100 - 101 - for (type = 0; type < DPU_HW_BLK_MAX; type++) { 102 - list_for_each_entry(blk, &rm->hw_blks[type], list) { 103 - if (!blk->rsvp && !blk->rsvp_nxt) 104 - continue; 105 - 106 - DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage, 107 - (blk->rsvp) ? blk->rsvp->seq : 0, 108 - (blk->rsvp) ? blk->rsvp->enc_id : 0, 109 - (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0, 110 - (blk->rsvp_nxt) ? 
blk->rsvp_nxt->enc_id : 0, 111 - blk->type, blk->id); 112 - } 113 - } 114 - } 115 - 116 - struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm) 117 - { 118 - return rm->hw_mdp; 119 - } 120 51 121 52 void dpu_rm_init_hw_iter( 122 53 struct dpu_rm_hw_iter *iter, ··· 79 148 i->blk = list_prepare_entry(i->blk, blk_list, list); 80 149 81 150 list_for_each_entry_continue(i->blk, blk_list, list) { 82 - struct dpu_rm_rsvp *rsvp = i->blk->rsvp; 83 - 84 - if (i->blk->type != i->type) { 85 - DPU_ERROR("found incorrect block type %d on %d list\n", 86 - i->blk->type, i->type); 87 - return false; 88 - } 89 - 90 - if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) { 151 + if (i->enc_id == i->blk->enc_id) { 91 152 i->hw = i->blk->hw; 92 153 DPU_DEBUG("found type %d id %d for enc %d\n", 93 154 i->type, i->blk->id, i->enc_id); ··· 131 208 132 209 int dpu_rm_destroy(struct dpu_rm *rm) 133 210 { 134 - 135 - struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt; 136 211 struct dpu_rm_hw_blk *hw_cur, *hw_nxt; 137 212 enum dpu_hw_blk_type type; 138 - 139 - if (!rm) { 140 - DPU_ERROR("invalid rm\n"); 141 - return -EINVAL; 142 - } 143 - 144 - list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) { 145 - list_del(&rsvp_cur->list); 146 - kfree(rsvp_cur); 147 - } 148 - 149 213 150 214 for (type = 0; type < DPU_HW_BLK_MAX; type++) { 151 215 list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type], 152 216 list) { 153 217 list_del(&hw_cur->list); 154 - _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw); 218 + _dpu_rm_hw_destroy(type, hw_cur->hw); 155 219 kfree(hw_cur); 156 220 } 157 221 } 158 - 159 - dpu_hw_mdp_destroy(rm->hw_mdp); 160 - rm->hw_mdp = NULL; 161 222 162 223 mutex_destroy(&rm->rm_lock); 163 224 ··· 157 250 void *hw_catalog_info) 158 251 { 159 252 struct dpu_rm_hw_blk *blk; 160 - struct dpu_hw_mdp *hw_mdp; 161 253 void *hw; 162 - 163 - hw_mdp = rm->hw_mdp; 164 254 165 255 switch (type) { 166 256 case DPU_HW_BLK_LM: ··· 194 290 return -ENOMEM; 195 291 } 196 292 197 - blk->type = 
type; 198 293 blk->id = id; 199 294 blk->hw = hw; 295 + blk->enc_id = 0; 200 296 list_add_tail(&blk->list, &rm->hw_blks[type]); 201 297 202 298 return 0; ··· 204 300 205 301 int dpu_rm_init(struct dpu_rm *rm, 206 302 struct dpu_mdss_cfg *cat, 207 - void __iomem *mmio, 208 - struct drm_device *dev) 303 + void __iomem *mmio) 209 304 { 210 305 int rc, i; 211 306 enum dpu_hw_blk_type type; 212 307 213 - if (!rm || !cat || !mmio || !dev) { 308 + if (!rm || !cat || !mmio) { 214 309 DPU_ERROR("invalid kms\n"); 215 310 return -EINVAL; 216 311 } ··· 219 316 220 317 mutex_init(&rm->rm_lock); 221 318 222 - INIT_LIST_HEAD(&rm->rsvps); 223 319 for (type = 0; type < DPU_HW_BLK_MAX; type++) 224 320 INIT_LIST_HEAD(&rm->hw_blks[type]); 225 - 226 - rm->dev = dev; 227 - 228 - /* Some of the sub-blocks require an mdptop to be created */ 229 - rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat); 230 - if (IS_ERR_OR_NULL(rm->hw_mdp)) { 231 - rc = PTR_ERR(rm->hw_mdp); 232 - rm->hw_mdp = NULL; 233 - DPU_ERROR("failed: mdp hw not available\n"); 234 - goto fail; 235 - } 236 321 237 322 /* Interrogate HW catalog and create tracking items for hw blocks */ 238 323 for (i = 0; i < cat->mixer_count; i++) { ··· 301 410 * proposed use case requirements, incl. hardwired dependent blocks like 302 411 * pingpong 303 412 * @rm: dpu resource manager handle 304 - * @rsvp: reservation currently being created 413 + * @enc_id: encoder id requesting for allocation 305 414 * @reqs: proposed use case requirements 306 415 * @lm: proposed layer mixer, function checks if lm, and all other hardwired 307 416 * blocks connected to the lm (pp) is available and appropriate ··· 313 422 */ 314 423 static bool _dpu_rm_check_lm_and_get_connected_blks( 315 424 struct dpu_rm *rm, 316 - struct dpu_rm_rsvp *rsvp, 425 + uint32_t enc_id, 317 426 struct dpu_rm_requirements *reqs, 318 427 struct dpu_rm_hw_blk *lm, 319 428 struct dpu_rm_hw_blk **pp, ··· 340 449 } 341 450 342 451 /* Already reserved? 
*/ 343 - if (RESERVED_BY_OTHER(lm, rsvp)) { 452 + if (RESERVED_BY_OTHER(lm, enc_id)) { 344 453 DPU_DEBUG("lm %d already reserved\n", lm_cfg->id); 345 454 return false; 346 455 } ··· 358 467 return false; 359 468 } 360 469 361 - if (RESERVED_BY_OTHER(*pp, rsvp)) { 470 + if (RESERVED_BY_OTHER(*pp, enc_id)) { 362 471 DPU_DEBUG("lm %d pp %d already reserved\n", lm->id, 363 472 (*pp)->id); 364 473 return false; ··· 367 476 return true; 368 477 } 369 478 370 - static int _dpu_rm_reserve_lms( 371 - struct dpu_rm *rm, 372 - struct dpu_rm_rsvp *rsvp, 373 - struct dpu_rm_requirements *reqs) 479 + static int _dpu_rm_reserve_lms(struct dpu_rm *rm, uint32_t enc_id, 480 + struct dpu_rm_requirements *reqs) 374 481 375 482 { 376 483 struct dpu_rm_hw_blk *lm[MAX_BLOCKS]; ··· 393 504 lm[lm_count] = iter_i.blk; 394 505 395 506 if (!_dpu_rm_check_lm_and_get_connected_blks( 396 - rm, rsvp, reqs, lm[lm_count], 507 + rm, enc_id, reqs, lm[lm_count], 397 508 &pp[lm_count], NULL)) 398 509 continue; 399 510 ··· 408 519 continue; 409 520 410 521 if (!_dpu_rm_check_lm_and_get_connected_blks( 411 - rm, rsvp, reqs, iter_j.blk, 522 + rm, enc_id, reqs, iter_j.blk, 412 523 &pp[lm_count], iter_i.blk)) 413 524 continue; 414 525 ··· 426 537 if (!lm[i]) 427 538 break; 428 539 429 - lm[i]->rsvp_nxt = rsvp; 430 - pp[i]->rsvp_nxt = rsvp; 540 + lm[i]->enc_id = enc_id; 541 + pp[i]->enc_id = enc_id; 431 542 432 - trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id, 433 - pp[i]->id); 543 + trace_dpu_rm_reserve_lms(lm[i]->id, enc_id, pp[i]->id); 434 544 } 435 545 436 546 return rc; ··· 437 549 438 550 static int _dpu_rm_reserve_ctls( 439 551 struct dpu_rm *rm, 440 - struct dpu_rm_rsvp *rsvp, 552 + uint32_t enc_id, 441 553 const struct msm_display_topology *top) 442 554 { 443 555 struct dpu_rm_hw_blk *ctls[MAX_BLOCKS]; ··· 458 570 unsigned long features = ctl->caps->features; 459 571 bool has_split_display; 460 572 461 - if (RESERVED_BY_OTHER(iter.blk, rsvp)) 573 + if (RESERVED_BY_OTHER(iter.blk, 
enc_id)) 462 574 continue; 463 575 464 576 has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features; ··· 479 591 return -ENAVAIL; 480 592 481 593 for (i = 0; i < ARRAY_SIZE(ctls) && i < num_ctls; i++) { 482 - ctls[i]->rsvp_nxt = rsvp; 483 - trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type, 484 - rsvp->enc_id); 594 + ctls[i]->enc_id = enc_id; 595 + trace_dpu_rm_reserve_ctls(ctls[i]->id, enc_id); 485 596 } 486 597 487 598 return 0; ··· 488 601 489 602 static int _dpu_rm_reserve_intf( 490 603 struct dpu_rm *rm, 491 - struct dpu_rm_rsvp *rsvp, 604 + uint32_t enc_id, 492 605 uint32_t id, 493 606 enum dpu_hw_blk_type type) 494 607 { ··· 501 614 if (iter.blk->id != id) 502 615 continue; 503 616 504 - if (RESERVED_BY_OTHER(iter.blk, rsvp)) { 617 + if (RESERVED_BY_OTHER(iter.blk, enc_id)) { 505 618 DPU_ERROR("type %d id %d already reserved\n", type, id); 506 619 return -ENAVAIL; 507 620 } 508 621 509 - iter.blk->rsvp_nxt = rsvp; 510 - trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type, 511 - rsvp->enc_id); 622 + iter.blk->enc_id = enc_id; 623 + trace_dpu_rm_reserve_intf(iter.blk->id, enc_id); 512 624 break; 513 625 } 514 626 ··· 522 636 523 637 static int _dpu_rm_reserve_intf_related_hw( 524 638 struct dpu_rm *rm, 525 - struct dpu_rm_rsvp *rsvp, 639 + uint32_t enc_id, 526 640 struct dpu_encoder_hw_resources *hw_res) 527 641 { 528 642 int i, ret = 0; ··· 532 646 if (hw_res->intfs[i] == INTF_MODE_NONE) 533 647 continue; 534 648 id = i + INTF_0; 535 - ret = _dpu_rm_reserve_intf(rm, rsvp, id, 649 + ret = _dpu_rm_reserve_intf(rm, enc_id, id, 536 650 DPU_HW_BLK_INTF); 537 651 if (ret) 538 652 return ret; ··· 541 655 return ret; 542 656 } 543 657 544 - static int _dpu_rm_make_next_rsvp( 658 + static int _dpu_rm_make_reservation( 545 659 struct dpu_rm *rm, 546 660 struct drm_encoder *enc, 547 661 struct drm_crtc_state *crtc_state, 548 - struct dpu_rm_rsvp *rsvp, 549 662 struct dpu_rm_requirements *reqs) 550 663 { 551 664 int ret; 552 665 553 - /* Create reservation info, 
tag reserved blocks with it as we go */ 554 - rsvp->seq = ++rm->rsvp_next_seq; 555 - rsvp->enc_id = enc->base.id; 556 - list_add_tail(&rsvp->list, &rm->rsvps); 557 - 558 - ret = _dpu_rm_reserve_lms(rm, rsvp, reqs); 666 + ret = _dpu_rm_reserve_lms(rm, enc->base.id, reqs); 559 667 if (ret) { 560 668 DPU_ERROR("unable to find appropriate mixers\n"); 561 669 return ret; 562 670 } 563 671 564 - ret = _dpu_rm_reserve_ctls(rm, rsvp, &reqs->topology); 672 + ret = _dpu_rm_reserve_ctls(rm, enc->base.id, &reqs->topology); 565 673 if (ret) { 566 674 DPU_ERROR("unable to find appropriate CTL\n"); 567 675 return ret; 568 676 } 569 677 570 - ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res); 678 + ret = _dpu_rm_reserve_intf_related_hw(rm, enc->base.id, &reqs->hw_res); 571 679 if (ret) 572 680 return ret; 573 681 ··· 586 706 return 0; 587 707 } 588 708 589 - static struct dpu_rm_rsvp *_dpu_rm_get_rsvp( 590 - struct dpu_rm *rm, 591 - struct drm_encoder *enc) 709 + static void _dpu_rm_release_reservation(struct dpu_rm *rm, uint32_t enc_id) 592 710 { 593 - struct dpu_rm_rsvp *i; 594 - 595 - if (!rm || !enc) { 596 - DPU_ERROR("invalid params\n"); 597 - return NULL; 598 - } 599 - 600 - if (list_empty(&rm->rsvps)) 601 - return NULL; 602 - 603 - list_for_each_entry(i, &rm->rsvps, list) 604 - if (i->enc_id == enc->base.id) 605 - return i; 606 - 607 - return NULL; 608 - } 609 - 610 - /** 611 - * _dpu_rm_release_rsvp - release resources and release a reservation 612 - * @rm: KMS handle 613 - * @rsvp: RSVP pointer to release and release resources for 614 - */ 615 - static void _dpu_rm_release_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp) 616 - { 617 - struct dpu_rm_rsvp *rsvp_c, *rsvp_n; 618 711 struct dpu_rm_hw_blk *blk; 619 712 enum dpu_hw_blk_type type; 620 713 621 - if (!rsvp) 622 - return; 623 - 624 - DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id); 625 - 626 - list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) { 627 - if (rsvp == rsvp_c) { 628 - 
list_del(&rsvp_c->list); 629 - break; 630 - } 631 - } 632 - 633 714 for (type = 0; type < DPU_HW_BLK_MAX; type++) { 634 715 list_for_each_entry(blk, &rm->hw_blks[type], list) { 635 - if (blk->rsvp == rsvp) { 636 - blk->rsvp = NULL; 637 - DPU_DEBUG("rel rsvp %d enc %d %d %d\n", 638 - rsvp->seq, rsvp->enc_id, 639 - blk->type, blk->id); 640 - } 641 - if (blk->rsvp_nxt == rsvp) { 642 - blk->rsvp_nxt = NULL; 643 - DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n", 644 - rsvp->seq, rsvp->enc_id, 645 - blk->type, blk->id); 716 + if (blk->enc_id == enc_id) { 717 + blk->enc_id = 0; 718 + DPU_DEBUG("rel enc %d %d %d\n", enc_id, 719 + type, blk->id); 646 720 } 647 721 } 648 722 } 649 - 650 - kfree(rsvp); 651 723 } 652 724 653 725 void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc) 654 726 { 655 - struct dpu_rm_rsvp *rsvp; 656 - 657 - if (!rm || !enc) { 658 - DPU_ERROR("invalid params\n"); 659 - return; 660 - } 661 - 662 727 mutex_lock(&rm->rm_lock); 663 728 664 - rsvp = _dpu_rm_get_rsvp(rm, enc); 665 - if (!rsvp) { 666 - DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id); 667 - goto end; 668 - } 729 + _dpu_rm_release_reservation(rm, enc->base.id); 669 730 670 - _dpu_rm_release_rsvp(rm, rsvp); 671 - end: 672 731 mutex_unlock(&rm->rm_lock); 673 - } 674 - 675 - static void _dpu_rm_commit_rsvp(struct dpu_rm *rm, struct dpu_rm_rsvp *rsvp) 676 - { 677 - struct dpu_rm_hw_blk *blk; 678 - enum dpu_hw_blk_type type; 679 - 680 - /* Swap next rsvp to be the active */ 681 - for (type = 0; type < DPU_HW_BLK_MAX; type++) { 682 - list_for_each_entry(blk, &rm->hw_blks[type], list) { 683 - if (blk->rsvp_nxt) { 684 - blk->rsvp = blk->rsvp_nxt; 685 - blk->rsvp_nxt = NULL; 686 - } 687 - } 688 - } 689 732 } 690 733 691 734 int dpu_rm_reserve( ··· 618 815 struct msm_display_topology topology, 619 816 bool test_only) 620 817 { 621 - struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt; 622 818 struct dpu_rm_requirements reqs; 623 819 int ret; 624 820 ··· 630 828 631 829 mutex_lock(&rm->rm_lock); 
632 830 633 - _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN); 634 - 635 831 ret = _dpu_rm_populate_requirements(rm, enc, crtc_state, &reqs, 636 832 topology); 637 833 if (ret) { ··· 637 837 goto end; 638 838 } 639 839 640 - /* 641 - * We only support one active reservation per-hw-block. But to implement 642 - * transactional semantics for test-only, and for allowing failure while 643 - * modifying your existing reservation, over the course of this 644 - * function we can have two reservations: 645 - * Current: Existing reservation 646 - * Next: Proposed reservation. The proposed reservation may fail, or may 647 - * be discarded if in test-only mode. 648 - * If reservation is successful, and we're not in test-only, then we 649 - * replace the current with the next. 650 - */ 651 - rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL); 652 - if (!rsvp_nxt) { 653 - ret = -ENOMEM; 654 - goto end; 655 - } 656 - 657 - rsvp_cur = _dpu_rm_get_rsvp(rm, enc); 658 - 659 - /* Check the proposed reservation, store it in hw's "next" field */ 660 - ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, rsvp_nxt, &reqs); 661 - 662 - _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT); 663 - 840 + ret = _dpu_rm_make_reservation(rm, enc, crtc_state, &reqs); 664 841 if (ret) { 665 842 DPU_ERROR("failed to reserve hw resources: %d\n", ret); 666 - _dpu_rm_release_rsvp(rm, rsvp_nxt); 843 + _dpu_rm_release_reservation(rm, enc->base.id); 667 844 } else if (test_only) { 668 - /* 669 - * Normally, if test_only, test the reservation and then undo 670 - * However, if the user requests LOCK, then keep the reservation 671 - * made during the atomic_check phase. 
672 - */ 673 - DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n", 674 - rsvp_nxt->seq, rsvp_nxt->enc_id); 675 - _dpu_rm_release_rsvp(rm, rsvp_nxt); 676 - } else { 677 - _dpu_rm_release_rsvp(rm, rsvp_cur); 678 - 679 - _dpu_rm_commit_rsvp(rm, rsvp_nxt); 845 + /* test_only: test the reservation and then undo */ 846 + DPU_DEBUG("test_only: discard test [enc: %d]\n", 847 + enc->base.id); 848 + _dpu_rm_release_reservation(rm, enc->base.id); 680 849 } 681 - 682 - _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL); 683 850 684 851 end: 685 852 mutex_unlock(&rm->rm_lock);
+1 -27
drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
··· 22 22 23 23 /** 24 24 * struct dpu_rm - DPU dynamic hardware resource manager 25 - * @dev: device handle for event logging purposes 26 - * @rsvps: list of hardware reservations by each crtc->encoder->connector 27 25 * @hw_blks: array of lists of hardware resources present in the system, one 28 26 * list per type of hardware block 29 - * @hw_mdp: hardware object for mdp_top 30 27 * @lm_max_width: cached layer mixer maximum width 31 - * @rsvp_next_seq: sequence number for next reservation for debugging purposes 32 28 * @rm_lock: resource manager mutex 33 29 */ 34 30 struct dpu_rm { 35 - struct drm_device *dev; 36 - struct list_head rsvps; 37 31 struct list_head hw_blks[DPU_HW_BLK_MAX]; 38 - struct dpu_hw_mdp *hw_mdp; 39 32 uint32_t lm_max_width; 40 - uint32_t rsvp_next_seq; 41 33 struct mutex rm_lock; 42 34 }; 43 35 ··· 59 67 * @rm: DPU Resource Manager handle 60 68 * @cat: Pointer to hardware catalog 61 69 * @mmio: mapped register io address of MDP 62 - * @dev: device handle for event logging purposes 63 70 * @Return: 0 on Success otherwise -ERROR 64 71 */ 65 72 int dpu_rm_init(struct dpu_rm *rm, 66 73 struct dpu_mdss_cfg *cat, 67 - void __iomem *mmio, 68 - struct drm_device *dev); 74 + void __iomem *mmio); 69 75 70 76 /** 71 77 * dpu_rm_destroy - Free all memory allocated by dpu_rm_init ··· 102 112 void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc); 103 113 104 114 /** 105 - * dpu_rm_get_mdp - Retrieve HW block for MDP TOP. 106 - * This is never reserved, and is usable by any display. 
107 - * @rm: DPU Resource Manager handle 108 - * @Return: Pointer to hw block or NULL 109 - */ 110 - struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm); 111 - 112 - /** 113 115 * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list 114 116 * using dpu_rm_get_hw 115 117 * @iter: iter object to initialize ··· 126 144 * @Return: true on match found, false on no match found 127 145 */ 128 146 bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter); 129 - 130 - /** 131 - * dpu_rm_check_property_topctl - validate property bitmask before it is set 132 - * @val: user's proposed topology control bitmask 133 - * @Return: 0 on success or error 134 - */ 135 - int dpu_rm_check_property_topctl(uint64_t val); 136 - 137 147 #endif /* __DPU_RM_H__ */
+11 -17
drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
··· 831 831 ); 832 832 833 833 DECLARE_EVENT_CLASS(dpu_rm_iter_template, 834 - TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id), 835 - TP_ARGS(id, type, enc_id), 834 + TP_PROTO(uint32_t id, uint32_t enc_id), 835 + TP_ARGS(id, enc_id), 836 836 TP_STRUCT__entry( 837 837 __field( uint32_t, id ) 838 - __field( enum dpu_hw_blk_type, type ) 839 838 __field( uint32_t, enc_id ) 840 839 ), 841 840 TP_fast_assign( 842 841 __entry->id = id; 843 - __entry->type = type; 844 842 __entry->enc_id = enc_id; 845 843 ), 846 - TP_printk("id:%d type:%d enc_id:%u", __entry->id, __entry->type, 847 - __entry->enc_id) 844 + TP_printk("id:%d enc_id:%u", __entry->id, __entry->enc_id) 848 845 ); 849 846 DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf, 850 - TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id), 851 - TP_ARGS(id, type, enc_id) 847 + TP_PROTO(uint32_t id, uint32_t enc_id), 848 + TP_ARGS(id, enc_id) 852 849 ); 853 850 DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls, 854 - TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id), 855 - TP_ARGS(id, type, enc_id) 851 + TP_PROTO(uint32_t id, uint32_t enc_id), 852 + TP_ARGS(id, enc_id) 856 853 ); 857 854 858 855 TRACE_EVENT(dpu_rm_reserve_lms, 859 - TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id, 860 - uint32_t pp_id), 861 - TP_ARGS(id, type, enc_id, pp_id), 856 + TP_PROTO(uint32_t id, uint32_t enc_id, uint32_t pp_id), 857 + TP_ARGS(id, enc_id, pp_id), 862 858 TP_STRUCT__entry( 863 859 __field( uint32_t, id ) 864 - __field( enum dpu_hw_blk_type, type ) 865 860 __field( uint32_t, enc_id ) 866 861 __field( uint32_t, pp_id ) 867 862 ), 868 863 TP_fast_assign( 869 864 __entry->id = id; 870 - __entry->type = type; 871 865 __entry->enc_id = enc_id; 872 866 __entry->pp_id = pp_id; 873 867 ), 874 - TP_printk("id:%d type:%d enc_id:%u pp_id:%u", __entry->id, 875 - __entry->type, __entry->enc_id, __entry->pp_id) 868 + TP_printk("id:%d enc_id:%u pp_id:%u", __entry->id, 869 + 
__entry->enc_id, __entry->pp_id) 876 870 ); 877 871 878 872 TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+34 -92
drivers/gpu/drm/msm/msm_drv.c
··· 207 207 return val; 208 208 } 209 209 210 - struct vblank_event { 211 - struct list_head node; 210 + struct msm_vblank_work { 211 + struct work_struct work; 212 212 int crtc_id; 213 213 bool enable; 214 + struct msm_drm_private *priv; 214 215 }; 215 216 216 - static void vblank_ctrl_worker(struct kthread_work *work) 217 + static void vblank_ctrl_worker(struct work_struct *work) 217 218 { 218 - struct msm_vblank_ctrl *vbl_ctrl = container_of(work, 219 - struct msm_vblank_ctrl, work); 220 - struct msm_drm_private *priv = container_of(vbl_ctrl, 221 - struct msm_drm_private, vblank_ctrl); 219 + struct msm_vblank_work *vbl_work = container_of(work, 220 + struct msm_vblank_work, work); 221 + struct msm_drm_private *priv = vbl_work->priv; 222 222 struct msm_kms *kms = priv->kms; 223 - struct vblank_event *vbl_ev, *tmp; 224 - unsigned long flags; 225 223 226 - spin_lock_irqsave(&vbl_ctrl->lock, flags); 227 - list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { 228 - list_del(&vbl_ev->node); 229 - spin_unlock_irqrestore(&vbl_ctrl->lock, flags); 224 + if (vbl_work->enable) 225 + kms->funcs->enable_vblank(kms, priv->crtcs[vbl_work->crtc_id]); 226 + else 227 + kms->funcs->disable_vblank(kms, priv->crtcs[vbl_work->crtc_id]); 230 228 231 - if (vbl_ev->enable) 232 - kms->funcs->enable_vblank(kms, 233 - priv->crtcs[vbl_ev->crtc_id]); 234 - else 235 - kms->funcs->disable_vblank(kms, 236 - priv->crtcs[vbl_ev->crtc_id]); 237 - 238 - kfree(vbl_ev); 239 - 240 - spin_lock_irqsave(&vbl_ctrl->lock, flags); 241 - } 242 - 243 - spin_unlock_irqrestore(&vbl_ctrl->lock, flags); 229 + kfree(vbl_work); 244 230 } 245 231 246 232 static int vblank_ctrl_queue_work(struct msm_drm_private *priv, 247 233 int crtc_id, bool enable) 248 234 { 249 - struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; 250 - struct vblank_event *vbl_ev; 251 - unsigned long flags; 235 + struct msm_vblank_work *vbl_work; 252 236 253 - vbl_ev = kzalloc(sizeof(*vbl_ev), GFP_ATOMIC); 254 - if (!vbl_ev) 237 
+ vbl_work = kzalloc(sizeof(*vbl_work), GFP_ATOMIC); 238 + if (!vbl_work) 255 239 return -ENOMEM; 256 240 257 - vbl_ev->crtc_id = crtc_id; 258 - vbl_ev->enable = enable; 241 + INIT_WORK(&vbl_work->work, vblank_ctrl_worker); 259 242 260 - spin_lock_irqsave(&vbl_ctrl->lock, flags); 261 - list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list); 262 - spin_unlock_irqrestore(&vbl_ctrl->lock, flags); 243 + vbl_work->crtc_id = crtc_id; 244 + vbl_work->enable = enable; 245 + vbl_work->priv = priv; 263 246 264 - kthread_queue_work(&priv->disp_thread[crtc_id].worker, 265 - &vbl_ctrl->work); 247 + queue_work(priv->wq, &vbl_work->work); 266 248 267 249 return 0; 268 250 } ··· 256 274 struct msm_drm_private *priv = ddev->dev_private; 257 275 struct msm_kms *kms = priv->kms; 258 276 struct msm_mdss *mdss = priv->mdss; 259 - struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl; 260 - struct vblank_event *vbl_ev, *tmp; 261 277 int i; 262 278 263 279 /* We must cancel and cleanup any pending vblank enable/disable 264 280 * work before drm_irq_uninstall() to avoid work re-enabling an 265 281 * irq after uninstall has disabled it. 
266 282 */ 267 - kthread_flush_work(&vbl_ctrl->work); 268 - list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) { 269 - list_del(&vbl_ev->node); 270 - kfree(vbl_ev); 271 - } 272 283 273 - /* clean up display commit/event worker threads */ 284 + flush_workqueue(priv->wq); 285 + destroy_workqueue(priv->wq); 286 + 287 + /* clean up event worker threads */ 274 288 for (i = 0; i < priv->num_crtcs; i++) { 275 - if (priv->disp_thread[i].thread) { 276 - kthread_flush_worker(&priv->disp_thread[i].worker); 277 - kthread_stop(priv->disp_thread[i].thread); 278 - priv->disp_thread[i].thread = NULL; 279 - } 280 - 281 289 if (priv->event_thread[i].thread) { 282 - kthread_flush_worker(&priv->event_thread[i].worker); 283 - kthread_stop(priv->event_thread[i].thread); 290 + kthread_destroy_worker(&priv->event_thread[i].worker); 284 291 priv->event_thread[i].thread = NULL; 285 292 } 286 293 } ··· 293 322 pm_runtime_get_sync(dev); 294 323 drm_irq_uninstall(ddev); 295 324 pm_runtime_put_sync(dev); 296 - 297 - flush_workqueue(priv->wq); 298 - destroy_workqueue(priv->wq); 299 325 300 326 if (kms && kms->funcs) 301 327 kms->funcs->destroy(kms); ··· 458 490 priv->wq = alloc_ordered_workqueue("msm", 0); 459 491 460 492 INIT_LIST_HEAD(&priv->inactive_list); 461 - INIT_LIST_HEAD(&priv->vblank_ctrl.event_list); 462 - kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker); 463 - spin_lock_init(&priv->vblank_ctrl.lock); 464 493 465 494 drm_mode_config_init(ddev); 466 495 ··· 519 554 */ 520 555 param.sched_priority = 16; 521 556 for (i = 0; i < priv->num_crtcs; i++) { 522 - 523 - /* initialize display thread */ 524 - priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id; 525 - kthread_init_worker(&priv->disp_thread[i].worker); 526 - priv->disp_thread[i].dev = ddev; 527 - priv->disp_thread[i].thread = 528 - kthread_run(kthread_worker_fn, 529 - &priv->disp_thread[i].worker, 530 - "crtc_commit:%d", priv->disp_thread[i].crtc_id); 531 - if (IS_ERR(priv->disp_thread[i].thread)) { 
532 - DRM_DEV_ERROR(dev, "failed to create crtc_commit kthread\n"); 533 - priv->disp_thread[i].thread = NULL; 534 - goto err_msm_uninit; 535 - } 536 - 537 - ret = sched_setscheduler(priv->disp_thread[i].thread, 538 - SCHED_FIFO, &param); 539 - if (ret) 540 - dev_warn(dev, "disp_thread set priority failed: %d\n", 541 - ret); 542 - 543 557 /* initialize event thread */ 544 558 priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id; 545 559 kthread_init_worker(&priv->event_thread[i].worker); ··· 533 589 goto err_msm_uninit; 534 590 } 535 591 536 - /** 537 - * event thread should also run at same priority as disp_thread 538 - * because it is handling frame_done events. A lower priority 539 - * event thread and higher priority disp_thread can causes 540 - * frame_pending counters beyond 2. This can lead to commit 541 - * failure at crtc commit level. 542 - */ 543 592 ret = sched_setscheduler(priv->event_thread[i].thread, 544 593 SCHED_FIFO, &param); 545 594 if (ret) ··· 851 914 ret = -EINVAL; 852 915 break; 853 916 } 854 - ret = copy_from_user(msm_obj->name, 855 - u64_to_user_ptr(args->value), args->len); 917 + if (copy_from_user(msm_obj->name, u64_to_user_ptr(args->value), 918 + args->len)) { 919 + msm_obj->name[0] = '\0'; 920 + ret = -EFAULT; 921 + break; 922 + } 856 923 msm_obj->name[args->len] = '\0'; 857 924 for (i = 0; i < args->len; i++) { 858 925 if (!isprint(msm_obj->name[i])) { ··· 872 931 } 873 932 args->len = strlen(msm_obj->name); 874 933 if (args->value) { 875 - ret = copy_to_user(u64_to_user_ptr(args->value), 876 - msm_obj->name, args->len); 934 + if (copy_to_user(u64_to_user_ptr(args->value), 935 + msm_obj->name, args->len)) 936 + ret = -EFAULT; 877 937 } 878 938 break; 879 939 }
+1 -9
drivers/gpu/drm/msm/msm_drv.h
··· 77 77 PLANE_PROP_MAX_NUM 78 78 }; 79 79 80 - struct msm_vblank_ctrl { 81 - struct kthread_work work; 82 - struct list_head event_list; 83 - spinlock_t lock; 84 - }; 85 - 86 80 #define MSM_GPU_MAX_RINGS 4 87 81 #define MAX_H_TILES_PER_DISPLAY 2 88 82 ··· 120 126 121 127 /** 122 128 * struct msm_display_info - defines display properties 123 - * @intf_type: DRM_MODE_CONNECTOR_ display type 129 + * @intf_type: DRM_MODE_ENCODER_ type 124 130 * @capabilities: Bitmask of display flags 125 131 * @num_of_h_tiles: Number of horizontal tiles in case of split interface 126 132 * @h_tile_instance: Controller instance used per tile. Number of elements is ··· 193 199 unsigned int num_crtcs; 194 200 struct drm_crtc *crtcs[MAX_CRTCS]; 195 201 196 - struct msm_drm_thread disp_thread[MAX_CRTCS]; 197 202 struct msm_drm_thread event_thread[MAX_CRTCS]; 198 203 199 204 unsigned int num_encoders; ··· 221 228 struct notifier_block vmap_notifier; 222 229 struct shrinker shrinker; 223 230 224 - struct msm_vblank_ctrl vblank_ctrl; 225 231 struct drm_atomic_state *pm_state; 226 232 }; 227 233
+13
include/drm/drm_dp_helper.h
··· 560 560 # define DP_TEST_LINK_EDID_READ (1 << 2) 561 561 # define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ 562 562 # define DP_TEST_LINK_FAUX_PATTERN (1 << 4) /* DPCD >= 1.2 */ 563 + # define DP_TEST_LINK_AUDIO_PATTERN (1 << 5) /* DPCD >= 1.2 */ 564 + # define DP_TEST_LINK_AUDIO_DISABLED_VIDEO (1 << 6) /* DPCD >= 1.2 */ 563 565 564 566 #define DP_TEST_LINK_RATE 0x219 565 567 # define DP_LINK_RATE_162 (0x6) ··· 610 608 # define DP_COLOR_FORMAT_RGB (0 << 1) 611 609 # define DP_COLOR_FORMAT_YCbCr422 (1 << 1) 612 610 # define DP_COLOR_FORMAT_YCbCr444 (2 << 1) 611 + # define DP_TEST_DYNAMIC_RANGE_VESA (0 << 3) 613 612 # define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3) 614 613 # define DP_TEST_YCBCR_COEFFICIENTS (1 << 4) 615 614 # define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4) ··· 660 657 661 658 #define DP_TEST_SINK 0x270 662 659 # define DP_TEST_SINK_START (1 << 0) 660 + #define DP_TEST_AUDIO_MODE 0x271 661 + #define DP_TEST_AUDIO_PATTERN_TYPE 0x272 662 + #define DP_TEST_AUDIO_PERIOD_CH1 0x273 663 + #define DP_TEST_AUDIO_PERIOD_CH2 0x274 664 + #define DP_TEST_AUDIO_PERIOD_CH3 0x275 665 + #define DP_TEST_AUDIO_PERIOD_CH4 0x276 666 + #define DP_TEST_AUDIO_PERIOD_CH5 0x277 667 + #define DP_TEST_AUDIO_PERIOD_CH6 0x278 668 + #define DP_TEST_AUDIO_PERIOD_CH7 0x279 669 + #define DP_TEST_AUDIO_PERIOD_CH8 0x27A 663 670 664 671 #define DP_FEC_STATUS 0x280 /* 1.4 */ 665 672 # define DP_FEC_DECODE_EN_DETECTED (1 << 0)