Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/xe/display: Implement display support

As for display, the intent is to share the display code with the i915
driver so that there is maximum reuse there.

We do this by recompiling i915/display code twice.
Now that i915 has been adapted to support the Xe build, we can add
the xe/display support.

This initial work is a collaboration of many people and unfortunately
this squashed patch won't fully honor the proper credits.
But let's try to add a few from the squashed patches:

Co-developed-by: Matthew Brost <matthew.brost@intel.com>
Co-developed-by: Jani Nikula <jani.nikula@intel.com>
Co-developed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Co-developed-by: Matt Roper <matthew.d.roper@intel.com>
Co-developed-by: Mauro Carvalho Chehab <mchehab@kernel.org>
Co-developed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Co-developed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>

Authored by Maarten Lankhorst and committed by Rodrigo Vivi
44e69495 a839e365

+2873 -51
+1
drivers/gpu/drm/xe/.kunitconfig
··· 6 6 CONFIG_DRM_FBDEV_EMULATION=y 7 7 CONFIG_DRM_KMS_HELPER=y 8 8 CONFIG_DRM_XE=y 9 + CONFIG_DRM_XE_DISPLAY=n 9 10 CONFIG_EXPERT=y 10 11 CONFIG_FB=y 11 12 CONFIG_DRM_XE_KUNIT_TEST=y
+22
drivers/gpu/drm/xe/Kconfig
··· 12 12 select DRM_KMS_HELPER 13 13 select DRM_PANEL 14 14 select DRM_SUBALLOC_HELPER 15 + select DRM_DISPLAY_DP_HELPER 16 + select DRM_DISPLAY_HDCP_HELPER 17 + select DRM_DISPLAY_HDMI_HELPER 18 + select DRM_DISPLAY_HELPER 19 + select DRM_MIPI_DSI 15 20 select RELAY 16 21 select IRQ_WORK 22 + # i915 depends on ACPI_VIDEO when ACPI is enabled 23 + # but for select to work, need to select ACPI_VIDEO's dependencies, ick 24 + select BACKLIGHT_CLASS_DEVICE if ACPI 25 + select INPUT if ACPI 26 + select ACPI_VIDEO if X86 && ACPI 27 + select ACPI_BUTTON if ACPI 28 + select ACPI_WMI if ACPI 17 29 select SYNC_FILE 18 30 select IOSF_MBI 19 31 select CRC32 ··· 44 32 Experimental driver for Intel Xe series GPUs 45 33 46 34 If "M" is selected, the module will be called xe. 35 + 36 + config DRM_XE_DISPLAY 37 + bool "Enable display support" 38 + depends on DRM_XE && EXPERT && DRM_XE=m 39 + select FB_IOMEM_HELPERS 40 + select I2C 41 + select I2C_ALGOBIT 42 + default y 43 + help 44 + Disable this option only if you want to compile out display support. 47 45 48 46 config DRM_XE_FORCE_PROBE 49 47 string "Force probe xe for selected Intel hardware IDs"
+136 -5
drivers/gpu/drm/xe/Makefile
··· 24 24 subdir-ccflags-y += $(call cc-disable-warning, frame-address) 25 25 subdir-ccflags-$(CONFIG_DRM_XE_WERROR) += -Werror 26 26 27 - # Fine grained warnings disable 28 - CFLAGS_xe_pci.o = $(call cc-disable-warning, override-init) 29 - 30 27 subdir-ccflags-y += -I$(obj) -I$(srctree)/$(src) 31 28 32 29 # generated sources ··· 123 126 # graphics hardware monitoring (HWMON) support 124 127 xe-$(CONFIG_HWMON) += xe_hwmon.o 125 128 126 - obj-$(CONFIG_DRM_XE) += xe.o 127 - obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/ 129 + # i915 Display compat #defines and #includes 130 + subdir-ccflags-$(CONFIG_DRM_XE_DISPLAY) += \ 131 + -I$(srctree)/$(src)/display/ext \ 132 + -I$(srctree)/$(src)/compat-i915-headers \ 133 + -I$(srctree)/drivers/gpu/drm/xe/display/ \ 134 + -I$(srctree)/drivers/gpu/drm/i915/display/ \ 135 + -Ddrm_i915_gem_object=xe_bo \ 136 + -Ddrm_i915_private=xe_device 137 + 138 + CFLAGS_i915-display/intel_fbdev.o = $(call cc-disable-warning, override-init) 139 + CFLAGS_i915-display/intel_display_device.o = $(call cc-disable-warning, override-init) 140 + 141 + # Rule to build SOC code shared with i915 142 + $(obj)/i915-soc/%.o: $(srctree)/drivers/gpu/drm/i915/soc/%.c FORCE 143 + $(call cmd,force_checksrc) 144 + $(call if_changed_rule,cc_o_c) 145 + 146 + # Rule to build display code shared with i915 147 + $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE 148 + $(call cmd,force_checksrc) 149 + $(call if_changed_rule,cc_o_c) 150 + 151 + # Display code specific to xe 152 + xe-$(CONFIG_DRM_XE_DISPLAY) += \ 153 + xe_display.o \ 154 + display/xe_fb_pin.o \ 155 + display/xe_hdcp_gsc.o \ 156 + display/xe_plane_initial.o \ 157 + display/xe_display_rps.o \ 158 + display/intel_fbdev_fb.o \ 159 + display/intel_fb_bo.o \ 160 + display/ext/i915_irq.o \ 161 + display/ext/i915_utils.o 162 + 163 + # SOC code shared with i915 164 + xe-$(CONFIG_DRM_XE_DISPLAY) += \ 165 + i915-soc/intel_dram.o \ 166 + i915-soc/intel_pch.o 167 + 168 + # Display code shared with 
i915 169 + xe-$(CONFIG_DRM_XE_DISPLAY) += \ 170 + i915-display/icl_dsi.o \ 171 + i915-display/intel_atomic.o \ 172 + i915-display/intel_atomic_plane.o \ 173 + i915-display/intel_audio.o \ 174 + i915-display/intel_backlight.o \ 175 + i915-display/intel_bios.o \ 176 + i915-display/intel_bw.o \ 177 + i915-display/intel_cdclk.o \ 178 + i915-display/intel_color.o \ 179 + i915-display/intel_combo_phy.o \ 180 + i915-display/intel_connector.o \ 181 + i915-display/intel_crtc.o \ 182 + i915-display/intel_crtc_state_dump.o \ 183 + i915-display/intel_cursor.o \ 184 + i915-display/intel_cx0_phy.o \ 185 + i915-display/intel_ddi.o \ 186 + i915-display/intel_ddi_buf_trans.o \ 187 + i915-display/intel_display.o \ 188 + i915-display/intel_display_debugfs.o \ 189 + i915-display/intel_display_debugfs_params.o \ 190 + i915-display/intel_display_device.o \ 191 + i915-display/intel_display_driver.o \ 192 + i915-display/intel_display_irq.o \ 193 + i915-display/intel_display_params.o \ 194 + i915-display/intel_display_power.o \ 195 + i915-display/intel_display_power_map.o \ 196 + i915-display/intel_display_power_well.o \ 197 + i915-display/intel_display_trace.o \ 198 + i915-display/intel_display_wa.o \ 199 + i915-display/intel_dkl_phy.o \ 200 + i915-display/intel_dmc.o \ 201 + i915-display/intel_dp.o \ 202 + i915-display/intel_dp_aux.o \ 203 + i915-display/intel_dp_aux_backlight.o \ 204 + i915-display/intel_dp_hdcp.o \ 205 + i915-display/intel_dp_link_training.o \ 206 + i915-display/intel_dp_mst.o \ 207 + i915-display/intel_dpll.o \ 208 + i915-display/intel_dpll_mgr.o \ 209 + i915-display/intel_dpt_common.o \ 210 + i915-display/intel_drrs.o \ 211 + i915-display/intel_dsb.o \ 212 + i915-display/intel_dsi.o \ 213 + i915-display/intel_dsi_dcs_backlight.o \ 214 + i915-display/intel_dsi_vbt.o \ 215 + i915-display/intel_fb.o \ 216 + i915-display/intel_fbc.o \ 217 + i915-display/intel_fdi.o \ 218 + i915-display/intel_fifo_underrun.o \ 219 + i915-display/intel_frontbuffer.o \ 220 + 
i915-display/intel_global_state.o \ 221 + i915-display/intel_gmbus.o \ 222 + i915-display/intel_hdcp.o \ 223 + i915-display/intel_hdmi.o \ 224 + i915-display/intel_hotplug.o \ 225 + i915-display/intel_hotplug_irq.o \ 226 + i915-display/intel_hti.o \ 227 + i915-display/intel_link_bw.o \ 228 + i915-display/intel_lspcon.o \ 229 + i915-display/intel_modeset_lock.o \ 230 + i915-display/intel_modeset_setup.o \ 231 + i915-display/intel_modeset_verify.o \ 232 + i915-display/intel_panel.o \ 233 + i915-display/intel_pipe_crc.o \ 234 + i915-display/intel_pmdemand.o \ 235 + i915-display/intel_pps.o \ 236 + i915-display/intel_psr.o \ 237 + i915-display/intel_qp_tables.o \ 238 + i915-display/intel_quirks.o \ 239 + i915-display/intel_snps_phy.o \ 240 + i915-display/intel_tc.o \ 241 + i915-display/intel_vblank.o \ 242 + i915-display/intel_vdsc.o \ 243 + i915-display/intel_vga.o \ 244 + i915-display/intel_vrr.o \ 245 + i915-display/intel_wm.o \ 246 + i915-display/skl_scaler.o \ 247 + i915-display/skl_universal_plane.o \ 248 + i915-display/skl_watermark.o 128 249 129 250 xe-$(CONFIG_PERF_EVENTS) += xe_pmu.o 130 251 252 + ifeq ($(CONFIG_ACPI),y) 253 + xe-$(CONFIG_DRM_XE_DISPLAY) += \ 254 + i915-display/intel_acpi.o \ 255 + i915-display/intel_opregion.o 256 + endif 257 + 258 + ifeq ($(CONFIG_DRM_FBDEV_EMULATION),y) 259 + xe-$(CONFIG_DRM_XE_DISPLAY) += i915-display/intel_fbdev.o 260 + endif 261 + 262 + obj-$(CONFIG_DRM_XE) += xe.o 263 + obj-$(CONFIG_DRM_XE_KUNIT_TEST) += tests/ 264 + 131 265 # header test 132 266 hdrtest_find_args := -not -path xe_rtp_helpers.h 267 + ifneq ($(CONFIG_DRM_XE_DISPLAY),y) 268 + hdrtest_find_args += -not -path display/\* -not -path compat-i915-headers/\* -not -path xe_display.h 269 + endif 133 270 134 271 always-$(CONFIG_DRM_XE_WERROR) += \ 135 272 $(patsubst %.h,%.hdrtest, $(shell cd $(srctree)/$(src) && find * -name '*.h' $(hdrtest_find_args)))
+1
drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_lmem.h
··· 1 + /* Empty */
+17
drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_mman.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _I915_GEM_MMAN_H_
#define _I915_GEM_MMAN_H_

#include "xe_bo_types.h"
#include <drm/drm_prime.h>

/*
 * i915 compat: map a framebuffer BO into userspace.  On xe this is simply
 * the generic PRIME mmap helper operating on the BO's GEM object.
 */
static inline int i915_gem_fb_mmap(struct xe_bo *bo, struct vm_area_struct *vma)
{
	return drm_gem_prime_mmap(&bo->ttm.base, vma);
}

#endif
+65
drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2022 Intel Corporation 4 + */ 5 + 6 + #ifndef _I915_GEM_OBJECT_H_ 7 + #define _I915_GEM_OBJECT_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + #include "xe_bo.h" 12 + 13 + #define i915_gem_object_is_shmem(obj) ((obj)->flags & XE_BO_CREATE_SYSTEM_BIT) 14 + 15 + static inline dma_addr_t i915_gem_object_get_dma_address(const struct xe_bo *bo, pgoff_t n) 16 + { 17 + /* Should never be called */ 18 + WARN_ON(1); 19 + return n; 20 + } 21 + 22 + static inline bool i915_gem_object_is_tiled(const struct xe_bo *bo) 23 + { 24 + /* legacy tiling is unused */ 25 + return false; 26 + } 27 + 28 + static inline bool i915_gem_object_is_userptr(const struct xe_bo *bo) 29 + { 30 + /* legacy tiling is unused */ 31 + return false; 32 + } 33 + 34 + static inline int i915_gem_object_read_from_page(struct xe_bo *bo, 35 + u32 ofs, u64 *ptr, u32 size) 36 + { 37 + struct ttm_bo_kmap_obj map; 38 + void *virtual; 39 + bool is_iomem; 40 + int ret; 41 + 42 + XE_WARN_ON(size != 8); 43 + 44 + ret = xe_bo_lock(bo, true); 45 + if (ret) 46 + return ret; 47 + 48 + ret = ttm_bo_kmap(&bo->ttm, ofs >> PAGE_SHIFT, 1, &map); 49 + if (ret) 50 + goto out_unlock; 51 + 52 + ofs &= ~PAGE_MASK; 53 + virtual = ttm_kmap_obj_virtual(&map, &is_iomem); 54 + if (is_iomem) 55 + *ptr = readq((void __iomem *)(virtual + ofs)); 56 + else 57 + *ptr = *(u64 *)(virtual + ofs); 58 + 59 + ttm_bo_kunmap(&map); 60 + out_unlock: 61 + xe_bo_unlock(bo); 62 + return ret; 63 + } 64 + 65 + #endif
+11
drivers/gpu/drm/xe/compat-i915-headers/gt/intel_rps.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_RPS_H__
#define __INTEL_RPS_H__

/*
 * Compat stub: gen5 hardware is never driven by xe, so the RPS interrupt
 * handler expands to an empty statement expression.
 */
#define gen5_rps_irq_handler(x) ({})

#endif /* __INTEL_RPS_H__ */
drivers/gpu/drm/xe/compat-i915-headers/i915_active_types.h
+19
drivers/gpu/drm/xe/compat-i915-headers/i915_config.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __I915_CONFIG_H__
#define __I915_CONFIG_H__

#include <linux/sched.h>

struct drm_i915_private;

/*
 * Compat stub: xe applies no bounded fence-wait timeout here, so the
 * shared display code always gets an unbounded wait.
 */
static inline unsigned long
i915_fence_timeout(const struct drm_i915_private *i915)
{
	return MAX_SCHEDULE_TIMEOUT;
}

#endif /* __I915_CONFIG_H__ */
+14
drivers/gpu/drm/xe/compat-i915-headers/i915_debugfs.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __I915_DEBUGFS_H__
#define __I915_DEBUGFS_H__

struct drm_i915_gem_object;
struct seq_file;

/* Compat stub: xe does not implement i915's debugfs object dump. */
static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}

#endif /* __I915_DEBUGFS_H__ */
+230
drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */
#ifndef _XE_I915_DRV_H_
#define _XE_I915_DRV_H_

/*
 * "Adaptation header" to allow i915 display to also build for xe driver.
 * TODO: refactor i915 and xe so this can cease to exist
 */

#include <drm/drm_drv.h>

#include "gem/i915_gem_object.h"

#include "soc/intel_pch.h"
#include "xe_device.h"
#include "xe_bo.h"
#include "xe_pm.h"
#include "xe_step.h"
#include "i915_gpu_error.h"
#include "i915_reg_defs.h"
#include "i915_utils.h"
#include "intel_step.h"
#include "intel_uc_fw.h"
#include "intel_uncore.h"
#include "intel_runtime_pm.h"
#include <linux/pm_runtime.h>

/* drm_i915_private is #defined to xe_device by the xe Makefile. */
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return container_of(dev, struct drm_i915_private, drm);
}

static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}


#define INTEL_JASPERLAKE 0
#define INTEL_ELKHARTLAKE 0
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
#define INTEL_DEVID(dev_priv) ((dev_priv)->info.devid)
/*
 * Pre-Xe platforms are never driven by this driver: the checks below
 * evaluate to false while still referencing the argument so callers
 * compile unchanged.
 */
#define IS_I830(dev_priv) (dev_priv && 0)
#define IS_I845G(dev_priv) (dev_priv && 0)
#define IS_I85X(dev_priv) (dev_priv && 0)
#define IS_I865G(dev_priv) (dev_priv && 0)
#define IS_I915G(dev_priv) (dev_priv && 0)
#define IS_I915GM(dev_priv) (dev_priv && 0)
#define IS_I945G(dev_priv) (dev_priv && 0)
#define IS_I945GM(dev_priv) (dev_priv && 0)
#define IS_I965G(dev_priv) (dev_priv && 0)
#define IS_I965GM(dev_priv) (dev_priv && 0)
#define IS_G45(dev_priv) (dev_priv && 0)
#define IS_GM45(dev_priv) (dev_priv && 0)
#define IS_G4X(dev_priv) (dev_priv && 0)
#define IS_PINEVIEW(dev_priv) (dev_priv && 0)
#define IS_G33(dev_priv) (dev_priv && 0)
#define IS_IRONLAKE(dev_priv) (dev_priv && 0)
#define IS_IRONLAKE_M(dev_priv) (dev_priv && 0)
#define IS_SANDYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0)
#define IS_IVB_GT1(dev_priv) (dev_priv && 0)
#define IS_VALLEYVIEW(dev_priv) (dev_priv && 0)
#define IS_CHERRYVIEW(dev_priv) (dev_priv && 0)
#define IS_HASWELL(dev_priv) (dev_priv && 0)
#define IS_BROADWELL(dev_priv) (dev_priv && 0)
#define IS_SKYLAKE(dev_priv) (dev_priv && 0)
#define IS_BROXTON(dev_priv) (dev_priv && 0)
#define IS_KABYLAKE(dev_priv) (dev_priv && 0)
#define IS_GEMINILAKE(dev_priv) (dev_priv && 0)
#define IS_COFFEELAKE(dev_priv) (dev_priv && 0)
#define IS_COMETLAKE(dev_priv) (dev_priv && 0)
#define IS_ICELAKE(dev_priv) (dev_priv && 0)
#define IS_JASPERLAKE(dev_priv) (dev_priv && 0)
#define IS_ELKHARTLAKE(dev_priv) (dev_priv && 0)
/* Platforms xe does drive map onto xe's own platform enumeration. */
#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_TIGERLAKE)
#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE)
#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1)
#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S)
#define IS_ALDERLAKE_P(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_P)
#define IS_XEHPSDV(dev_priv) (dev_priv && 0)
#define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2)
#define IS_PONTEVECCHIO(dev_priv) IS_PLATFORM(dev_priv, XE_PVC)
#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE)
#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE)

#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0)
#define IS_BROADWELL_ULX(dev_priv) (dev_priv && 0)

#define IP_VER(ver, rel) ((ver) << 8 | (rel))

#define INTEL_DISPLAY_ENABLED(xe) (HAS_DISPLAY((xe)) && !intel_opregion_headless_sku((xe)))

/* xe stores the IP version as major*100 + minor ("verx100"). */
#define IS_GRAPHICS_VER(xe, first, last) \
	((xe)->info.graphics_verx100 >= first * 100 && \
	 (xe)->info.graphics_verx100 <= (last*100 + 99))
#define IS_MOBILE(xe) (xe && 0)
#define HAS_LLC(xe) (!IS_DGFX((xe)))

#define HAS_GMD_ID(xe) GRAPHICS_VERx100(xe) >= 1270

/* Workarounds not handled yet */
#define IS_DISPLAY_STEP(xe, first, last) ({u8 __step = (xe)->info.step.display; first <= __step && __step <= last; })
#define IS_GRAPHICS_STEP(xe, first, last) ({u8 __step = (xe)->info.step.graphics; first <= __step && __step <= last; })

#define IS_LP(xe) (0)
#define IS_GEN9_LP(xe) (0)
#define IS_GEN9_BC(xe) (0)

#define IS_TIGERLAKE_UY(xe) (xe && 0)
#define IS_COMETLAKE_ULX(xe) (xe && 0)
#define IS_COFFEELAKE_ULX(xe) (xe && 0)
#define IS_KABYLAKE_ULX(xe) (xe && 0)
#define IS_SKYLAKE_ULX(xe) (xe && 0)
#define IS_HASWELL_ULX(xe) (xe && 0)
#define IS_COMETLAKE_ULT(xe) (xe && 0)
#define IS_COFFEELAKE_ULT(xe) (xe && 0)
#define IS_KABYLAKE_ULT(xe) (xe && 0)
#define IS_SKYLAKE_ULT(xe) (xe && 0)

#define IS_DG1_GRAPHICS_STEP(xe, first, last) (IS_DG1(xe) && IS_GRAPHICS_STEP(xe, first, last))
#define IS_DG2_GRAPHICS_STEP(xe, variant, first, last) \
	((xe)->info.subplatform == XE_SUBPLATFORM_DG2_ ## variant && \
	 IS_GRAPHICS_STEP(xe, first, last))
#define IS_XEHPSDV_GRAPHICS_STEP(xe, first, last) (IS_XEHPSDV(xe) && IS_GRAPHICS_STEP(xe, first, last))

/* XXX: No basedie stepping support yet */
#define IS_PVC_BD_STEP(xe, first, last) (!WARN_ON(1) && IS_PONTEVECCHIO(xe))

#define IS_TIGERLAKE_DISPLAY_STEP(xe, first, last) (IS_TIGERLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_ROCKETLAKE_DISPLAY_STEP(xe, first, last) (IS_ROCKETLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_DG1_DISPLAY_STEP(xe, first, last) (IS_DG1(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_DG2_DISPLAY_STEP(xe, first, last) (IS_DG2(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_ADLP_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_P(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_ADLS_DISPLAY_STEP(xe, first, last) (IS_ALDERLAKE_S(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_JSL_EHL_DISPLAY_STEP(xe, first, last) (IS_JSL_EHL(xe) && IS_DISPLAY_STEP(xe, first, last))
#define IS_MTL_DISPLAY_STEP(xe, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))

/* FIXME: Add subplatform here */
/* NOTE(review): checks the *display* step, not the graphics step — confirm intended. */
#define IS_MTL_GRAPHICS_STEP(xe, sub, first, last) (IS_METEORLAKE(xe) && IS_DISPLAY_STEP(xe, first, last))

#define IS_DG2_G10(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G10)
#define IS_DG2_G11(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G11)
#define IS_DG2_G12(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_DG2_G12)
#define IS_RAPTORLAKE_U(xe) ((xe)->info.subplatform == XE_SUBPLATFORM_ALDERLAKE_P_RPLU)
#define IS_ICL_WITH_PORT_F(xe) (xe && 0)
#define HAS_FLAT_CCS(xe) (xe_device_has_flat_ccs(xe))
#define to_intel_bo(x) gem_to_xe_bo((x))
#define mkwrite_device_info(xe) (INTEL_INFO(xe))

#define HAS_128_BYTE_Y_TILING(xe) (xe || 1)

#define intel_has_gpu_reset(a) (a && 0)

#include "intel_wakeref.h"

/*
 * Map i915's runtime-pm entry points onto xe_pm; the returned bool plays
 * the role of i915's wakeref cookie.
 */
static inline bool intel_runtime_pm_get(struct xe_runtime_pm *pm)
{
	struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);

	if (xe_pm_runtime_get(xe) < 0) {
		xe_pm_runtime_put(xe);
		return false;
	}
	return true;
}

static inline bool intel_runtime_pm_get_if_in_use(struct xe_runtime_pm *pm)
{
	struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);

	return xe_pm_runtime_get_if_active(xe);
}

static inline void intel_runtime_pm_put_unchecked(struct xe_runtime_pm *pm)
{
	struct xe_device *xe = container_of(pm, struct xe_device, runtime_pm);

	xe_pm_runtime_put(xe);
}

static inline void intel_runtime_pm_put(struct xe_runtime_pm *pm, bool wakeref)
{
	if (wakeref)
		intel_runtime_pm_put_unchecked(pm);
}

#define intel_runtime_pm_get_raw intel_runtime_pm_get
#define intel_runtime_pm_put_raw intel_runtime_pm_put
/* xe does not track wakeref asserts; compile the checks out. */
#define assert_rpm_wakelock_held(x) do { } while (0)
#define assert_rpm_raw_wakeref_held(x) do { } while (0)

/* Forcewake and unclaimed-mmio handling is managed by xe itself. */
#define intel_uncore_forcewake_get(x, y) do { } while (0)
#define intel_uncore_forcewake_put(x, y) do { } while (0)

#define intel_uncore_arm_unclaimed_mmio_detection(x) do { } while (0)

#define I915_PRIORITY_DISPLAY 0
struct i915_sched_attr {
	int priority;
};
#define i915_gem_fence_wait_priority(fence, attr) do { (void) attr; } while (0)

#define with_intel_runtime_pm(rpm, wf) \
	for ((wf) = intel_runtime_pm_get(rpm); (wf); \
	     intel_runtime_pm_put((rpm), (wf)), (wf) = 0)

#define pdev_to_i915 pdev_to_xe_device
#define RUNTIME_INFO(xe) (&(xe)->info.i915_runtime)

#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
#define HPD_STORM_DEFAULT_THRESHOLD 50

#ifdef CONFIG_ARM64
/*
 * arm64 indirectly includes linux/rtc.h,
 * which defines a irq_lock, so include it
 * here before #define-ing it
 */
#include <linux/rtc.h>
#endif

/* i915 display code spells the lock "i915->irq_lock"; xe keeps it at xe->irq.lock. */
#define irq_lock irq.lock

#endif
+6
drivers/gpu/drm/xe/compat-i915-headers/i915_fixed.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

/* Compat shim: the i915 header is reused as-is. */
#include "../../i915/i915_fixed.h"
+17
drivers/gpu/drm/xe/compat-i915-headers/i915_gpu_error.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _I915_GPU_ERROR_H_
#define _I915_GPU_ERROR_H_

struct drm_i915_error_state_buf;

/*
 * Compat stub: xe has no i915-style error-state capture, so error printing
 * is a no-op.  The __printf attribute keeps format-string checking for
 * callers in the shared display code.
 */
__printf(2, 3)
static inline void
i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
{
}

#endif
+6
drivers/gpu/drm/xe/compat-i915-headers/i915_irq.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

/* Compat shim: the i915 header is reused as-is. */
#include "../../i915/i915_irq.h"
+6
drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

/* Compat shim: the i915 register definitions are reused as-is. */
#include "../../i915/i915_reg.h"
+6
drivers/gpu/drm/xe/compat-i915-headers/i915_reg_defs.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

/* Compat shim: the i915 register helper macros are reused as-is. */
#include "../../i915/i915_reg_defs.h"
+6
drivers/gpu/drm/xe/compat-i915-headers/i915_trace.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

/* i915's register-access tracepoint is compiled out under xe. */
#define trace_i915_reg_rw(a...) do { } while (0)
+6
drivers/gpu/drm/xe/compat-i915-headers/i915_utils.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

/* Compat shim: the i915 utility helpers are reused as-is. */
#include "../../i915/i915_utils.h"
+44
drivers/gpu/drm/xe/compat-i915-headers/i915_vgpu.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_

#include <linux/types.h>

struct drm_i915_private;
struct i915_ggtt;

/*
 * xe has no mediated vGPU support: every query below reports "inactive"
 * or "absent", and GGTT ballooning is a successful no-op.
 */
static inline void intel_vgpu_detect(struct drm_i915_private *i915)
{
}
static inline bool intel_vgpu_active(struct drm_i915_private *i915)
{
	return false;
}
static inline void intel_vgpu_register(struct drm_i915_private *i915)
{
}
static inline bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915)
{
	return false;
}
static inline bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915)
{
	return false;
}
static inline bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915)
{
	return false;
}
static inline int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
	return 0;
}
static inline void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
}

#endif /* _I915_VGPU_H_ */
+31
drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef I915_VMA_H
#define I915_VMA_H

#include <uapi/drm/i915_drm.h>
#include <drm/drm_mm.h>

/* We don't want these from i915_drm.h in case of Xe */
#undef I915_TILING_X
#undef I915_TILING_Y
#define I915_TILING_X 0
#define I915_TILING_Y 0

struct xe_bo;

/* Minimal VMA: the framebuffer BO, an optional DPT BO, and a GGTT node. */
struct i915_vma {
	struct xe_bo *bo, *dpt;
	struct drm_mm_node node;
};


/*
 * GGTT offset of the VMA.
 * NOTE(review): node.start is wider than the u32 return type — assumes
 * display GGTT offsets always fit in 32 bits; confirm.
 */
static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
{
	return vma->node.start;
}

#endif
+74
drivers/gpu/drm/xe/compat-i915-headers/i915_vma_types.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#include <linux/types.h>
#include <linux/build_bug.h>

/* XX: Figure out how to handle this vma mapping in xe */
struct intel_remapped_plane_info {
	/* in gtt pages */
	u32 offset:31;
	u32 linear:1;
	union {
		/* in gtt pages for !linear */
		struct {
			u16 width;
			u16 height;
			u16 src_stride;
			u16 dst_stride;
		};

		/* in gtt pages for linear */
		u32 size;
	};
} __packed;

struct intel_remapped_info {
	struct intel_remapped_plane_info plane[4];
	/* in gtt pages */
	u32 plane_alignment;
} __packed;

struct intel_rotation_info {
	struct intel_remapped_plane_info plane[2];
} __packed;

/*
 * Each view type's enumerator value doubles as the size of its union
 * branch, so the sizes must stay unique (checked below).
 */
enum i915_gtt_view_type {
	I915_GTT_VIEW_NORMAL = 0,
	I915_GTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
	I915_GTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};

/* Compile-time layout checks; never called at runtime. */
static inline void assert_i915_gem_gtt_types(void)
{
	BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
	BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16));

	/* Check that rotation/remapped shares offsets for simplicity */
	BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
		     offsetof(struct intel_rotation_info, plane[0]));
	BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
		     offsetofend(struct intel_rotation_info, plane[1]));

	/* As we encode the size of each branch inside the union into its type,
	 * we have to be careful that each branch has a unique size.
	 */
	switch ((enum i915_gtt_view_type)0) {
	case I915_GTT_VIEW_NORMAL:
	case I915_GTT_VIEW_ROTATED:
	case I915_GTT_VIEW_REMAPPED:
		/* gcc complains if these are identical cases */
		break;
	}
}

struct i915_gtt_view {
	enum i915_gtt_view_type type;
	union {
		/* Members need to contain no holes/padding */
		struct intel_rotation_info rotated;
		struct intel_remapped_info remapped;
	};
};
+6
drivers/gpu/drm/xe/compat-i915-headers/intel_clock_gating.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

/* Compat shim: the i915 header is reused as-is. */
#include "../../i915/intel_clock_gating.h"
+6
drivers/gpu/drm/xe/compat-i915-headers/intel_mchbar_regs.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

/* Compat shim: the i915 MCHBAR register definitions are reused as-is. */
#include "../../i915/intel_mchbar_regs.h"
+6
drivers/gpu/drm/xe/compat-i915-headers/intel_pci_config.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

/* Compat shim: the i915 PCI config definitions are reused as-is. */
#include "../../i915/intel_pci_config.h"
+42
drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_PCODE_H__
#define __INTEL_PCODE_H__

#include "intel_uncore.h"
#include "xe_pcode.h"

/*
 * Forward i915's pcode mailbox API to xe_pcode on the root GT.
 * Note: the microsecond fast-timeout is dropped; a zero slow timeout is
 * promoted to 1 ms so xe_pcode always gets a non-zero timeout.
 */
static inline int
snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
			int fast_timeout_us, int slow_timeout_ms)
{
	return xe_pcode_write_timeout(__compat_uncore_to_gt(uncore), mbox, val,
				      slow_timeout_ms ?: 1);
}

static inline int
snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
{

	return xe_pcode_write(__compat_uncore_to_gt(uncore), mbox, val);
}

static inline int
snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
	return xe_pcode_read(__compat_uncore_to_gt(uncore), mbox, val, val1);
}

/* Poll-until-reply variant, forwarded unchanged to xe_pcode_request(). */
static inline int
skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
		  u32 request, u32 reply_mask, u32 reply,
		  int timeout_base_ms)
{
	return xe_pcode_request(__compat_uncore_to_gt(uncore), mbox, request, reply_mask, reply,
				timeout_base_ms);
}

#endif /* __INTEL_PCODE_H__ */
+22
drivers/gpu/drm/xe/compat-i915-headers/intel_runtime_pm.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#include "intel_wakeref.h"

/* Suspend modes the shared display code distinguishes between. */
enum i915_drm_suspend_mode {
	I915_DRM_SUSPEND_IDLE,
	I915_DRM_SUSPEND_MEM,
	I915_DRM_SUSPEND_HIBERNATE,
};

/* xe's runtime-pm bookkeeping stands in for i915's. */
#define intel_runtime_pm xe_runtime_pm

/* xe does not track wakeref asserts; both toggles are no-ops. */
static inline void disable_rpm_wakeref_asserts(void *rpm)
{
}

static inline void enable_rpm_wakeref_asserts(void *rpm)
{
}
+20
drivers/gpu/drm/xe/compat-i915-headers/intel_step.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_STEP_H__
#define __INTEL_STEP_H__

#include "xe_device_types.h"
#include "xe_step.h"

/* Display code asks for the stepping name; answer from xe's step info. */
#define intel_display_step_name xe_display_step_name

static inline
const char *xe_display_step_name(struct xe_device *xe)
{
	return xe_step_name(xe->info.step.display);
}

#endif /* __INTEL_STEP_H__ */
+11
drivers/gpu/drm/xe/compat-i915-headers/intel_uc_fw.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _INTEL_UC_FW_H_
#define _INTEL_UC_FW_H_

/* URL shared code prints to point users at the linux-firmware repository. */
#define INTEL_UC_FIRMWARE_URL "https://git.kernel.org/pub/scm/linux/kernel/git/firmware/linux-firmware.git"

#endif
+175
drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_UNCORE_H__ 7 + #define __INTEL_UNCORE_H__ 8 + 9 + #include "xe_device.h" 10 + #include "xe_device_types.h" 11 + #include "xe_mmio.h" 12 + 13 + static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore) 14 + { 15 + struct xe_device *xe = container_of(uncore, struct xe_device, uncore); 16 + 17 + return xe_root_mmio_gt(xe); 18 + } 19 + 20 + static inline u32 intel_uncore_read(struct intel_uncore *uncore, 21 + i915_reg_t i915_reg) 22 + { 23 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 24 + 25 + return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); 26 + } 27 + 28 + static inline u32 intel_uncore_read8(struct intel_uncore *uncore, 29 + i915_reg_t i915_reg) 30 + { 31 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 32 + 33 + return xe_mmio_read8(__compat_uncore_to_gt(uncore), reg); 34 + } 35 + 36 + static inline u32 intel_uncore_read16(struct intel_uncore *uncore, 37 + i915_reg_t i915_reg) 38 + { 39 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 40 + 41 + return xe_mmio_read16(__compat_uncore_to_gt(uncore), reg); 42 + } 43 + 44 + static inline u64 45 + intel_uncore_read64_2x32(struct intel_uncore *uncore, 46 + i915_reg_t i915_lower_reg, i915_reg_t i915_upper_reg) 47 + { 48 + struct xe_reg lower_reg = XE_REG(i915_mmio_reg_offset(i915_lower_reg)); 49 + struct xe_reg upper_reg = XE_REG(i915_mmio_reg_offset(i915_upper_reg)); 50 + u32 upper, lower, old_upper; 51 + int loop = 0; 52 + 53 + upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg); 54 + do { 55 + old_upper = upper; 56 + lower = xe_mmio_read32(__compat_uncore_to_gt(uncore), lower_reg); 57 + upper = xe_mmio_read32(__compat_uncore_to_gt(uncore), upper_reg); 58 + } while (upper != old_upper && loop++ < 2); 59 + 60 + return (u64)upper << 32 | lower; 61 + } 62 + 63 + static inline void intel_uncore_posting_read(struct 
intel_uncore *uncore, 64 + i915_reg_t i915_reg) 65 + { 66 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 67 + 68 + xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); 69 + } 70 + 71 + static inline void intel_uncore_write(struct intel_uncore *uncore, 72 + i915_reg_t i915_reg, u32 val) 73 + { 74 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 75 + 76 + xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val); 77 + } 78 + 79 + static inline u32 intel_uncore_rmw(struct intel_uncore *uncore, 80 + i915_reg_t i915_reg, u32 clear, u32 set) 81 + { 82 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 83 + 84 + return xe_mmio_rmw32(__compat_uncore_to_gt(uncore), reg, clear, set); 85 + } 86 + 87 + static inline int intel_wait_for_register(struct intel_uncore *uncore, 88 + i915_reg_t i915_reg, u32 mask, 89 + u32 value, unsigned int timeout) 90 + { 91 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 92 + 93 + return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value, 94 + timeout * USEC_PER_MSEC, NULL, false); 95 + } 96 + 97 + static inline int intel_wait_for_register_fw(struct intel_uncore *uncore, 98 + i915_reg_t i915_reg, u32 mask, 99 + u32 value, unsigned int timeout) 100 + { 101 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 102 + 103 + return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value, 104 + timeout * USEC_PER_MSEC, NULL, false); 105 + } 106 + 107 + static inline int 108 + __intel_wait_for_register(struct intel_uncore *uncore, i915_reg_t i915_reg, 109 + u32 mask, u32 value, unsigned int fast_timeout_us, 110 + unsigned int slow_timeout_ms, u32 *out_value) 111 + { 112 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 113 + 114 + return xe_mmio_wait32(__compat_uncore_to_gt(uncore), reg, mask, value, 115 + fast_timeout_us + 1000 * slow_timeout_ms, 116 + out_value, false); 117 + } 118 + 119 + static inline u32 intel_uncore_read_fw(struct intel_uncore *uncore, 120 + 
i915_reg_t i915_reg) 121 + { 122 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 123 + 124 + return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); 125 + } 126 + 127 + static inline void intel_uncore_write_fw(struct intel_uncore *uncore, 128 + i915_reg_t i915_reg, u32 val) 129 + { 130 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 131 + 132 + xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val); 133 + } 134 + 135 + static inline u32 intel_uncore_read_notrace(struct intel_uncore *uncore, 136 + i915_reg_t i915_reg) 137 + { 138 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 139 + 140 + return xe_mmio_read32(__compat_uncore_to_gt(uncore), reg); 141 + } 142 + 143 + static inline void intel_uncore_write_notrace(struct intel_uncore *uncore, 144 + i915_reg_t i915_reg, u32 val) 145 + { 146 + struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg)); 147 + 148 + xe_mmio_write32(__compat_uncore_to_gt(uncore), reg, val); 149 + } 150 + 151 + static inline void __iomem *intel_uncore_regs(struct intel_uncore *uncore) 152 + { 153 + struct xe_device *xe = container_of(uncore, struct xe_device, uncore); 154 + 155 + return xe_device_get_root_tile(xe)->mmio.regs; 156 + } 157 + 158 + /* 159 + * The raw_reg_{read,write} macros are intended as a micro-optimization for 160 + * interrupt handlers so that the pointer indirection on uncore->regs can 161 + * be computed once (and presumably cached in a register) instead of generating 162 + * extra load instructions for each MMIO access. 163 + * 164 + * Given that these macros are only intended for non-GSI interrupt registers 165 + * (and the goal is to avoid extra instructions generated by the compiler), 166 + * these macros do not account for uncore->gsi_offset. Any caller that needs 167 + * to use these macros on a GSI register is responsible for adding the 168 + * appropriate GSI offset to the 'base' parameter. 
169 + */ 170 + #define raw_reg_read(base, reg) \ 171 + readl(base + i915_mmio_reg_offset(reg)) 172 + #define raw_reg_write(base, reg, value) \ 173 + writel(value, base + i915_mmio_reg_offset(reg)) 174 + 175 + #endif /* __INTEL_UNCORE_H__ */
+8
drivers/gpu/drm/xe/compat-i915-headers/intel_wakeref.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include <linux/types.h> 7 + 8 + typedef bool intel_wakeref_t;
+28
drivers/gpu/drm/xe/compat-i915-headers/pxp/intel_pxp.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef __INTEL_PXP_H__ 7 + #define __INTEL_PXP_H__ 8 + 9 + #include <linux/errno.h> 10 + #include <linux/types.h> 11 + 12 + struct drm_i915_gem_object; 13 + struct intel_pxp; 14 + 15 + static inline int intel_pxp_key_check(struct intel_pxp *pxp, 16 + struct drm_i915_gem_object *obj, 17 + bool assign) 18 + { 19 + return -ENODEV; 20 + } 21 + 22 + static inline bool 23 + i915_gem_object_is_protected(const struct drm_i915_gem_object *obj) 24 + { 25 + return false; 26 + } 27 + 28 + #endif
+6
drivers/gpu/drm/xe/compat-i915-headers/soc/intel_dram.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "../../../i915/soc/intel_dram.h"
+6
drivers/gpu/drm/xe/compat-i915-headers/soc/intel_gmch.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "../../../i915/soc/intel_gmch.h"
+6
drivers/gpu/drm/xe/compat-i915-headers/soc/intel_pch.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "../../../i915/soc/intel_pch.h"
+132
drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2013-2021 Intel Corporation 4 + */ 5 + 6 + #ifndef _VLV_SIDEBAND_H_ 7 + #define _VLV_SIDEBAND_H_ 8 + 9 + #include <linux/types.h> 10 + 11 + #include "vlv_sideband_reg.h" 12 + 13 + enum pipe; 14 + struct drm_i915_private; 15 + 16 + enum { 17 + VLV_IOSF_SB_BUNIT, 18 + VLV_IOSF_SB_CCK, 19 + VLV_IOSF_SB_CCU, 20 + VLV_IOSF_SB_DPIO, 21 + VLV_IOSF_SB_FLISDSI, 22 + VLV_IOSF_SB_GPIO, 23 + VLV_IOSF_SB_NC, 24 + VLV_IOSF_SB_PUNIT, 25 + }; 26 + 27 + static inline void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports) 28 + { 29 + } 30 + static inline u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg) 31 + { 32 + return 0; 33 + } 34 + static inline void vlv_iosf_sb_write(struct drm_i915_private *i915, 35 + u8 port, u32 reg, u32 val) 36 + { 37 + } 38 + static inline void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports) 39 + { 40 + } 41 + static inline void vlv_bunit_get(struct drm_i915_private *i915) 42 + { 43 + } 44 + static inline u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg) 45 + { 46 + return 0; 47 + } 48 + static inline void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val) 49 + { 50 + } 51 + static inline void vlv_bunit_put(struct drm_i915_private *i915) 52 + { 53 + } 54 + static inline void vlv_cck_get(struct drm_i915_private *i915) 55 + { 56 + } 57 + static inline u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg) 58 + { 59 + return 0; 60 + } 61 + static inline void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val) 62 + { 63 + } 64 + static inline void vlv_cck_put(struct drm_i915_private *i915) 65 + { 66 + } 67 + static inline void vlv_ccu_get(struct drm_i915_private *i915) 68 + { 69 + } 70 + static inline u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg) 71 + { 72 + return 0; 73 + } 74 + static inline void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val) 75 + { 76 + } 77 + static 
inline void vlv_ccu_put(struct drm_i915_private *i915) 78 + { 79 + } 80 + static inline void vlv_dpio_get(struct drm_i915_private *i915) 81 + { 82 + } 83 + static inline u32 vlv_dpio_read(struct drm_i915_private *i915, int pipe, int reg) 84 + { 85 + return 0; 86 + } 87 + static inline void vlv_dpio_write(struct drm_i915_private *i915, 88 + int pipe, int reg, u32 val) 89 + { 90 + } 91 + static inline void vlv_dpio_put(struct drm_i915_private *i915) 92 + { 93 + } 94 + static inline void vlv_flisdsi_get(struct drm_i915_private *i915) 95 + { 96 + } 97 + static inline u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg) 98 + { 99 + return 0; 100 + } 101 + static inline void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val) 102 + { 103 + } 104 + static inline void vlv_flisdsi_put(struct drm_i915_private *i915) 105 + { 106 + } 107 + static inline void vlv_nc_get(struct drm_i915_private *i915) 108 + { 109 + } 110 + static inline u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr) 111 + { 112 + return 0; 113 + } 114 + static inline void vlv_nc_put(struct drm_i915_private *i915) 115 + { 116 + } 117 + static inline void vlv_punit_get(struct drm_i915_private *i915) 118 + { 119 + } 120 + static inline u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr) 121 + { 122 + return 0; 123 + } 124 + static inline int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val) 125 + { 126 + return 0; 127 + } 128 + static inline void vlv_punit_put(struct drm_i915_private *i915) 129 + { 130 + } 131 + 132 + #endif /* _VLV_SIDEBAND_H_ */
+6
drivers/gpu/drm/xe/compat-i915-headers/vlv_sideband_reg.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "../../i915/vlv_sideband_reg.h"
+77
drivers/gpu/drm/xe/display/ext/i915_irq.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "i915_drv.h" 7 + #include "i915_irq.h" 8 + #include "i915_reg.h" 9 + #include "intel_uncore.h" 10 + 11 + void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, 12 + i915_reg_t iir, i915_reg_t ier) 13 + { 14 + intel_uncore_write(uncore, imr, 0xffffffff); 15 + intel_uncore_posting_read(uncore, imr); 16 + 17 + intel_uncore_write(uncore, ier, 0); 18 + 19 + /* IIR can theoretically queue up two events. Be paranoid. */ 20 + intel_uncore_write(uncore, iir, 0xffffffff); 21 + intel_uncore_posting_read(uncore, iir); 22 + intel_uncore_write(uncore, iir, 0xffffffff); 23 + intel_uncore_posting_read(uncore, iir); 24 + } 25 + 26 + /* 27 + * We should clear IMR at preinstall/uninstall, and just check at postinstall. 28 + */ 29 + void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg) 30 + { 31 + struct xe_device *xe = container_of(uncore, struct xe_device, uncore); 32 + u32 val = intel_uncore_read(uncore, reg); 33 + 34 + if (val == 0) 35 + return; 36 + 37 + drm_WARN(&xe->drm, 1, 38 + "Interrupt register 0x%x is not zero: 0x%08x\n", 39 + i915_mmio_reg_offset(reg), val); 40 + intel_uncore_write(uncore, reg, 0xffffffff); 41 + intel_uncore_posting_read(uncore, reg); 42 + intel_uncore_write(uncore, reg, 0xffffffff); 43 + intel_uncore_posting_read(uncore, reg); 44 + } 45 + 46 + void gen3_irq_init(struct intel_uncore *uncore, 47 + i915_reg_t imr, u32 imr_val, 48 + i915_reg_t ier, u32 ier_val, 49 + i915_reg_t iir) 50 + { 51 + gen3_assert_iir_is_zero(uncore, iir); 52 + 53 + intel_uncore_write(uncore, ier, ier_val); 54 + intel_uncore_write(uncore, imr, imr_val); 55 + intel_uncore_posting_read(uncore, imr); 56 + } 57 + 58 + bool intel_irqs_enabled(struct xe_device *xe) 59 + { 60 + /* 61 + * XXX: i915 has a racy handling of the irq.enabled, since it doesn't 62 + * lock its transitions. 
Because of that, the irq.enabled sometimes 63 + * is not read with the irq.lock in place. 64 + * However, the most critical cases like vblank and page flips are 65 + * properly using the locks. 66 + * We cannot take the lock in here or run any kind of assert because 67 + * of i915 inconsistency. 68 + * But at this point the xe irq is better protected against races, 69 + * although the full solution would be protecting the i915 side. 70 + */ 71 + return xe->irq.enabled; 72 + } 73 + 74 + void intel_synchronize_irq(struct xe_device *xe) 75 + { 76 + synchronize_irq(to_pci_dev(xe->drm.dev)->irq); 77 + }
+22
drivers/gpu/drm/xe/display/ext/i915_utils.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "i915_drv.h" 7 + 8 + bool i915_vtd_active(struct drm_i915_private *i915) 9 + { 10 + if (device_iommu_mapped(i915->drm.dev)) 11 + return true; 12 + 13 + /* Running as a guest, we assume the host is enforcing VT'd */ 14 + return i915_run_as_guest(); 15 + } 16 + 17 + /* i915 specific, just put here for shutting it up */ 18 + int __i915_inject_probe_error(struct drm_i915_private *i915, int err, 19 + const char *func, int line) 20 + { 21 + return 0; 22 + }
+74
drivers/gpu/drm/xe/display/intel_fb_bo.c
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #include <drm/drm_modeset_helper.h> 7 + 8 + #include "i915_drv.h" 9 + #include "intel_display_types.h" 10 + #include "intel_fb_bo.h" 11 + 12 + void intel_fb_bo_framebuffer_fini(struct xe_bo *bo) 13 + { 14 + if (bo->flags & XE_BO_CREATE_PINNED_BIT) { 15 + /* Unpin our kernel fb first */ 16 + xe_bo_lock(bo, false); 17 + xe_bo_unpin(bo); 18 + xe_bo_unlock(bo); 19 + } 20 + xe_bo_put(bo); 21 + } 22 + 23 + int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb, 24 + struct xe_bo *bo, 25 + struct drm_mode_fb_cmd2 *mode_cmd) 26 + { 27 + struct drm_i915_private *i915 = to_i915(bo->ttm.base.dev); 28 + int ret; 29 + 30 + xe_bo_get(bo); 31 + 32 + ret = ttm_bo_reserve(&bo->ttm, true, false, NULL); 33 + if (ret) 34 + return ret; 35 + 36 + if (!(bo->flags & XE_BO_SCANOUT_BIT)) { 37 + /* 38 + * XE_BO_SCANOUT_BIT should ideally be set at creation, or is 39 + * automatically set when creating FB. We cannot change caching 40 + * mode when the boect is VM_BINDed, so we can only set 41 + * coherency with display when unbound. 
42 + */ 43 + if (XE_IOCTL_DBG(i915, !list_empty(&bo->ttm.base.gpuva.list))) { 44 + ttm_bo_unreserve(&bo->ttm); 45 + return -EINVAL; 46 + } 47 + bo->flags |= XE_BO_SCANOUT_BIT; 48 + } 49 + ttm_bo_unreserve(&bo->ttm); 50 + 51 + return ret; 52 + } 53 + 54 + struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915, 55 + struct drm_file *filp, 56 + const struct drm_mode_fb_cmd2 *mode_cmd) 57 + { 58 + struct drm_i915_gem_object *bo; 59 + struct drm_gem_object *gem = drm_gem_object_lookup(filp, mode_cmd->handles[0]); 60 + 61 + if (!gem) 62 + return ERR_PTR(-ENOENT); 63 + 64 + bo = gem_to_xe_bo(gem); 65 + /* Require vram placement or dma-buf import */ 66 + if (IS_DGFX(i915) && 67 + !xe_bo_can_migrate(gem_to_xe_bo(gem), XE_PL_VRAM0) && 68 + bo->ttm.type != ttm_bo_type_sg) { 69 + drm_gem_object_put(gem); 70 + return ERR_PTR(-EREMOTE); 71 + } 72 + 73 + return bo; 74 + }
+24
drivers/gpu/drm/xe/display/intel_fb_bo.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __INTEL_FB_BO_H__
#define __INTEL_FB_BO_H__

struct drm_file;
struct drm_mode_fb_cmd2;
struct drm_i915_private;
struct intel_framebuffer;
struct xe_bo;

/* Release the framebuffer's hold on @bo (unpin if pinned, then put). */
void intel_fb_bo_framebuffer_fini(struct xe_bo *bo);

/* Reference @bo for @intel_fb and ensure it is scanout-capable. */
int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
				 struct xe_bo *bo,
				 struct drm_mode_fb_cmd2 *mode_cmd);

/* Resolve mode_cmd->handles[0] to a display-usable bo, or an ERR_PTR. */
struct xe_bo *intel_fb_bo_lookup_valid_bo(struct drm_i915_private *i915,
					  struct drm_file *filp,
					  const struct drm_mode_fb_cmd2 *mode_cmd);

#endif /* __INTEL_FB_BO_H__ */
+104
drivers/gpu/drm/xe/display/intel_fbdev_fb.c
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "intel_fbdev_fb.h" 7 + 8 + #include <drm/drm_fb_helper.h> 9 + 10 + #include "xe_gt.h" 11 + #include "xe_ttm_stolen_mgr.h" 12 + 13 + #include "i915_drv.h" 14 + #include "intel_display_types.h" 15 + 16 + struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper, 17 + struct drm_fb_helper_surface_size *sizes) 18 + { 19 + struct drm_framebuffer *fb; 20 + struct drm_device *dev = helper->dev; 21 + struct drm_i915_private *dev_priv = to_i915(dev); 22 + struct drm_mode_fb_cmd2 mode_cmd = {}; 23 + struct drm_i915_gem_object *obj; 24 + int size; 25 + 26 + /* we don't do packed 24bpp */ 27 + if (sizes->surface_bpp == 24) 28 + sizes->surface_bpp = 32; 29 + 30 + mode_cmd.width = sizes->surface_width; 31 + mode_cmd.height = sizes->surface_height; 32 + 33 + mode_cmd.pitches[0] = ALIGN(mode_cmd.width * 34 + DIV_ROUND_UP(sizes->surface_bpp, 8), XE_PAGE_SIZE); 35 + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, 36 + sizes->surface_depth); 37 + 38 + size = mode_cmd.pitches[0] * mode_cmd.height; 39 + size = PAGE_ALIGN(size); 40 + obj = ERR_PTR(-ENODEV); 41 + 42 + if (!IS_DGFX(dev_priv)) { 43 + obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), 44 + NULL, size, 45 + ttm_bo_type_kernel, XE_BO_SCANOUT_BIT | 46 + XE_BO_CREATE_STOLEN_BIT | 47 + XE_BO_CREATE_PINNED_BIT); 48 + if (!IS_ERR(obj)) 49 + drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n"); 50 + else 51 + drm_info(&dev_priv->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj)); 52 + } 53 + if (IS_ERR(obj)) { 54 + obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size, 55 + ttm_bo_type_kernel, XE_BO_SCANOUT_BIT | 56 + XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) | 57 + XE_BO_CREATE_PINNED_BIT); 58 + } 59 + 60 + if (IS_ERR(obj)) { 61 + drm_err(&dev_priv->drm, "failed to allocate framebuffer (%pe)\n", 
obj); 62 + fb = ERR_PTR(-ENOMEM); 63 + goto err; 64 + } 65 + 66 + fb = intel_framebuffer_create(obj, &mode_cmd); 67 + if (IS_ERR(fb)) { 68 + xe_bo_unpin_map_no_vm(obj); 69 + goto err; 70 + } 71 + 72 + drm_gem_object_put(intel_bo_to_drm_bo(obj)); 73 + return fb; 74 + 75 + err: 76 + return fb; 77 + } 78 + 79 + int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info, 80 + struct drm_i915_gem_object *obj, struct i915_vma *vma) 81 + { 82 + struct pci_dev *pdev = to_pci_dev(i915->drm.dev); 83 + 84 + if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) { 85 + if (obj->flags & XE_BO_CREATE_STOLEN_BIT) 86 + info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0); 87 + else 88 + info->fix.smem_start = 89 + pci_resource_start(pdev, 2) + 90 + xe_bo_addr(obj, 0, XE_PAGE_SIZE); 91 + 92 + info->fix.smem_len = obj->ttm.base.size; 93 + } else { 94 + /* XXX: Pure fiction, as the BO may not be physically accessible.. */ 95 + info->fix.smem_start = 0; 96 + info->fix.smem_len = obj->ttm.base.size; 97 + } 98 + XE_WARN_ON(iosys_map_is_null(&obj->vmap)); 99 + 100 + info->screen_base = obj->vmap.vaddr_iomem; 101 + info->screen_size = intel_bo_to_drm_bo(obj)->size; 102 + 103 + return 0; 104 + }
+21
drivers/gpu/drm/xe/display/intel_fbdev_fb.h
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef __INTEL_FBDEV_FB_H__
#define __INTEL_FBDEV_FB_H__

struct drm_fb_helper;
struct drm_fb_helper_surface_size;
struct drm_i915_gem_object;
struct drm_i915_private;
struct fb_info;
struct i915_vma;

/* Allocate and wrap the fbdev framebuffer bo; ERR_PTR on failure. */
struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
					     struct drm_fb_helper_surface_size *sizes);

/* Fill fbdev fix/screen info from a mapped framebuffer bo. */
int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info,
			     struct drm_i915_gem_object *obj, struct i915_vma *vma);

#endif /* __INTEL_FBDEV_FB_H__ */
+17
drivers/gpu/drm/xe/display/xe_display_rps.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "intel_display_rps.h" 7 + 8 + void intel_display_rps_boost_after_vblank(struct drm_crtc *crtc, 9 + struct dma_fence *fence) 10 + { 11 + } 12 + 13 + void intel_display_rps_mark_interactive(struct drm_i915_private *i915, 14 + struct intel_atomic_state *state, 15 + bool interactive) 16 + { 17 + }
+326
drivers/gpu/drm/xe/display/xe_fb_pin.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2021 Intel Corporation 4 + */ 5 + 6 + #include "i915_drv.h" 7 + #include "intel_display_types.h" 8 + #include "intel_dpt.h" 9 + #include "intel_fb.h" 10 + #include "intel_fb_pin.h" 11 + #include "xe_ggtt.h" 12 + #include "xe_gt.h" 13 + 14 + #include <drm/ttm/ttm_bo.h> 15 + 16 + static void 17 + write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_ofs, 18 + u32 width, u32 height, u32 src_stride, u32 dst_stride) 19 + { 20 + struct xe_device *xe = xe_bo_device(bo); 21 + struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; 22 + u32 column, row; 23 + 24 + /* TODO: Maybe rewrite so we can traverse the bo addresses sequentially, 25 + * by writing dpt/ggtt in a different order? 26 + */ 27 + 28 + for (column = 0; column < width; column++) { 29 + u32 src_idx = src_stride * (height - 1) + column + bo_ofs; 30 + 31 + for (row = 0; row < height; row++) { 32 + u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE, 33 + xe->pat.idx[XE_CACHE_WB]); 34 + 35 + iosys_map_wr(map, *dpt_ofs, u64, pte); 36 + *dpt_ofs += 8; 37 + src_idx -= src_stride; 38 + } 39 + 40 + /* The DE ignores the PTEs for the padding tiles */ 41 + *dpt_ofs += (dst_stride - height) * 8; 42 + } 43 + 44 + /* Align to next page */ 45 + *dpt_ofs = ALIGN(*dpt_ofs, 4096); 46 + } 47 + 48 + static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb, 49 + const struct i915_gtt_view *view, 50 + struct i915_vma *vma) 51 + { 52 + struct xe_device *xe = to_xe_device(fb->base.dev); 53 + struct xe_tile *tile0 = xe_device_get_root_tile(xe); 54 + struct xe_ggtt *ggtt = tile0->mem.ggtt; 55 + struct xe_bo *bo = intel_fb_obj(&fb->base), *dpt; 56 + u32 dpt_size, size = bo->ttm.base.size; 57 + 58 + if (view->type == I915_GTT_VIEW_NORMAL) 59 + dpt_size = ALIGN(size / XE_PAGE_SIZE * 8, XE_PAGE_SIZE); 60 + else 61 + /* display uses 4K tiles instead of bytes here, convert to entries.. 
*/ 62 + dpt_size = ALIGN(intel_rotation_info_size(&view->rotated) * 8, 63 + XE_PAGE_SIZE); 64 + 65 + dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, 66 + ttm_bo_type_kernel, 67 + XE_BO_CREATE_VRAM0_BIT | 68 + XE_BO_CREATE_GGTT_BIT); 69 + if (IS_ERR(dpt)) 70 + dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, 71 + ttm_bo_type_kernel, 72 + XE_BO_CREATE_STOLEN_BIT | 73 + XE_BO_CREATE_GGTT_BIT); 74 + if (IS_ERR(dpt)) 75 + dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size, 76 + ttm_bo_type_kernel, 77 + XE_BO_CREATE_SYSTEM_BIT | 78 + XE_BO_CREATE_GGTT_BIT); 79 + if (IS_ERR(dpt)) 80 + return PTR_ERR(dpt); 81 + 82 + if (view->type == I915_GTT_VIEW_NORMAL) { 83 + u32 x; 84 + 85 + for (x = 0; x < size / XE_PAGE_SIZE; x++) { 86 + u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE, 87 + xe->pat.idx[XE_CACHE_WB]); 88 + 89 + iosys_map_wr(&dpt->vmap, x * 8, u64, pte); 90 + } 91 + } else { 92 + const struct intel_rotation_info *rot_info = &view->rotated; 93 + u32 i, dpt_ofs = 0; 94 + 95 + for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) 96 + write_dpt_rotated(bo, &dpt->vmap, &dpt_ofs, 97 + rot_info->plane[i].offset, 98 + rot_info->plane[i].width, 99 + rot_info->plane[i].height, 100 + rot_info->plane[i].src_stride, 101 + rot_info->plane[i].dst_stride); 102 + } 103 + 104 + vma->dpt = dpt; 105 + vma->node = dpt->ggtt_node; 106 + return 0; 107 + } 108 + 109 + static void 110 + write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo_ofs, 111 + u32 width, u32 height, u32 src_stride, u32 dst_stride) 112 + { 113 + struct xe_device *xe = xe_bo_device(bo); 114 + u32 column, row; 115 + 116 + for (column = 0; column < width; column++) { 117 + u32 src_idx = src_stride * (height - 1) + column + bo_ofs; 118 + 119 + for (row = 0; row < height; row++) { 120 + u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE, 121 + xe->pat.idx[XE_CACHE_WB]); 122 + 123 + xe_ggtt_set_pte(ggtt, *ggtt_ofs, pte); 124 + *ggtt_ofs += XE_PAGE_SIZE; 125 + 
src_idx -= src_stride; 126 + } 127 + 128 + /* The DE ignores the PTEs for the padding tiles */ 129 + *ggtt_ofs += (dst_stride - height) * XE_PAGE_SIZE; 130 + } 131 + } 132 + 133 + static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb, 134 + const struct i915_gtt_view *view, 135 + struct i915_vma *vma) 136 + { 137 + struct xe_bo *bo = intel_fb_obj(&fb->base); 138 + struct xe_device *xe = to_xe_device(fb->base.dev); 139 + struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; 140 + u32 align; 141 + int ret; 142 + 143 + /* TODO: Consider sharing framebuffer mapping? 144 + * embed i915_vma inside intel_framebuffer 145 + */ 146 + xe_device_mem_access_get(tile_to_xe(ggtt->tile)); 147 + ret = mutex_lock_interruptible(&ggtt->lock); 148 + if (ret) 149 + goto out; 150 + 151 + align = XE_PAGE_SIZE; 152 + if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) 153 + align = max_t(u32, align, SZ_64K); 154 + 155 + if (bo->ggtt_node.size && view->type == I915_GTT_VIEW_NORMAL) { 156 + vma->node = bo->ggtt_node; 157 + } else if (view->type == I915_GTT_VIEW_NORMAL) { 158 + u32 x, size = bo->ttm.base.size; 159 + 160 + ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size, 161 + align, 0); 162 + if (ret) 163 + goto out_unlock; 164 + 165 + for (x = 0; x < size; x += XE_PAGE_SIZE) { 166 + u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x, 167 + xe->pat.idx[XE_CACHE_WB]); 168 + 169 + xe_ggtt_set_pte(ggtt, vma->node.start + x, pte); 170 + } 171 + } else { 172 + u32 i, ggtt_ofs; 173 + const struct intel_rotation_info *rot_info = &view->rotated; 174 + 175 + /* display seems to use tiles instead of bytes here, so convert it back.. 
*/ 176 + u32 size = intel_rotation_info_size(rot_info) * XE_PAGE_SIZE; 177 + 178 + ret = xe_ggtt_insert_special_node_locked(ggtt, &vma->node, size, 179 + align, 0); 180 + if (ret) 181 + goto out_unlock; 182 + 183 + ggtt_ofs = vma->node.start; 184 + 185 + for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) 186 + write_ggtt_rotated(bo, ggtt, &ggtt_ofs, 187 + rot_info->plane[i].offset, 188 + rot_info->plane[i].width, 189 + rot_info->plane[i].height, 190 + rot_info->plane[i].src_stride, 191 + rot_info->plane[i].dst_stride); 192 + } 193 + 194 + xe_ggtt_invalidate(ggtt); 195 + out_unlock: 196 + mutex_unlock(&ggtt->lock); 197 + out: 198 + xe_device_mem_access_put(tile_to_xe(ggtt->tile)); 199 + return ret; 200 + } 201 + 202 + static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb, 203 + const struct i915_gtt_view *view) 204 + { 205 + struct drm_device *dev = fb->base.dev; 206 + struct xe_device *xe = to_xe_device(dev); 207 + struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); 208 + struct xe_bo *bo = intel_fb_obj(&fb->base); 209 + int ret; 210 + 211 + if (!vma) 212 + return ERR_PTR(-ENODEV); 213 + 214 + /* Remapped view is only required on ADL-P, which xe doesn't support. 
*/ 215 + if (XE_WARN_ON(view->type == I915_GTT_VIEW_REMAPPED)) { 216 + ret = -ENODEV; 217 + goto err; 218 + } 219 + 220 + /* 221 + * Pin the framebuffer, we can't use xe_bo_(un)pin functions as the 222 + * assumptions are incorrect for framebuffers 223 + */ 224 + ret = ttm_bo_reserve(&bo->ttm, false, false, NULL); 225 + if (ret) 226 + goto err; 227 + 228 + if (IS_DGFX(xe)) 229 + ret = xe_bo_migrate(bo, XE_PL_VRAM0); 230 + else 231 + ret = xe_bo_validate(bo, NULL, true); 232 + if (!ret) 233 + ttm_bo_pin(&bo->ttm); 234 + ttm_bo_unreserve(&bo->ttm); 235 + if (ret) 236 + goto err; 237 + 238 + vma->bo = bo; 239 + if (intel_fb_uses_dpt(&fb->base)) 240 + ret = __xe_pin_fb_vma_dpt(fb, view, vma); 241 + else 242 + ret = __xe_pin_fb_vma_ggtt(fb, view, vma); 243 + if (ret) 244 + goto err_unpin; 245 + 246 + return vma; 247 + 248 + err_unpin: 249 + ttm_bo_reserve(&bo->ttm, false, false, NULL); 250 + ttm_bo_unpin(&bo->ttm); 251 + ttm_bo_unreserve(&bo->ttm); 252 + err: 253 + kfree(vma); 254 + return ERR_PTR(ret); 255 + } 256 + 257 + static void __xe_unpin_fb_vma(struct i915_vma *vma) 258 + { 259 + struct xe_device *xe = to_xe_device(vma->bo->ttm.base.dev); 260 + struct xe_ggtt *ggtt = xe_device_get_root_tile(xe)->mem.ggtt; 261 + 262 + if (vma->dpt) 263 + xe_bo_unpin_map_no_vm(vma->dpt); 264 + else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) || 265 + vma->bo->ggtt_node.start != vma->node.start) 266 + xe_ggtt_remove_node(ggtt, &vma->node); 267 + 268 + ttm_bo_reserve(&vma->bo->ttm, false, false, NULL); 269 + ttm_bo_unpin(&vma->bo->ttm); 270 + ttm_bo_unreserve(&vma->bo->ttm); 271 + kfree(vma); 272 + } 273 + 274 + struct i915_vma * 275 + intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 276 + bool phys_cursor, 277 + const struct i915_gtt_view *view, 278 + bool uses_fence, 279 + unsigned long *out_flags) 280 + { 281 + *out_flags = 0; 282 + 283 + return __xe_pin_fb_vma(to_intel_framebuffer(fb), view); 284 + } 285 + 286 + void intel_unpin_fb_vma(struct i915_vma *vma, unsigned 
long flags) 287 + { 288 + __xe_unpin_fb_vma(vma); 289 + } 290 + 291 + int intel_plane_pin_fb(struct intel_plane_state *plane_state) 292 + { 293 + struct drm_framebuffer *fb = plane_state->hw.fb; 294 + struct xe_bo *bo = intel_fb_obj(fb); 295 + struct i915_vma *vma; 296 + 297 + /* We reject creating !SCANOUT fb's, so this is weird.. */ 298 + drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT)); 299 + 300 + vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt); 301 + if (IS_ERR(vma)) 302 + return PTR_ERR(vma); 303 + 304 + plane_state->ggtt_vma = vma; 305 + return 0; 306 + } 307 + 308 + void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) 309 + { 310 + __xe_unpin_fb_vma(old_plane_state->ggtt_vma); 311 + old_plane_state->ggtt_vma = NULL; 312 + } 313 + 314 + /* 315 + * For Xe introduce dummy intel_dpt_create which just return NULL and 316 + * intel_dpt_destroy which does nothing. 317 + */ 318 + struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb) 319 + { 320 + return NULL; 321 + } 322 + 323 + void intel_dpt_destroy(struct i915_address_space *vm) 324 + { 325 + return; 326 + }
+24
drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright 2023, Intel Corporation. 4 + */ 5 + 6 + #include "i915_drv.h" 7 + #include "intel_hdcp_gsc.h" 8 + 9 + int intel_hdcp_gsc_init(struct drm_i915_private *i915) 10 + { 11 + drm_info(&i915->drm, "HDCP support not yet implemented\n"); 12 + return -ENODEV; 13 + } 14 + 15 + void intel_hdcp_gsc_fini(struct drm_i915_private *i915) 16 + { 17 + } 18 + 19 + ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in, 20 + size_t msg_in_len, u8 *msg_out, 21 + size_t msg_out_len) 22 + { 23 + return -ENODEV; 24 + }
+291
drivers/gpu/drm/xe/display/xe_plane_initial.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

/* for ioread64 */
#include <linux/io-64-nonatomic-lo-hi.h>

#include "xe_ggtt.h"

#include "i915_drv.h"
#include "intel_atomic_plane.h"
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_plane_initial.h"

/*
 * Look for an already-active primary plane whose pinned GGTT offset matches
 * @plane_config->base.  If one exists, the BIOS framebuffer is shared across
 * heads; return it through @fb so the caller can reuse it instead of
 * allocating a second BO for the same memory.
 */
static bool
intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
			      const struct intel_initial_plane_config *plane_config,
			      struct drm_framebuffer **fb)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&i915->drm, crtc) {
		struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);
		struct intel_plane *plane =
			to_intel_plane(crtc->base.primary);
		struct intel_plane_state *plane_state =
			to_intel_plane_state(plane->base.state);

		if (!crtc_state->uapi.active)
			continue;

		/* Only planes already pinned into the GGTT are candidates. */
		if (!plane_state->ggtt_vma)
			continue;

		if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
			*fb = plane_state->hw.fb;
			return true;
		}
	}

	return false;
}

/*
 * Wrap the firmware-programmed scanout memory in a pinned, GGTT-mapped BO.
 *
 * On discrete parts the physical address is recovered by reading the GGTT
 * PTE that the BIOS programmed for @plane_config->base; on integrated parts
 * the fb lives in stolen memory at the GGTT offset itself.  Returns NULL on
 * any failure — the caller then falls back to disabling the plane.
 */
static struct xe_bo *
initial_plane_bo(struct xe_device *xe,
		 struct intel_initial_plane_config *plane_config)
{
	struct xe_tile *tile0 = xe_device_get_root_tile(xe);
	struct xe_bo *bo;
	resource_size_t phys_base;
	u32 base, size, flags;
	/* Devices needing 64K VRAM pages must round to 64K, not 4K. */
	u64 page_size = xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K ? SZ_64K : SZ_4K;

	if (plane_config->size == 0)
		return NULL;

	flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;

	base = round_down(plane_config->base, page_size);
	if (IS_DGFX(xe)) {
		u64 __iomem *gte = tile0->mem.ggtt->gsm;
		u64 pte;

		gte += base / XE_PAGE_SIZE;

		/*
		 * Read back the PTE the BIOS wrote; the DM bit tells us the
		 * page really points at device memory.
		 */
		pte = ioread64(gte);
		if (!(pte & XE_GGTT_PTE_DM)) {
			drm_err(&xe->drm,
				"Initial plane programming missing DM bit\n");
			return NULL;
		}

		phys_base = pte & ~(page_size - 1);
		flags |= XE_BO_CREATE_VRAM0_BIT;

		/*
		 * We don't currently expect this to ever be placed in the
		 * stolen portion.
		 */
		if (phys_base >= tile0->mem.vram.usable_size) {
			drm_err(&xe->drm,
				"Initial plane programming using invalid range, phys_base=%pa\n",
				&phys_base);
			return NULL;
		}

		drm_dbg(&xe->drm,
			"Using phys_base=%pa, based on initial plane programming\n",
			&phys_base);
	} else {
		struct ttm_resource_manager *stolen = ttm_manager_type(&xe->ttm, XE_PL_STOLEN);

		if (!stolen)
			return NULL;
		/* Integrated: GGTT offset == offset into stolen memory. */
		phys_base = base;
		flags |= XE_BO_CREATE_STOLEN_BIT;

		/*
		 * If the FB is too big, just don't use it since fbdev is not very
		 * important and we should probably use that space with FBC or other
		 * features.
		 */
		if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
		    plane_config->size * 2 >> PAGE_SHIFT >= stolen->size)
			return NULL;
	}

	/* Cover the whole fb, including the tail the round_down exposed. */
	size = round_up(plane_config->base + plane_config->size,
			page_size);
	size -= base;

	bo = xe_bo_create_pin_map_at(xe, tile0, NULL, size, phys_base,
				     ttm_bo_type_kernel, flags);
	if (IS_ERR(bo)) {
		drm_dbg(&xe->drm,
			"Failed to create bo phys_base=%pa size %u with flags %x: %li\n",
			&phys_base, size, flags, PTR_ERR(bo));
		return NULL;
	}

	return bo;
}

/*
 * Build a struct intel_framebuffer around the BIOS scanout BO.  Returns
 * false if the modifier is unsupported or BO creation/fb init fails.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	struct xe_bo *bo;

	/* Only modifiers we can re-describe to intel_framebuffer_init(). */
	switch (fb->modifier) {
	case DRM_FORMAT_MOD_LINEAR:
	case I915_FORMAT_MOD_X_TILED:
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_4_TILED:
		break;
	default:
		drm_dbg(&dev_priv->drm,
			"Unsupported modifier for initial FB: 0x%llx\n",
			fb->modifier);
		return false;
	}

	mode_cmd.pixel_format = fb->format->format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier;
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	/* NOTE: drm_i915_private is xe_device in the compat headers. */
	bo = initial_plane_bo(dev_priv, plane_config);
	if (!bo)
		return false;

	if (intel_framebuffer_init(to_intel_framebuffer(fb),
				   bo, &mode_cmd)) {
		drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
		goto err_bo;
	}
	/* Reference handed over to fb */
	xe_bo_put(bo);

	return true;

err_bo:
	xe_bo_unpin_map_no_vm(bo);
	return false;
}

/*
 * Take over the firmware framebuffer for @crtc: allocate (or reuse) the fb,
 * pin it, populate the plane state, and flip to it immediately.  On any
 * failure the primary plane is disabled instead of scanning out a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane *plane =
		to_intel_plane(crtc->base.primary);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct drm_framebuffer *fb;
	struct i915_vma *vma;

	/*
	 * TODO:
	 * Disable planes if get_initial_plane_config() failed.
	 * Make sure things work if the surface base is not page aligned.
	 */
	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(crtc, plane_config))
		fb = &plane_config->fb->base;
	else if (!intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb))
		goto nofb;

	plane_state->uapi.rotation = plane_config->rotation;
	intel_fb_fill_view(to_intel_framebuffer(fb),
			   plane_state->uapi.rotation, &plane_state->view);

	vma = intel_pin_and_fence_fb_obj(fb, false, &plane_state->view.gtt,
					 false, &plane_state->flags);
	if (IS_ERR(vma))
		goto nofb;

	plane_state->ggtt_vma = vma;
	plane_state->uapi.src_x = 0;
	plane_state->uapi.src_y = 0;
	/* src coordinates are 16.16 fixed point. */
	plane_state->uapi.src_w = fb->width << 16;
	plane_state->uapi.src_h = fb->height << 16;

	plane_state->uapi.crtc_x = 0;
	plane_state->uapi.crtc_y = 0;
	plane_state->uapi.crtc_w = fb->width;
	plane_state->uapi.crtc_h = fb->height;

	plane_state->uapi.fb = fb;
	drm_framebuffer_get(fb);

	plane_state->uapi.crtc = &crtc->base;
	intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);

	atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);

	plane_config->vma = vma;

	/*
	 * Flip to the newly created mapping ASAP, so we can re-use the
	 * first part of GGTT for WOPCM, prevent flickering, and prevent
	 * the lookup of sysmem scratch pages.
	 */
	plane->check_plane(crtc_state, plane_state);
	plane->async_flip(plane, crtc_state, plane_state, true);
	return;

nofb:
	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	intel_plane_disable_noatomic(crtc, plane);
}

/* Drop the probe-time reference on the (possibly stub) initial fb. */
static void plane_config_fini(struct intel_initial_plane_config *plane_config)
{
	if (plane_config->fb) {
		struct drm_framebuffer *fb = &plane_config->fb->base;

		/* We may only have the stub and not a full framebuffer */
		if (drm_framebuffer_read_refcount(fb))
			drm_framebuffer_put(fb);
		else
			kfree(fb);
	}
}

/*
 * Entry point called per-CRTC during display probe: read the plane config
 * the firmware left behind and inherit its framebuffer.
 */
void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
{
	struct xe_device *xe = to_xe_device(crtc->base.dev);
	struct intel_initial_plane_config plane_config = {};

	/*
	 * Note that reserving the BIOS fb up front prevents us
	 * from stuffing other stolen allocations like the ring
	 * on top. This prevents some ugliness at boot time, and
	 * can even allow for smooth boot transitions if the BIOS
	 * fb is large enough for the active pipe configuration.
	 */
	xe->display.funcs.display->get_initial_plane_config(crtc, &plane_config);

	/*
	 * If the fb is shared between multiple heads, we'll
	 * just get the first one.
	 */
	intel_find_initial_plane_obj(crtc, &plane_config);

	plane_config_fini(&plane_config);
}
+1 -1
drivers/gpu/drm/xe/regs/xe_reg_defs.h
··· 6 6 #ifndef _XE_REG_DEFS_H_ 7 7 #define _XE_REG_DEFS_H_ 8 8 9 - #include "../../i915/i915_reg_defs.h" 9 + #include "compat-i915-headers/i915_reg_defs.h" 10 10 11 11 /** 12 12 * struct xe_reg - Register definition
-13
drivers/gpu/drm/xe/regs/xe_regs.h
··· 56 56 #define GU_MISC_IRQ_OFFSET 0x444f0 57 57 #define GU_MISC_GSE REG_BIT(27) 58 58 59 - #define TRANSCODER_A_OFFSET 0x60000 60 - #define TRANSCODER_B_OFFSET 0x61000 61 - #define TRANSCODER_C_OFFSET 0x62000 62 - #define TRANSCODER_D_OFFSET 0x63000 63 - #define TRANSCODER_DSI0_OFFSET 0x6b000 64 - #define TRANSCODER_DSI1_OFFSET 0x6b800 65 - #define PIPE_A_OFFSET 0x70000 66 - #define PIPE_B_OFFSET 0x71000 67 - #define PIPE_C_OFFSET 0x72000 68 - #define PIPE_D_OFFSET 0x73000 69 - #define PIPE_DSI0_OFFSET 0x7b000 70 - #define PIPE_DSI1_OFFSET 0x7b800 71 - 72 59 #define SOFTWARE_FLAGS_SPR33 XE_REG(0x4f084) 73 60 74 61 #define GU_CNTL_PROTECTED XE_REG(0x10100C)
+3 -3
drivers/gpu/drm/xe/xe_bo.c
··· 1400 1400 1401 1401 xe_assert(xe, tile); 1402 1402 1403 - if (flags & XE_BO_CREATE_STOLEN_BIT && 1404 - flags & XE_BO_FIXED_PLACEMENT_BIT) { 1405 - err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, start); 1403 + if (flags & XE_BO_FIXED_PLACEMENT_BIT) { 1404 + err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, 1405 + start + bo->size, U64_MAX); 1406 1406 } else { 1407 1407 err = xe_ggtt_insert_bo(tile->mem.ggtt, bo); 1408 1408 }
+53 -5
drivers/gpu/drm/xe/xe_device.c
··· 18 18 #include "regs/xe_regs.h" 19 19 #include "xe_bo.h" 20 20 #include "xe_debugfs.h" 21 + #include "xe_display.h" 21 22 #include "xe_dma_buf.h" 22 23 #include "xe_drm_client.h" 23 24 #include "xe_drv.h" ··· 191 190 if (xe->ordered_wq) 192 191 destroy_workqueue(xe->ordered_wq); 193 192 193 + if (xe->unordered_wq) 194 + destroy_workqueue(xe->unordered_wq); 195 + 194 196 ttm_device_fini(&xe->ttm); 195 197 } 196 198 ··· 202 198 { 203 199 struct xe_device *xe; 204 200 int err; 201 + 202 + xe_display_driver_set_hooks(&driver); 205 203 206 204 err = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); 207 205 if (err) ··· 243 237 INIT_LIST_HEAD(&xe->pinned.evicted); 244 238 245 239 xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0); 246 - if (!xe->ordered_wq) { 247 - drm_err(&xe->drm, "Failed to allocate xe-ordered-wq\n"); 240 + xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0); 241 + if (!xe->ordered_wq || !xe->unordered_wq) { 242 + drm_err(&xe->drm, "Failed to allocate xe workqueues\n"); 248 243 err = -ENOMEM; 249 244 goto err_put; 250 245 } 251 246 252 - drmm_mutex_init(&xe->drm, &xe->sb_lock); 253 - xe->enabled_irq_mask = ~0; 247 + err = xe_display_create(xe); 248 + if (WARN_ON(err)) 249 + goto err_put; 254 250 255 251 return xe; 256 252 ··· 354 346 xe_pat_init_early(xe); 355 347 356 348 xe->info.mem_region_mask = 1; 349 + err = xe_display_init_nommio(xe); 350 + if (err) 351 + return err; 357 352 358 353 for_each_tile(tile, xe, id) { 359 354 err = xe_tile_alloc(tile); ··· 378 367 return err; 379 368 } 380 369 381 - err = xe_irq_install(xe); 370 + err = xe_display_init_noirq(xe); 382 371 if (err) 383 372 return err; 373 + 374 + err = xe_irq_install(xe); 375 + if (err) 376 + goto err; 384 377 385 378 for_each_gt(gt, xe, id) { 386 379 err = xe_gt_init_early(gt); ··· 407 392 /* Allocate and map stolen after potential VRAM resize */ 408 393 xe_ttm_stolen_mgr_init(xe); 409 394 395 + /* 396 + * Now that GT is initialized (TTM in 
particular), 397 + * we can try to init display, and inherit the initial fb. 398 + * This is the reason the first allocation needs to be done 399 + * inside display. 400 + */ 401 + err = xe_display_init_noaccel(xe); 402 + if (err) 403 + goto err_irq_shutdown; 404 + 410 405 for_each_gt(gt, xe, id) { 411 406 err = xe_gt_init(gt); 412 407 if (err) ··· 425 400 426 401 xe_heci_gsc_init(xe); 427 402 403 + err = xe_display_init(xe); 404 + if (err) 405 + goto err_fini_display; 406 + 428 407 err = drm_dev_register(&xe->drm, 0); 429 408 if (err) 430 409 goto err_irq_shutdown; 410 + 411 + xe_display_register(xe); 431 412 432 413 xe_debugfs_register(xe); 433 414 ··· 447 416 448 417 return 0; 449 418 419 + err_fini_display: 420 + xe_display_driver_remove(xe); 421 + 450 422 err_irq_shutdown: 451 423 xe_irq_shutdown(xe); 424 + err: 425 + xe_display_fini(xe); 452 426 return err; 427 + } 428 + 429 + static void xe_device_remove_display(struct xe_device *xe) 430 + { 431 + xe_display_unregister(xe); 432 + 433 + drm_dev_unplug(&xe->drm); 434 + xe_display_driver_remove(xe); 453 435 } 454 436 455 437 void xe_device_remove(struct xe_device *xe) 456 438 { 439 + xe_device_remove_display(xe); 440 + 441 + xe_display_fini(xe); 442 + 457 443 xe_heci_gsc_fini(xe); 458 444 459 445 xe_irq_shutdown(xe);
+86
drivers/gpu/drm/xe/xe_device_types.h
··· 20 20 #include "xe_pmu.h" 21 21 #include "xe_step_types.h" 22 22 23 + #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) 24 + #include "soc/intel_pch.h" 25 + #include "intel_display_core.h" 26 + #include "intel_display_device.h" 27 + #endif 28 + 23 29 struct xe_ggtt; 24 30 struct xe_pat_ops; 25 31 ··· 253 247 u8 has_llc:1; 254 248 /** @has_range_tlb_invalidation: Has range based TLB invalidations */ 255 249 u8 has_range_tlb_invalidation:1; 250 + /** @enable_display: display enabled */ 251 + u8 enable_display:1; 256 252 /** @bypass_mtcfg: Bypass Multi-Tile configuration from MTCFG register */ 257 253 u8 bypass_mtcfg:1; 258 254 /** @supports_mmio_ext: supports MMIO extension/s */ 259 255 u8 supports_mmio_ext:1; 260 256 /** @has_heci_gscfi: device has heci gscfi */ 261 257 u8 has_heci_gscfi:1; 258 + 259 + #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) 260 + struct { 261 + u32 rawclk_freq; 262 + } i915_runtime; 263 + #endif 262 264 } info; 263 265 264 266 /** @irq: device interrupt state */ ··· 337 323 /** @ordered_wq: used to serialize compute mode resume */ 338 324 struct workqueue_struct *ordered_wq; 339 325 326 + /** @unordered_wq: used to serialize unordered work, mostly display */ 327 + struct workqueue_struct *unordered_wq; 328 + 340 329 /** @tiles: device tiles */ 341 330 struct xe_tile tiles[XE_MAX_TILES_PER_DEVICE]; 342 331 ··· 408 391 /** @needs_flr_on_fini: requests function-reset on fini */ 409 392 bool needs_flr_on_fini; 410 393 394 + #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) 395 + /* 396 + * Any fields below this point are the ones used by display. 397 + * They are temporarily added here so xe_device can be desguised as 398 + * drm_i915_private during build. 
After cleanup these should go away, 399 + * migrating to the right sub-structs 400 + */ 401 + struct intel_display display; 402 + enum intel_pch pch_type; 403 + u16 pch_id; 404 + 405 + struct dram_info { 406 + bool wm_lv_0_adjust_needed; 407 + u8 num_channels; 408 + bool symmetric_memory; 409 + enum intel_dram_type { 410 + INTEL_DRAM_UNKNOWN, 411 + INTEL_DRAM_DDR3, 412 + INTEL_DRAM_DDR4, 413 + INTEL_DRAM_LPDDR3, 414 + INTEL_DRAM_LPDDR4, 415 + INTEL_DRAM_DDR5, 416 + INTEL_DRAM_LPDDR5, 417 + } type; 418 + u8 num_qgv_points; 419 + u8 num_psf_gv_points; 420 + } dram_info; 421 + 422 + /* 423 + * edram size in MB. 424 + * Cannot be determined by PCIID. You must always read a register. 425 + */ 426 + u32 edram_size_mb; 427 + 428 + /* To shut up runtime pm macros.. */ 429 + struct xe_runtime_pm {} runtime_pm; 430 + 411 431 /* For pcode */ 412 432 struct mutex sb_lock; 413 433 434 + /* Should be in struct intel_display */ 435 + u32 skl_preferred_vco_freq, max_dotclk_freq, hti_state; 436 + u8 snps_phy_failed_calibration; 437 + struct drm_atomic_state *modeset_restore_state; 438 + struct list_head global_obj_list; 439 + 440 + union { 441 + /* only to allow build, not used functionally */ 442 + u32 irq_mask; 443 + u32 de_irq_mask[I915_MAX_PIPES]; 444 + }; 445 + u32 pipestat_irq_mask[I915_MAX_PIPES]; 446 + 447 + bool display_irqs_enabled; 414 448 u32 enabled_irq_mask; 449 + 450 + struct intel_uncore { 451 + spinlock_t lock; 452 + } uncore; 453 + 454 + /* only to allow build, not used functionally */ 455 + struct { 456 + unsigned int hpll_freq; 457 + unsigned int czclk_freq; 458 + unsigned int fsb_freq, mem_freq, is_ddr3; 459 + u8 vblank_enabled; 460 + }; 461 + struct { 462 + const char *dmc_firmware_path; 463 + } params; 464 + 465 + void *pxp; 466 + #endif 415 467 }; 416 468 417 469 /**
+411
drivers/gpu/drm/xe/xe_display.c
··· 1 + // SPDX-License-Identifier: MIT 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #include "xe_display.h" 7 + #include "regs/xe_regs.h" 8 + 9 + #include <linux/fb.h> 10 + 11 + #include <drm/drm_drv.h> 12 + #include <drm/drm_managed.h> 13 + #include <drm/xe_drm.h> 14 + 15 + #include "soc/intel_dram.h" 16 + #include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */ 17 + #include "intel_acpi.h" 18 + #include "intel_audio.h" 19 + #include "intel_bw.h" 20 + #include "intel_display.h" 21 + #include "intel_display_driver.h" 22 + #include "intel_display_irq.h" 23 + #include "intel_display_types.h" 24 + #include "intel_dmc.h" 25 + #include "intel_dp.h" 26 + #include "intel_fbdev.h" 27 + #include "intel_hdcp.h" 28 + #include "intel_hotplug.h" 29 + #include "intel_opregion.h" 30 + #include "xe_module.h" 31 + 32 + /* Xe device functions */ 33 + 34 + static bool has_display(struct xe_device *xe) 35 + { 36 + return HAS_DISPLAY(xe); 37 + } 38 + 39 + /** 40 + * xe_display_driver_probe_defer - Detect if we need to wait for other drivers 41 + * early on 42 + * @pdev: PCI device 43 + * 44 + * Returns: true if probe needs to be deferred, false otherwise 45 + */ 46 + bool xe_display_driver_probe_defer(struct pci_dev *pdev) 47 + { 48 + if (!enable_display) 49 + return 0; 50 + 51 + return intel_display_driver_probe_defer(pdev); 52 + } 53 + 54 + static void xe_display_last_close(struct drm_device *dev) 55 + { 56 + struct xe_device *xe = to_xe_device(dev); 57 + 58 + if (xe->info.enable_display) 59 + intel_fbdev_restore_mode(to_xe_device(dev)); 60 + } 61 + 62 + /** 63 + * xe_display_driver_set_hooks - Add driver flags and hooks for display 64 + * @driver: DRM device driver 65 + * 66 + * Set features and function hooks in @driver that are needed for driving the 67 + * display IP. 
This sets the driver's capability of driving display, regardless 68 + * if the device has it enabled 69 + */ 70 + void xe_display_driver_set_hooks(struct drm_driver *driver) 71 + { 72 + if (!enable_display) 73 + return; 74 + 75 + driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC; 76 + driver->lastclose = xe_display_last_close; 77 + } 78 + 79 + static void unset_display_features(struct xe_device *xe) 80 + { 81 + xe->drm.driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC); 82 + } 83 + 84 + static void display_destroy(struct drm_device *dev, void *dummy) 85 + { 86 + struct xe_device *xe = to_xe_device(dev); 87 + 88 + destroy_workqueue(xe->display.hotplug.dp_wq); 89 + } 90 + 91 + /** 92 + * xe_display_create - create display struct 93 + * @xe: XE device instance 94 + * 95 + * Initialize all fields used by the display part. 96 + * 97 + * TODO: once everything can be inside a single struct, make the struct opaque 98 + * to the rest of xe and return it to be xe->display. 99 + * 100 + * Returns: 0 on success 101 + */ 102 + int xe_display_create(struct xe_device *xe) 103 + { 104 + int err; 105 + 106 + spin_lock_init(&xe->display.fb_tracking.lock); 107 + 108 + xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0); 109 + 110 + drmm_mutex_init(&xe->drm, &xe->sb_lock); 111 + drmm_mutex_init(&xe->drm, &xe->display.backlight.lock); 112 + drmm_mutex_init(&xe->drm, &xe->display.audio.mutex); 113 + drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex); 114 + drmm_mutex_init(&xe->drm, &xe->display.pps.mutex); 115 + drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex); 116 + xe->enabled_irq_mask = ~0; 117 + 118 + err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL); 119 + if (err) 120 + return err; 121 + 122 + return 0; 123 + } 124 + 125 + static void xe_display_fini_nommio(struct drm_device *dev, void *dummy) 126 + { 127 + struct xe_device *xe = to_xe_device(dev); 128 + 129 + if (!xe->info.enable_display) 130 + return; 131 + 132 + 
intel_power_domains_cleanup(xe); 133 + } 134 + 135 + int xe_display_init_nommio(struct xe_device *xe) 136 + { 137 + int err; 138 + 139 + if (!xe->info.enable_display) 140 + return 0; 141 + 142 + /* Fake uncore lock */ 143 + spin_lock_init(&xe->uncore.lock); 144 + 145 + /* This must be called before any calls to HAS_PCH_* */ 146 + intel_detect_pch(xe); 147 + 148 + err = intel_power_domains_init(xe); 149 + if (err) 150 + return err; 151 + 152 + return drmm_add_action_or_reset(&xe->drm, xe_display_fini_nommio, xe); 153 + } 154 + 155 + static void xe_display_fini_noirq(struct drm_device *dev, void *dummy) 156 + { 157 + struct xe_device *xe = to_xe_device(dev); 158 + 159 + if (!xe->info.enable_display) 160 + return; 161 + 162 + intel_display_driver_remove_noirq(xe); 163 + intel_power_domains_driver_remove(xe); 164 + } 165 + 166 + int xe_display_init_noirq(struct xe_device *xe) 167 + { 168 + int err; 169 + 170 + if (!xe->info.enable_display) 171 + return 0; 172 + 173 + intel_display_driver_early_probe(xe); 174 + 175 + /* Early display init.. */ 176 + intel_opregion_setup(xe); 177 + 178 + /* 179 + * Fill the dram structure to get the system dram info. This will be 180 + * used for memory latency calculation. 
181 + */ 182 + intel_dram_detect(xe); 183 + 184 + intel_bw_init_hw(xe); 185 + 186 + intel_display_device_info_runtime_init(xe); 187 + 188 + err = intel_display_driver_probe_noirq(xe); 189 + if (err) 190 + return err; 191 + 192 + return drmm_add_action_or_reset(&xe->drm, xe_display_fini_noirq, NULL); 193 + } 194 + 195 + static void xe_display_fini_noaccel(struct drm_device *dev, void *dummy) 196 + { 197 + struct xe_device *xe = to_xe_device(dev); 198 + 199 + if (!xe->info.enable_display) 200 + return; 201 + 202 + intel_display_driver_remove_nogem(xe); 203 + } 204 + 205 + int xe_display_init_noaccel(struct xe_device *xe) 206 + { 207 + int err; 208 + 209 + if (!xe->info.enable_display) 210 + return 0; 211 + 212 + err = intel_display_driver_probe_nogem(xe); 213 + if (err) 214 + return err; 215 + 216 + return drmm_add_action_or_reset(&xe->drm, xe_display_fini_noaccel, NULL); 217 + } 218 + 219 + int xe_display_init(struct xe_device *xe) 220 + { 221 + if (!xe->info.enable_display) 222 + return 0; 223 + 224 + return intel_display_driver_probe(xe); 225 + } 226 + 227 + void xe_display_fini(struct xe_device *xe) 228 + { 229 + if (!xe->info.enable_display) 230 + return; 231 + 232 + /* poll work can call into fbdev, hence clean that up afterwards */ 233 + intel_hpd_poll_fini(xe); 234 + intel_fbdev_fini(xe); 235 + 236 + intel_hdcp_component_fini(xe); 237 + intel_audio_deinit(xe); 238 + } 239 + 240 + void xe_display_register(struct xe_device *xe) 241 + { 242 + if (!xe->info.enable_display) 243 + return; 244 + 245 + intel_display_driver_register(xe); 246 + intel_register_dsm_handler(); 247 + intel_power_domains_enable(xe); 248 + } 249 + 250 + void xe_display_unregister(struct xe_device *xe) 251 + { 252 + if (!xe->info.enable_display) 253 + return; 254 + 255 + intel_unregister_dsm_handler(); 256 + intel_power_domains_disable(xe); 257 + intel_display_driver_unregister(xe); 258 + } 259 + 260 + void xe_display_driver_remove(struct xe_device *xe) 261 + { 262 + if 
(!xe->info.enable_display) 263 + return; 264 + 265 + intel_display_driver_remove(xe); 266 + 267 + intel_display_device_remove(xe); 268 + } 269 + 270 + /* IRQ-related functions */ 271 + 272 + void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) 273 + { 274 + if (!xe->info.enable_display) 275 + return; 276 + 277 + if (master_ctl & DISPLAY_IRQ) 278 + gen11_display_irq_handler(xe); 279 + } 280 + 281 + void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) 282 + { 283 + if (!xe->info.enable_display) 284 + return; 285 + 286 + if (gu_misc_iir & GU_MISC_GSE) 287 + intel_opregion_asle_intr(xe); 288 + } 289 + 290 + void xe_display_irq_reset(struct xe_device *xe) 291 + { 292 + if (!xe->info.enable_display) 293 + return; 294 + 295 + gen11_display_irq_reset(xe); 296 + } 297 + 298 + void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) 299 + { 300 + if (!xe->info.enable_display) 301 + return; 302 + 303 + if (gt->info.id == XE_GT0) 304 + gen11_de_irq_postinstall(xe); 305 + } 306 + 307 + static void intel_suspend_encoders(struct xe_device *xe) 308 + { 309 + struct drm_device *dev = &xe->drm; 310 + struct intel_encoder *encoder; 311 + 312 + if (has_display(xe)) 313 + return; 314 + 315 + drm_modeset_lock_all(dev); 316 + for_each_intel_encoder(dev, encoder) 317 + if (encoder->suspend) 318 + encoder->suspend(encoder); 319 + drm_modeset_unlock_all(dev); 320 + } 321 + 322 + void xe_display_pm_suspend(struct xe_device *xe) 323 + { 324 + if (!xe->info.enable_display) 325 + return; 326 + 327 + /* 328 + * We do a lot of poking in a lot of registers, make sure they work 329 + * properly. 
330 + */ 331 + intel_power_domains_disable(xe); 332 + if (has_display(xe)) 333 + drm_kms_helper_poll_disable(&xe->drm); 334 + 335 + intel_display_driver_suspend(xe); 336 + 337 + intel_dp_mst_suspend(xe); 338 + 339 + intel_hpd_cancel_work(xe); 340 + 341 + intel_suspend_encoders(xe); 342 + 343 + intel_opregion_suspend(xe, PCI_D3cold); 344 + 345 + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_SUSPENDED, true); 346 + 347 + intel_dmc_suspend(xe); 348 + } 349 + 350 + void xe_display_pm_suspend_late(struct xe_device *xe) 351 + { 352 + if (!xe->info.enable_display) 353 + return; 354 + 355 + intel_power_domains_suspend(xe, I915_DRM_SUSPEND_MEM); 356 + 357 + intel_display_power_suspend_late(xe); 358 + } 359 + 360 + void xe_display_pm_resume_early(struct xe_device *xe) 361 + { 362 + if (!xe->info.enable_display) 363 + return; 364 + 365 + intel_display_power_resume_early(xe); 366 + 367 + intel_power_domains_resume(xe); 368 + } 369 + 370 + void xe_display_pm_resume(struct xe_device *xe) 371 + { 372 + if (!xe->info.enable_display) 373 + return; 374 + 375 + intel_dmc_resume(xe); 376 + 377 + if (has_display(xe)) 378 + drm_mode_config_reset(&xe->drm); 379 + 380 + intel_display_driver_init_hw(xe); 381 + intel_hpd_init(xe); 382 + 383 + /* MST sideband requires HPD interrupts enabled */ 384 + intel_dp_mst_resume(xe); 385 + intel_display_driver_resume(xe); 386 + 387 + intel_hpd_poll_disable(xe); 388 + if (has_display(xe)) 389 + drm_kms_helper_poll_enable(&xe->drm); 390 + 391 + intel_opregion_resume(xe); 392 + 393 + intel_fbdev_set_suspend(&xe->drm, FBINFO_STATE_RUNNING, false); 394 + 395 + intel_power_domains_enable(xe); 396 + } 397 + 398 + void xe_display_probe(struct xe_device *xe) 399 + { 400 + if (!xe->info.enable_display) 401 + goto no_display; 402 + 403 + intel_display_device_probe(xe); 404 + 405 + if (has_display(xe)) 406 + return; 407 + 408 + no_display: 409 + xe->info.enable_display = false; 410 + unset_display_features(xe); 411 + }
+72
drivers/gpu/drm/xe/xe_display.h
··· 1 + /* SPDX-License-Identifier: MIT */ 2 + /* 3 + * Copyright © 2023 Intel Corporation 4 + */ 5 + 6 + #ifndef _XE_DISPLAY_H_ 7 + #define _XE_DISPLAY_H_ 8 + 9 + #include "xe_device.h" 10 + 11 + struct drm_driver; 12 + 13 + #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY) 14 + 15 + bool xe_display_driver_probe_defer(struct pci_dev *pdev); 16 + void xe_display_driver_set_hooks(struct drm_driver *driver); 17 + void xe_display_driver_remove(struct xe_device *xe); 18 + 19 + int xe_display_create(struct xe_device *xe); 20 + 21 + void xe_display_probe(struct xe_device *xe); 22 + 23 + int xe_display_init_nommio(struct xe_device *xe); 24 + int xe_display_init_noirq(struct xe_device *xe); 25 + int xe_display_init_noaccel(struct xe_device *xe); 26 + int xe_display_init(struct xe_device *xe); 27 + void xe_display_fini(struct xe_device *xe); 28 + 29 + void xe_display_register(struct xe_device *xe); 30 + void xe_display_unregister(struct xe_device *xe); 31 + 32 + void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl); 33 + void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir); 34 + void xe_display_irq_reset(struct xe_device *xe); 35 + void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt); 36 + 37 + void xe_display_pm_suspend(struct xe_device *xe); 38 + void xe_display_pm_suspend_late(struct xe_device *xe); 39 + void xe_display_pm_resume_early(struct xe_device *xe); 40 + void xe_display_pm_resume(struct xe_device *xe); 41 + 42 + #else 43 + 44 + static inline int xe_display_driver_probe_defer(struct pci_dev *pdev) { return 0; } 45 + static inline void xe_display_driver_set_hooks(struct drm_driver *driver) { } 46 + static inline void xe_display_driver_remove(struct xe_device *xe) {} 47 + 48 + static inline int xe_display_create(struct xe_device *xe) { return 0; } 49 + 50 + static inline void xe_display_probe(struct xe_device *xe) { } 51 + 52 + static inline int xe_display_init_nommio(struct xe_device *xe) { return 0; } 53 + static inline int 
xe_display_init_noirq(struct xe_device *xe) { return 0; } 54 + static inline int xe_display_init_noaccel(struct xe_device *xe) { return 0; } 55 + static inline int xe_display_init(struct xe_device *xe) { return 0; } 56 + static inline void xe_display_fini(struct xe_device *xe) {} 57 + 58 + static inline void xe_display_register(struct xe_device *xe) {} 59 + static inline void xe_display_unregister(struct xe_device *xe) {} 60 + 61 + static inline void xe_display_irq_handler(struct xe_device *xe, u32 master_ctl) {} 62 + static inline void xe_display_irq_enable(struct xe_device *xe, u32 gu_misc_iir) {} 63 + static inline void xe_display_irq_reset(struct xe_device *xe) {} 64 + static inline void xe_display_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) {} 65 + 66 + static inline void xe_display_pm_suspend(struct xe_device *xe) {} 67 + static inline void xe_display_pm_suspend_late(struct xe_device *xe) {} 68 + static inline void xe_display_pm_resume_early(struct xe_device *xe) {} 69 + static inline void xe_display_pm_resume(struct xe_device *xe) {} 70 + 71 + #endif /* CONFIG_DRM_XE_DISPLAY */ 72 + #endif /* _XE_DISPLAY_H_ */
+9 -16
drivers/gpu/drm/xe/xe_ggtt.c
··· 338 338 } 339 339 340 340 static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, 341 - u64 start, u64 end, u64 alignment) 341 + u64 start, u64 end) 342 342 { 343 343 int err; 344 + u64 alignment = XE_PAGE_SIZE; 345 + 346 + if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) 347 + alignment = SZ_64K; 344 348 345 349 if (XE_WARN_ON(bo->ggtt_node.size)) { 346 350 /* Someone's already inserted this BO in the GGTT */ ··· 368 364 return err; 369 365 } 370 366 371 - int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 ofs) 367 + int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, 368 + u64 start, u64 end) 372 369 { 373 - if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) { 374 - if (XE_WARN_ON(!IS_ALIGNED(ofs, SZ_64K)) || 375 - XE_WARN_ON(!IS_ALIGNED(bo->size, SZ_64K))) 376 - return -EINVAL; 377 - } 378 - 379 - return __xe_ggtt_insert_bo_at(ggtt, bo, ofs, ofs + bo->size, 0); 370 + return __xe_ggtt_insert_bo_at(ggtt, bo, start, end); 380 371 } 381 372 382 373 int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) 383 374 { 384 - u64 alignment; 385 - 386 - alignment = XE_PAGE_SIZE; 387 - if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) 388 - alignment = SZ_64K; 389 - 390 - return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX, alignment); 375 + return __xe_ggtt_insert_bo_at(ggtt, bo, 0, U64_MAX); 391 376 } 392 377 393 378 void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node)
+2 -1
drivers/gpu/drm/xe/xe_ggtt.h
··· 24 24 void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node); 25 25 void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo); 26 26 int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo); 27 - int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 ofs); 27 + int xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, 28 + u64 start, u64 end); 28 29 void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo); 29 30 30 31 int xe_ggtt_dump(struct xe_ggtt *ggtt, struct drm_printer *p);
+12 -1
drivers/gpu/drm/xe/xe_irq.c
··· 12 12 #include "regs/xe_gt_regs.h" 13 13 #include "regs/xe_regs.h" 14 14 #include "xe_device.h" 15 + #include "xe_display.h" 15 16 #include "xe_drv.h" 16 17 #include "xe_gt.h" 17 18 #include "xe_guc.h" ··· 352 351 353 352 gt_irq_handler(tile, master_ctl, intr_dw, identity); 354 353 354 + xe_display_irq_handler(xe, master_ctl); 355 + 355 356 gu_misc_iir = gu_misc_irq_ack(xe, master_ctl); 356 357 357 358 xelp_intr_enable(xe, false); 359 + 360 + xe_display_irq_enable(xe, gu_misc_iir); 358 361 359 362 xe_pmu_irq_stats(xe); 360 363 ··· 449 444 * that get reported as Gunit GSE) would only be hooked up to 450 445 * the primary tile. 451 446 */ 452 - if (id == 0) 447 + if (id == 0) { 448 + xe_display_irq_handler(xe, master_ctl); 453 449 gu_misc_iir = gu_misc_irq_ack(xe, master_ctl); 450 + } 454 451 } 455 452 456 453 dg1_intr_enable(xe, false); 454 + xe_display_irq_enable(xe, gu_misc_iir); 457 455 458 456 xe_pmu_irq_stats(xe); 459 457 ··· 550 542 551 543 tile = xe_device_get_root_tile(xe); 552 544 mask_and_disable(tile, GU_MISC_IRQ_OFFSET); 545 + xe_display_irq_reset(xe); 553 546 554 547 /* 555 548 * The tile's top-level status register should be the last one ··· 565 556 566 557 static void xe_irq_postinstall(struct xe_device *xe) 567 558 { 559 + xe_display_irq_postinstall(xe, xe_root_mmio_gt(xe)); 560 + 568 561 /* 569 562 * ASLE backlight operations are reported via GUnit GSE interrupts 570 563 * on the root tile.
+4
drivers/gpu/drm/xe/xe_module.c
··· 19 19 module_param_named_unsafe(force_execlist, force_execlist, bool, 0444); 20 20 MODULE_PARM_DESC(force_execlist, "Force Execlist submission"); 21 21 22 + bool enable_display = true; 23 + module_param_named(enable_display, enable_display, bool, 0444); 24 + MODULE_PARM_DESC(enable_display, "Enable display"); 25 + 22 26 u32 xe_force_vram_bar_size; 23 27 module_param_named(vram_bar_size, xe_force_vram_bar_size, uint, 0600); 24 28 MODULE_PARM_DESC(vram_bar_size, "Set the vram bar size(in MiB)");
+30 -5
drivers/gpu/drm/xe/xe_pci.c
··· 17 17 #include "regs/xe_regs.h"
 18 18 #include "regs/xe_gt_regs.h"
 19 19 #include "xe_device.h"
 20 + #include "xe_display.h"
 20 21 #include "xe_drv.h"
 21 22 #include "xe_gt.h"
 22 23 #include "xe_macros.h"
··· 56 55 
 57 56 u8 require_force_probe:1;
 58 57 u8 is_dgfx:1;
 58 + u8 has_display:1;
 59 59 u8 has_heci_gscfi:1;
 60 60 
 61 61 u8 has_llc:1;
 62 62 u8 bypass_mtcfg:1;
 63 63 u8 supports_mmio_ext:1;
 64 64 };
 65 + 
 66 + __diag_push();
 67 + __diag_ignore_all("-Woverride-init", "Allow field overrides in table");
 65 68 
 66 69 #define PLATFORM(x) \
 67 70 .platform = (x), \
··· 210 205 .graphics = &graphics_xelp,
 211 206 .media = &media_xem,
 212 207 PLATFORM(XE_TIGERLAKE),
 213 - .has_llc = 1,
 208 + .has_display = true,
 209 + .has_llc = true,
 214 210 .require_force_probe = true,
 215 211 };
 216 212 
··· 219 213 .graphics = &graphics_xelp,
 220 214 .media = &media_xem,
 221 215 PLATFORM(XE_ROCKETLAKE),
 216 + .has_display = true,
 222 217 .has_llc = true,
 223 218 .require_force_probe = true,
 224 219 };
··· 230 223 .graphics = &graphics_xelp,
 231 224 .media = &media_xem,
 232 225 PLATFORM(XE_ALDERLAKE_S),
 233 - .has_llc = 1,
 226 + .has_display = true,
 227 + .has_llc = true,
 234 228 .require_force_probe = true,
 235 229 .subplatforms = (const struct xe_subplatform_desc[]) {
 236 230 { XE_SUBPLATFORM_ALDERLAKE_S_RPLS, "RPLS", adls_rpls_ids },
··· 245 237 .graphics = &graphics_xelp,
 246 238 .media = &media_xem,
 247 239 PLATFORM(XE_ALDERLAKE_P),
 248 - .has_llc = 1,
 240 + .has_display = true,
 241 + .has_llc = true,
 249 242 .require_force_probe = true,
 250 243 .subplatforms = (const struct xe_subplatform_desc[]) {
 251 244 { XE_SUBPLATFORM_ALDERLAKE_P_RPLU, "RPLU", adlp_rplu_ids },
··· 258 249 .graphics = &graphics_xelp,
 259 250 .media = &media_xem,
 260 251 PLATFORM(XE_ALDERLAKE_N),
 261 - .has_llc = 1,
 252 + .has_display = true,
 253 + .has_llc = true,
 262 254 .require_force_probe = true,
 263 255 };
··· 271 261 .media = &media_xem,
 272 262 DGFX_FEATURES,
 273 263 PLATFORM(XE_DG1),
 264 + .has_display = true,
 274 265 .require_force_probe = true,
 275 266 .has_heci_gscfi = 1,
 276 267 };
··· 297 286 .require_force_probe = true,
 298 287 
 299 288 DG2_FEATURES,
 289 + .has_display = false,
 300 290 };
 301 291 
 302 292 static const struct xe_device_desc dg2_desc = {
··· 306 294 .require_force_probe = true,
 307 295 
 308 296 DG2_FEATURES,
 297 + .has_display = true,
 309 298 };
 310 299 
 311 300 static const __maybe_unused struct xe_device_desc pvc_desc = {
 312 301 .graphics = &graphics_xehpc,
 313 302 DGFX_FEATURES,
 314 303 PLATFORM(XE_PVC),
 304 + .has_display = false,
 315 305 .require_force_probe = true,
 316 306 .has_heci_gscfi = 1,
 317 307 };
··· 322 308 /* .graphics and .media determined via GMD_ID */
 323 309 .require_force_probe = true,
 324 310 PLATFORM(XE_METEORLAKE),
 311 + .has_display = true,
 325 312 };
 326 313 
 327 314 static const struct xe_device_desc lnl_desc = {
··· 331 316 };
 332 317 
 333 318 #undef PLATFORM
 319 + __diag_pop();
 334 320 
 335 321 /* Map of GMD_ID values to graphics IP */
 336 322 static struct gmdid_map graphics_ip_map[] = {
··· 590 574 xe->info.has_flat_ccs = graphics_desc->has_flat_ccs;
 591 575 xe->info.has_range_tlb_invalidation = graphics_desc->has_range_tlb_invalidation;
 592 576 
 577 + xe->info.enable_display = IS_ENABLED(CONFIG_DRM_XE_DISPLAY) &&
 578 + enable_display &&
 579 + desc->has_display;
 593 580 /*
 594 581 * All platforms have at least one primary GT. Any platform with media
 595 582 * version 13 or higher has an additional dedicated media GT.
··· 687 668 return -ENODEV;
 688 669 }
 689 670 
 671 + if (xe_display_driver_probe_defer(pdev))
 672 + return -EPROBE_DEFER;
 673 + 
 690 674 xe = xe_device_create(pdev, ent);
 691 675 if (IS_ERR(xe))
 692 676 return PTR_ERR(xe);
··· 708 686 if (err)
 709 687 goto err_pci_disable;
 710 688 
 711 - drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) dma_m_s:%d tc:%d gscfi:%d",
 689 + xe_display_probe(xe);
 690 + 
 691 + drm_dbg(&xe->drm, "%s %s %04x:%04x dgfx:%d gfx:%s (%d.%02d) media:%s (%d.%02d) display:%s dma_m_s:%d tc:%d gscfi:%d",
 712 692 desc->platform_name,
 713 693 subplatform_desc ? subplatform_desc->name : "",
 714 694 xe->info.devid, xe->info.revid,
··· 721 697 xe->info.media_name,
 722 698 xe->info.media_verx100 / 100,
 723 699 xe->info.media_verx100 % 100,
 700 + str_yes_no(xe->info.enable_display),
 724 701 xe->info.dma_mask_size, xe->info.tile_count,
 725 702 xe->info.has_heci_gscfi);
+12 -1
drivers/gpu/drm/xe/xe_pm.c
··· 14 14 #include "xe_bo_evict.h" 15 15 #include "xe_device.h" 16 16 #include "xe_device_sysfs.h" 17 + #include "xe_display.h" 17 18 #include "xe_ggtt.h" 18 19 #include "xe_gt.h" 19 20 #include "xe_guc.h" ··· 62 61 if (err) 63 62 return err; 64 63 64 + xe_display_pm_suspend(xe); 65 + 65 66 for_each_gt(gt, xe, id) { 66 67 err = xe_gt_suspend(gt); 67 - if (err) 68 + if (err) { 69 + xe_display_pm_resume(xe); 68 70 return err; 71 + } 69 72 } 70 73 71 74 xe_irq_suspend(xe); 75 + 76 + xe_display_pm_suspend_late(xe); 72 77 73 78 return 0; 74 79 } ··· 101 94 return err; 102 95 } 103 96 97 + xe_display_pm_resume_early(xe); 98 + 104 99 /* 105 100 * This only restores pinned memory which is the memory required for the 106 101 * GT(s) to resume. ··· 112 103 return err; 113 104 114 105 xe_irq_resume(xe); 106 + 107 + xe_display_pm_resume(xe); 115 108 116 109 for_each_gt(gt, xe, id) 117 110 xe_gt_resume(gt);