Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/imagination: Add GPU ID parsing and firmware loading

Read the GPU ID register at probe time and select the correct
features/quirks/enhancements. Use the GPU ID to form the firmware
file name and load the firmware.

The features/quirks/enhancements arrays are currently hardcoded in
the driver for the supported GPUs. We are looking at moving this
information to the firmware image.

Changes since v8:
- Corrected license identifiers

Changes since v7:
- Fix kerneldoc for pvr_device_info_set_enhancements()

Changes since v5:
- Add BRN 71242 to device info

Changes since v4:
- Retrieve device information from firmware header
- Pull forward firmware header parsing from FW infrastructure patch
- Use devm_add_action_or_reset to release firmware

Changes since v3:
- Use drm_dev_{enter,exit}

Co-developed-by: Frank Binns <frank.binns@imgtec.com>
Signed-off-by: Frank Binns <frank.binns@imgtec.com>
Co-developed-by: Matt Coster <matt.coster@imgtec.com>
Signed-off-by: Matt Coster <matt.coster@imgtec.com>
Co-developed-by: Donald Robson <donald.robson@imgtec.com>
Signed-off-by: Donald Robson <donald.robson@imgtec.com>
Signed-off-by: Sarah Walker <sarah.walker@imgtec.com>
Link: https://lore.kernel.org/r/1ff76f7a5b45c742279c78910f8491b8a5e7f6e6.1700668843.git.donald.robson@imgtec.com
Signed-off-by: Maxime Ripard <mripard@kernel.org>

Authored by Sarah Walker and committed by Maxime Ripard.
f99f5f3e a26f067f

+1925 -2
+2
drivers/gpu/drm/imagination/Makefile
··· 5 5 6 6 powervr-y := \ 7 7 pvr_device.o \ 8 + pvr_device_info.o \ 8 9 pvr_drv.o \ 10 + pvr_fw.o 9 11 10 12 obj-$(CONFIG_DRM_POWERVR) += powervr.o
+322 -1
drivers/gpu/drm/imagination/pvr_device.c
··· 2 2 /* Copyright (c) 2023 Imagination Technologies Ltd. */ 3 3 4 4 #include "pvr_device.h" 5 + #include "pvr_device_info.h" 6 + 7 + #include "pvr_fw.h" 8 + #include "pvr_rogue_cr_defs.h" 5 9 6 10 #include <drm/drm_print.h> 7 11 12 + #include <linux/bitfield.h> 8 13 #include <linux/clk.h> 9 14 #include <linux/compiler_attributes.h> 10 15 #include <linux/compiler_types.h> 11 16 #include <linux/dma-mapping.h> 12 17 #include <linux/err.h> 18 + #include <linux/firmware.h> 13 19 #include <linux/gfp.h> 20 + #include <linux/interrupt.h> 14 21 #include <linux/platform_device.h> 22 + #include <linux/pm_runtime.h> 15 23 #include <linux/slab.h> 16 24 #include <linux/stddef.h> 17 25 #include <linux/types.h> 26 + #include <linux/workqueue.h> 27 + 28 + /* Major number for the supported version of the firmware. */ 29 + #define PVR_FW_VERSION_MAJOR 1 18 30 19 31 /** 20 32 * pvr_device_reg_init() - Initialize kernel access to a PowerVR device's ··· 113 101 } 114 102 115 103 /** 104 + * pvr_build_firmware_filename() - Construct a PowerVR firmware filename 105 + * @pvr_dev: Target PowerVR device. 106 + * @base: First part of the filename. 107 + * @major: Major version number. 108 + * 109 + * A PowerVR firmware filename consists of three parts separated by underscores 110 + * (``'_'``) along with a '.fw' file suffix. The first part is the exact value 111 + * of @base, the second part is the hardware version string derived from @pvr_fw 112 + * and the final part is the firmware version number constructed from @major with 113 + * a 'v' prefix, e.g. powervr/rogue_4.40.2.51_v1.fw. 114 + * 115 + * The returned string will have been slab allocated and must be freed with 116 + * kfree(). 117 + * 118 + * Return: 119 + * * The constructed filename on success, or 120 + * * Any error returned by kasprintf(). 
121 + */ 122 + static char * 123 + pvr_build_firmware_filename(struct pvr_device *pvr_dev, const char *base, 124 + u8 major) 125 + { 126 + struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id; 127 + 128 + return kasprintf(GFP_KERNEL, "%s_%d.%d.%d.%d_v%d.fw", base, gpu_id->b, 129 + gpu_id->v, gpu_id->n, gpu_id->c, major); 130 + } 131 + 132 + static void 133 + pvr_release_firmware(void *data) 134 + { 135 + struct pvr_device *pvr_dev = data; 136 + 137 + release_firmware(pvr_dev->fw_dev.firmware); 138 + } 139 + 140 + /** 141 + * pvr_request_firmware() - Load firmware for a PowerVR device 142 + * @pvr_dev: Target PowerVR device. 143 + * 144 + * See pvr_build_firmware_filename() for details on firmware file naming. 145 + * 146 + * Return: 147 + * * 0 on success, 148 + * * Any error returned by pvr_build_firmware_filename(), or 149 + * * Any error returned by request_firmware(). 150 + */ 151 + static int 152 + pvr_request_firmware(struct pvr_device *pvr_dev) 153 + { 154 + struct drm_device *drm_dev = &pvr_dev->base; 155 + char *filename; 156 + const struct firmware *fw; 157 + int err; 158 + 159 + filename = pvr_build_firmware_filename(pvr_dev, "powervr/rogue", 160 + PVR_FW_VERSION_MAJOR); 161 + if (IS_ERR(filename)) 162 + return PTR_ERR(filename); 163 + 164 + /* 165 + * This function takes a copy of &filename, meaning we can free our 166 + * instance before returning. 
167 + */ 168 + err = request_firmware(&fw, filename, pvr_dev->base.dev); 169 + if (err) { 170 + drm_err(drm_dev, "failed to load firmware %s (err=%d)\n", 171 + filename, err); 172 + goto err_free_filename; 173 + } 174 + 175 + drm_info(drm_dev, "loaded firmware %s\n", filename); 176 + kfree(filename); 177 + 178 + pvr_dev->fw_dev.firmware = fw; 179 + 180 + return devm_add_action_or_reset(drm_dev->dev, pvr_release_firmware, pvr_dev); 181 + 182 + err_free_filename: 183 + kfree(filename); 184 + 185 + return err; 186 + } 187 + 188 + /** 189 + * pvr_load_gpu_id() - Load a PowerVR device's GPU ID (BVNC) from control registers. 190 + * 191 + * Sets struct pvr_dev.gpu_id. 192 + * 193 + * @pvr_dev: Target PowerVR device. 194 + */ 195 + static void 196 + pvr_load_gpu_id(struct pvr_device *pvr_dev) 197 + { 198 + struct pvr_gpu_id *gpu_id = &pvr_dev->gpu_id; 199 + u64 bvnc; 200 + 201 + /* 202 + * Try reading the BVNC using the newer (cleaner) method first. If the 203 + * B value is zero, fall back to the older method. 
204 + */ 205 + bvnc = pvr_cr_read64(pvr_dev, ROGUE_CR_CORE_ID__PBVNC); 206 + 207 + gpu_id->b = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__BRANCH_ID); 208 + if (gpu_id->b != 0) { 209 + gpu_id->v = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__VERSION_ID); 210 + gpu_id->n = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__NUMBER_OF_SCALABLE_UNITS); 211 + gpu_id->c = PVR_CR_FIELD_GET(bvnc, CORE_ID__PBVNC__CONFIG_ID); 212 + } else { 213 + u32 core_rev = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_REVISION); 214 + u32 core_id = pvr_cr_read32(pvr_dev, ROGUE_CR_CORE_ID); 215 + u16 core_id_config = PVR_CR_FIELD_GET(core_id, CORE_ID_CONFIG); 216 + 217 + gpu_id->b = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MAJOR); 218 + gpu_id->v = PVR_CR_FIELD_GET(core_rev, CORE_REVISION_MINOR); 219 + gpu_id->n = FIELD_GET(0xFF00, core_id_config); 220 + gpu_id->c = FIELD_GET(0x00FF, core_id_config); 221 + } 222 + } 223 + 224 + /** 225 + * pvr_set_dma_info() - Set PowerVR device DMA information 226 + * @pvr_dev: Target PowerVR device. 227 + * 228 + * Sets the DMA mask and max segment size for the PowerVR device. 229 + * 230 + * Return: 231 + * * 0 on success, 232 + * * Any error returned by PVR_FEATURE_VALUE(), or 233 + * * Any error returned by dma_set_mask(). 
234 + */ 235 + 236 + static int 237 + pvr_set_dma_info(struct pvr_device *pvr_dev) 238 + { 239 + struct drm_device *drm_dev = from_pvr_device(pvr_dev); 240 + u16 phys_bus_width; 241 + int err; 242 + 243 + err = PVR_FEATURE_VALUE(pvr_dev, phys_bus_width, &phys_bus_width); 244 + if (err) { 245 + drm_err(drm_dev, "Failed to get device physical bus width\n"); 246 + return err; 247 + } 248 + 249 + err = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(phys_bus_width)); 250 + if (err) { 251 + drm_err(drm_dev, "Failed to set DMA mask (err=%d)\n", err); 252 + return err; 253 + } 254 + 255 + dma_set_max_seg_size(drm_dev->dev, UINT_MAX); 256 + 257 + return 0; 258 + } 259 + 260 + /** 261 + * pvr_device_gpu_init() - GPU-specific initialization for a PowerVR device 262 + * @pvr_dev: Target PowerVR device. 263 + * 264 + * The following steps are taken to ensure the device is ready: 265 + * 266 + * 1. Read the hardware version information from control registers, 267 + * 2. Initialise the hardware feature information, 268 + * 3. Setup the device DMA information, 269 + * 4. Setup the device-scoped memory context, and 270 + * 5. Load firmware into the device. 271 + * 272 + * Return: 273 + * * 0 on success, 274 + * * -%ENODEV if the GPU is not supported, 275 + * * Any error returned by pvr_set_dma_info(), 276 + * * Any error returned by pvr_memory_context_init(), or 277 + * * Any error returned by pvr_request_firmware(). 
278 + */ 279 + static int 280 + pvr_device_gpu_init(struct pvr_device *pvr_dev) 281 + { 282 + int err; 283 + 284 + pvr_load_gpu_id(pvr_dev); 285 + 286 + err = pvr_request_firmware(pvr_dev); 287 + if (err) 288 + return err; 289 + 290 + err = pvr_fw_validate_init_device_info(pvr_dev); 291 + if (err) 292 + return err; 293 + 294 + if (PVR_HAS_FEATURE(pvr_dev, meta)) 295 + pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_META; 296 + else if (PVR_HAS_FEATURE(pvr_dev, mips)) 297 + pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_MIPS; 298 + else if (PVR_HAS_FEATURE(pvr_dev, riscv_fw_processor)) 299 + pvr_dev->fw_dev.processor_type = PVR_FW_PROCESSOR_TYPE_RISCV; 300 + else 301 + return -EINVAL; 302 + 303 + return pvr_set_dma_info(pvr_dev); 304 + } 305 + 306 + /** 116 307 * pvr_device_init() - Initialize a PowerVR device 117 308 * @pvr_dev: Target PowerVR device. 118 309 * ··· 345 130 return err; 346 131 347 132 /* Map the control registers into memory. */ 348 - return pvr_device_reg_init(pvr_dev); 133 + err = pvr_device_reg_init(pvr_dev); 134 + if (err) 135 + return err; 136 + 137 + /* Perform GPU-specific initialization steps. */ 138 + return pvr_device_gpu_init(pvr_dev); 349 139 } 350 140 351 141 /** ··· 364 144 * Deinitialization stages are performed in reverse order compared to 365 145 * the initialization stages in pvr_device_init(). 
366 146 */ 147 + } 148 + 149 + bool 150 + pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk) 151 + { 152 + switch (quirk) { 153 + case 47217: 154 + return PVR_HAS_QUIRK(pvr_dev, 47217); 155 + case 48545: 156 + return PVR_HAS_QUIRK(pvr_dev, 48545); 157 + case 49927: 158 + return PVR_HAS_QUIRK(pvr_dev, 49927); 159 + case 51764: 160 + return PVR_HAS_QUIRK(pvr_dev, 51764); 161 + case 62269: 162 + return PVR_HAS_QUIRK(pvr_dev, 62269); 163 + default: 164 + return false; 165 + }; 166 + } 167 + 168 + bool 169 + pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement) 170 + { 171 + switch (enhancement) { 172 + case 35421: 173 + return PVR_HAS_ENHANCEMENT(pvr_dev, 35421); 174 + case 42064: 175 + return PVR_HAS_ENHANCEMENT(pvr_dev, 42064); 176 + default: 177 + return false; 178 + }; 179 + } 180 + 181 + /** 182 + * pvr_device_has_feature() - Look up device feature based on feature definition 183 + * @pvr_dev: Device pointer. 184 + * @feature: Feature to look up. Should be one of %PVR_FEATURE_*. 185 + * 186 + * Returns: 187 + * * %true if feature is present on device, or 188 + * * %false if feature is not present on device. 
189 + */ 190 + bool 191 + pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature) 192 + { 193 + switch (feature) { 194 + case PVR_FEATURE_CLUSTER_GROUPING: 195 + return PVR_HAS_FEATURE(pvr_dev, cluster_grouping); 196 + 197 + case PVR_FEATURE_COMPUTE_MORTON_CAPABLE: 198 + return PVR_HAS_FEATURE(pvr_dev, compute_morton_capable); 199 + 200 + case PVR_FEATURE_FB_CDC_V4: 201 + return PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4); 202 + 203 + case PVR_FEATURE_GPU_MULTICORE_SUPPORT: 204 + return PVR_HAS_FEATURE(pvr_dev, gpu_multicore_support); 205 + 206 + case PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE: 207 + return PVR_HAS_FEATURE(pvr_dev, isp_zls_d24_s8_packing_ogl_mode); 208 + 209 + case PVR_FEATURE_S7_TOP_INFRASTRUCTURE: 210 + return PVR_HAS_FEATURE(pvr_dev, s7_top_infrastructure); 211 + 212 + case PVR_FEATURE_TESSELLATION: 213 + return PVR_HAS_FEATURE(pvr_dev, tessellation); 214 + 215 + case PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS: 216 + return PVR_HAS_FEATURE(pvr_dev, tpu_dm_global_registers); 217 + 218 + case PVR_FEATURE_VDM_DRAWINDIRECT: 219 + return PVR_HAS_FEATURE(pvr_dev, vdm_drawindirect); 220 + 221 + case PVR_FEATURE_VDM_OBJECT_LEVEL_LLS: 222 + return PVR_HAS_FEATURE(pvr_dev, vdm_object_level_lls); 223 + 224 + case PVR_FEATURE_ZLS_SUBTILE: 225 + return PVR_HAS_FEATURE(pvr_dev, zls_subtile); 226 + 227 + /* Derived features. 
*/ 228 + case PVR_FEATURE_CDM_USER_MODE_QUEUE: { 229 + u8 cdm_control_stream_format = 0; 230 + 231 + PVR_FEATURE_VALUE(pvr_dev, cdm_control_stream_format, &cdm_control_stream_format); 232 + return (cdm_control_stream_format >= 2 && cdm_control_stream_format <= 4); 233 + } 234 + 235 + case PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP: 236 + if (PVR_HAS_FEATURE(pvr_dev, fbcdc_algorithm)) { 237 + u8 fbcdc_algorithm = 0; 238 + 239 + PVR_FEATURE_VALUE(pvr_dev, fbcdc_algorithm, &fbcdc_algorithm); 240 + return (fbcdc_algorithm < 3 || PVR_HAS_FEATURE(pvr_dev, fb_cdc_v4)); 241 + } 242 + return false; 243 + 244 + default: 245 + WARN(true, "Looking up undefined feature %u\n", feature); 246 + return false; 247 + } 367 248 }
+220
drivers/gpu/drm/imagination/pvr_device.h
··· 4 4 #ifndef PVR_DEVICE_H 5 5 #define PVR_DEVICE_H 6 6 7 + #include "pvr_device_info.h" 8 + #include "pvr_fw.h" 9 + 7 10 #include <drm/drm_device.h> 8 11 #include <drm/drm_file.h> 9 12 #include <drm/drm_mm.h> ··· 32 29 struct firmware; 33 30 34 31 /** 32 + * struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device 33 + * @b: Branch ID. 34 + * @v: Version ID. 35 + * @n: Number of scalable units. 36 + * @c: Config ID. 37 + */ 38 + struct pvr_gpu_id { 39 + u16 b, v, n, c; 40 + }; 41 + 42 + /** 43 + * struct pvr_fw_version - Firmware version information 44 + * @major: Major version number. 45 + * @minor: Minor version number. 46 + */ 47 + struct pvr_fw_version { 48 + u16 major, minor; 49 + }; 50 + 51 + /** 35 52 * struct pvr_device - powervr-specific wrapper for &struct drm_device 36 53 */ 37 54 struct pvr_device { ··· 62 39 * from_pvr_device(). 63 40 */ 64 41 struct drm_device base; 42 + 43 + /** @gpu_id: GPU ID detected at runtime. */ 44 + struct pvr_gpu_id gpu_id; 45 + 46 + /** 47 + * @features: Hardware feature information. 48 + * 49 + * Do not access this member directly, instead use PVR_HAS_FEATURE() 50 + * or PVR_FEATURE_VALUE() macros. 51 + */ 52 + struct pvr_device_features features; 53 + 54 + /** 55 + * @quirks: Hardware quirk information. 56 + * 57 + * Do not access this member directly, instead use PVR_HAS_QUIRK(). 58 + */ 59 + struct pvr_device_quirks quirks; 60 + 61 + /** 62 + * @enhancements: Hardware enhancement information. 63 + * 64 + * Do not access this member directly, instead use 65 + * PVR_HAS_ENHANCEMENT(). 66 + */ 67 + struct pvr_device_enhancements enhancements; 68 + 69 + /** @fw_version: Firmware version detected at runtime. */ 70 + struct pvr_fw_version fw_version; 65 71 66 72 /** 67 73 * @regs: Device control registers. ··· 122 70 * Interface (MEMIF). If present, this needs to be enabled/disabled together with @core_clk. 123 71 */ 124 72 struct clk *mem_clk; 73 + 74 + /** @fw_dev: Firmware related data. 
*/ 75 + struct pvr_fw_device fw_dev; 125 76 }; 126 77 127 78 /** ··· 147 92 struct pvr_device *pvr_dev; 148 93 }; 149 94 95 + /** 96 + * PVR_HAS_FEATURE() - Tests whether a PowerVR device has a given feature 97 + * @pvr_dev: [IN] Target PowerVR device. 98 + * @feature: [IN] Hardware feature name. 99 + * 100 + * Feature names are derived from those found in &struct pvr_device_features by 101 + * dropping the 'has_' prefix, which is applied by this macro. 102 + * 103 + * Return: 104 + * * true if the named feature is present in the hardware 105 + * * false if the named feature is not present in the hardware 106 + */ 107 + #define PVR_HAS_FEATURE(pvr_dev, feature) ((pvr_dev)->features.has_##feature) 108 + 109 + /** 110 + * PVR_FEATURE_VALUE() - Gets a PowerVR device feature value 111 + * @pvr_dev: [IN] Target PowerVR device. 112 + * @feature: [IN] Feature name. 113 + * @value_out: [OUT] Feature value. 114 + * 115 + * This macro will get a feature value for those features that have values. 116 + * If the feature is not present, nothing will be stored to @value_out. 117 + * 118 + * Feature names are derived from those found in &struct pvr_device_features by 119 + * dropping the 'has_' prefix. 120 + * 121 + * Return: 122 + * * 0 on success, or 123 + * * -%EINVAL if the named feature is not present in the hardware 124 + */ 125 + #define PVR_FEATURE_VALUE(pvr_dev, feature, value_out) \ 126 + ({ \ 127 + struct pvr_device *_pvr_dev = pvr_dev; \ 128 + int _ret = -EINVAL; \ 129 + if (_pvr_dev->features.has_##feature) { \ 130 + *(value_out) = _pvr_dev->features.feature; \ 131 + _ret = 0; \ 132 + } \ 133 + _ret; \ 134 + }) 135 + 136 + /** 137 + * PVR_HAS_QUIRK() - Tests whether a physical device has a given quirk 138 + * @pvr_dev: [IN] Target PowerVR device. 139 + * @quirk: [IN] Hardware quirk name. 140 + * 141 + * Quirk numbers are derived from those found in #pvr_device_quirks by 142 + * dropping the 'has_brn' prefix, which is applied by this macro. 
143 + * 144 + * Returns 145 + * * true if the quirk is present in the hardware, or 146 + * * false if the quirk is not present in the hardware. 147 + */ 148 + #define PVR_HAS_QUIRK(pvr_dev, quirk) ((pvr_dev)->quirks.has_brn##quirk) 149 + 150 + /** 151 + * PVR_HAS_ENHANCEMENT() - Tests whether a physical device has a given 152 + * enhancement 153 + * @pvr_dev: [IN] Target PowerVR device. 154 + * @enhancement: [IN] Hardware enhancement name. 155 + * 156 + * Enhancement numbers are derived from those found in #pvr_device_enhancements 157 + * by dropping the 'has_ern' prefix, which is applied by this macro. 158 + * 159 + * Returns 160 + * * true if the enhancement is present in the hardware, or 161 + * * false if the enhancement is not present in the hardware. 162 + */ 163 + #define PVR_HAS_ENHANCEMENT(pvr_dev, enhancement) ((pvr_dev)->enhancements.has_ern##enhancement) 164 + 150 165 #define from_pvr_device(pvr_dev) (&(pvr_dev)->base) 151 166 152 167 #define to_pvr_device(drm_dev) container_of_const(drm_dev, struct pvr_device, base) ··· 225 100 226 101 #define to_pvr_file(file) ((file)->driver_priv) 227 102 103 + /** 104 + * PVR_PACKED_BVNC() - Packs B, V, N and C values into a 64-bit unsigned integer 105 + * @b: Branch ID. 106 + * @v: Version ID. 107 + * @n: Number of scalable units. 108 + * @c: Config ID. 109 + * 110 + * The packed layout is as follows: 111 + * 112 + * +--------+--------+--------+-------+ 113 + * | 63..48 | 47..32 | 31..16 | 15..0 | 114 + * +========+========+========+=======+ 115 + * | B | V | N | C | 116 + * +--------+--------+--------+-------+ 117 + * 118 + * pvr_gpu_id_to_packed_bvnc() should be used instead of this macro when a 119 + * &struct pvr_gpu_id is available in order to ensure proper type checking. 120 + * 121 + * Return: Packed BVNC. 
122 + */ 123 + /* clang-format off */ 124 + #define PVR_PACKED_BVNC(b, v, n, c) \ 125 + ((((u64)(b) & GENMASK_ULL(15, 0)) << 48) | \ 126 + (((u64)(v) & GENMASK_ULL(15, 0)) << 32) | \ 127 + (((u64)(n) & GENMASK_ULL(15, 0)) << 16) | \ 128 + (((u64)(c) & GENMASK_ULL(15, 0)) << 0)) 129 + /* clang-format on */ 130 + 131 + /** 132 + * pvr_gpu_id_to_packed_bvnc() - Packs B, V, N and C values into a 64-bit 133 + * unsigned integer 134 + * @gpu_id: GPU ID. 135 + * 136 + * The packed layout is as follows: 137 + * 138 + * +--------+--------+--------+-------+ 139 + * | 63..48 | 47..32 | 31..16 | 15..0 | 140 + * +========+========+========+=======+ 141 + * | B | V | N | C | 142 + * +--------+--------+--------+-------+ 143 + * 144 + * This should be used in preference to PVR_PACKED_BVNC() when a &struct 145 + * pvr_gpu_id is available in order to ensure proper type checking. 146 + * 147 + * Return: Packed BVNC. 148 + */ 149 + static __always_inline u64 150 + pvr_gpu_id_to_packed_bvnc(struct pvr_gpu_id *gpu_id) 151 + { 152 + return PVR_PACKED_BVNC(gpu_id->b, gpu_id->v, gpu_id->n, gpu_id->c); 153 + } 154 + 155 + static __always_inline void 156 + packed_bvnc_to_pvr_gpu_id(u64 bvnc, struct pvr_gpu_id *gpu_id) 157 + { 158 + gpu_id->b = (bvnc & GENMASK_ULL(63, 48)) >> 48; 159 + gpu_id->v = (bvnc & GENMASK_ULL(47, 32)) >> 32; 160 + gpu_id->n = (bvnc & GENMASK_ULL(31, 16)) >> 16; 161 + gpu_id->c = bvnc & GENMASK_ULL(15, 0); 162 + } 163 + 228 164 int pvr_device_init(struct pvr_device *pvr_dev); 229 165 void pvr_device_fini(struct pvr_device *pvr_dev); 166 + 167 + bool 168 + pvr_device_has_uapi_quirk(struct pvr_device *pvr_dev, u32 quirk); 169 + bool 170 + pvr_device_has_uapi_enhancement(struct pvr_device *pvr_dev, u32 enhancement); 171 + bool 172 + pvr_device_has_feature(struct pvr_device *pvr_dev, u32 feature); 230 173 231 174 /** 232 175 * PVR_CR_FIELD_GET() - Extract a single field from a PowerVR control register ··· 402 209 } 403 210 404 211 /** 212 + * 
pvr_round_up_to_cacheline_size() - Round up a provided size to be cacheline 213 + * aligned 214 + * @pvr_dev: Target PowerVR device. 215 + * @size: Initial size, in bytes. 216 + * 217 + * Returns: 218 + * * Size aligned to cacheline size. 219 + */ 220 + static __always_inline size_t 221 + pvr_round_up_to_cacheline_size(struct pvr_device *pvr_dev, size_t size) 222 + { 223 + u16 slc_cacheline_size_bits = 0; 224 + u16 slc_cacheline_size_bytes; 225 + 226 + WARN_ON(!PVR_HAS_FEATURE(pvr_dev, slc_cache_line_size_bits)); 227 + PVR_FEATURE_VALUE(pvr_dev, slc_cache_line_size_bits, 228 + &slc_cacheline_size_bits); 229 + slc_cacheline_size_bytes = slc_cacheline_size_bits / 8; 230 + 231 + return round_up(size, slc_cacheline_size_bytes); 232 + } 233 + 234 + /** 405 235 * DOC: IOCTL validation helpers 406 236 * 407 237 * To validate the constraints imposed on IOCTL argument structs, a collection ··· 517 301 pvr_ioctl_union_padding_check(__instance, __union_offset, \ 518 302 __union_size, __member_size); \ 519 303 }) 304 + 305 + #define PVR_FW_PROCESSOR_TYPE_META 0 306 + #define PVR_FW_PROCESSOR_TYPE_MIPS 1 307 + #define PVR_FW_PROCESSOR_TYPE_RISCV 2 520 308 521 309 #endif /* PVR_DEVICE_H */
+254
drivers/gpu/drm/imagination/pvr_device_info.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only OR MIT 2 + /* Copyright (c) 2023 Imagination Technologies Ltd. */ 3 + 4 + #include "pvr_device.h" 5 + #include "pvr_device_info.h" 6 + #include "pvr_rogue_fwif_dev_info.h" 7 + 8 + #include <drm/drm_print.h> 9 + 10 + #include <linux/bits.h> 11 + #include <linux/minmax.h> 12 + #include <linux/stddef.h> 13 + #include <linux/types.h> 14 + 15 + #define QUIRK_MAPPING(quirk) \ 16 + [PVR_FW_HAS_BRN_##quirk] = offsetof(struct pvr_device, quirks.has_brn##quirk) 17 + 18 + static const uintptr_t quirks_mapping[] = { 19 + QUIRK_MAPPING(44079), 20 + QUIRK_MAPPING(47217), 21 + QUIRK_MAPPING(48492), 22 + QUIRK_MAPPING(48545), 23 + QUIRK_MAPPING(49927), 24 + QUIRK_MAPPING(50767), 25 + QUIRK_MAPPING(51764), 26 + QUIRK_MAPPING(62269), 27 + QUIRK_MAPPING(63142), 28 + QUIRK_MAPPING(63553), 29 + QUIRK_MAPPING(66011), 30 + QUIRK_MAPPING(71242), 31 + }; 32 + 33 + #undef QUIRK_MAPPING 34 + 35 + #define ENHANCEMENT_MAPPING(enhancement) \ 36 + [PVR_FW_HAS_ERN_##enhancement] = offsetof(struct pvr_device, \ 37 + enhancements.has_ern##enhancement) 38 + 39 + static const uintptr_t enhancements_mapping[] = { 40 + ENHANCEMENT_MAPPING(35421), 41 + ENHANCEMENT_MAPPING(38020), 42 + ENHANCEMENT_MAPPING(38748), 43 + ENHANCEMENT_MAPPING(42064), 44 + ENHANCEMENT_MAPPING(42290), 45 + ENHANCEMENT_MAPPING(42606), 46 + ENHANCEMENT_MAPPING(47025), 47 + ENHANCEMENT_MAPPING(57596), 48 + }; 49 + 50 + #undef ENHANCEMENT_MAPPING 51 + 52 + static void pvr_device_info_set_common(struct pvr_device *pvr_dev, const u64 *bitmask, 53 + u32 bitmask_size, const uintptr_t *mapping, u32 mapping_max) 54 + { 55 + const u32 mapping_max_size = (mapping_max + 63) >> 6; 56 + const u32 nr_bits = min(bitmask_size * 64, mapping_max); 57 + 58 + /* Warn if any unsupported values in the bitmask. 
*/ 59 + if (bitmask_size > mapping_max_size) { 60 + if (mapping == quirks_mapping) 61 + drm_warn(from_pvr_device(pvr_dev), "Unsupported quirks in firmware image"); 62 + else 63 + drm_warn(from_pvr_device(pvr_dev), 64 + "Unsupported enhancements in firmware image"); 65 + } else if (bitmask_size == mapping_max_size && (mapping_max & 63)) { 66 + u64 invalid_mask = ~0ull << (mapping_max & 63); 67 + 68 + if (bitmask[bitmask_size - 1] & invalid_mask) { 69 + if (mapping == quirks_mapping) 70 + drm_warn(from_pvr_device(pvr_dev), 71 + "Unsupported quirks in firmware image"); 72 + else 73 + drm_warn(from_pvr_device(pvr_dev), 74 + "Unsupported enhancements in firmware image"); 75 + } 76 + } 77 + 78 + for (u32 i = 0; i < nr_bits; i++) { 79 + if (bitmask[i >> 6] & BIT_ULL(i & 63)) 80 + *(bool *)((u8 *)pvr_dev + mapping[i]) = true; 81 + } 82 + } 83 + 84 + /** 85 + * pvr_device_info_set_quirks() - Set device quirks from device information in firmware 86 + * @pvr_dev: Device pointer. 87 + * @quirks: Pointer to quirks mask in device information. 88 + * @quirks_size: Size of quirks mask, in u64s. 89 + */ 90 + void pvr_device_info_set_quirks(struct pvr_device *pvr_dev, const u64 *quirks, u32 quirks_size) 91 + { 92 + BUILD_BUG_ON(ARRAY_SIZE(quirks_mapping) != PVR_FW_HAS_BRN_MAX); 93 + 94 + pvr_device_info_set_common(pvr_dev, quirks, quirks_size, quirks_mapping, 95 + ARRAY_SIZE(quirks_mapping)); 96 + } 97 + 98 + /** 99 + * pvr_device_info_set_enhancements() - Set device enhancements from device information in firmware 100 + * @pvr_dev: Device pointer. 101 + * @enhancements: Pointer to enhancements mask in device information. 102 + * @enhancements_size: Size of enhancements mask, in u64s. 
103 + */ 104 + void pvr_device_info_set_enhancements(struct pvr_device *pvr_dev, const u64 *enhancements, 105 + u32 enhancements_size) 106 + { 107 + BUILD_BUG_ON(ARRAY_SIZE(enhancements_mapping) != PVR_FW_HAS_ERN_MAX); 108 + 109 + pvr_device_info_set_common(pvr_dev, enhancements, enhancements_size, 110 + enhancements_mapping, ARRAY_SIZE(enhancements_mapping)); 111 + } 112 + 113 + #define FEATURE_MAPPING(fw_feature, feature) \ 114 + [PVR_FW_HAS_FEATURE_##fw_feature] = { \ 115 + .flag_offset = offsetof(struct pvr_device, features.has_##feature), \ 116 + .value_offset = 0 \ 117 + } 118 + 119 + #define FEATURE_MAPPING_VALUE(fw_feature, feature) \ 120 + [PVR_FW_HAS_FEATURE_##fw_feature] = { \ 121 + .flag_offset = offsetof(struct pvr_device, features.has_##feature), \ 122 + .value_offset = offsetof(struct pvr_device, features.feature) \ 123 + } 124 + 125 + static const struct { 126 + uintptr_t flag_offset; 127 + uintptr_t value_offset; 128 + } features_mapping[] = { 129 + FEATURE_MAPPING(AXI_ACELITE, axi_acelite), 130 + FEATURE_MAPPING_VALUE(CDM_CONTROL_STREAM_FORMAT, cdm_control_stream_format), 131 + FEATURE_MAPPING(CLUSTER_GROUPING, cluster_grouping), 132 + FEATURE_MAPPING_VALUE(COMMON_STORE_SIZE_IN_DWORDS, common_store_size_in_dwords), 133 + FEATURE_MAPPING(COMPUTE, compute), 134 + FEATURE_MAPPING(COMPUTE_MORTON_CAPABLE, compute_morton_capable), 135 + FEATURE_MAPPING(COMPUTE_OVERLAP, compute_overlap), 136 + FEATURE_MAPPING(COREID_PER_OS, coreid_per_os), 137 + FEATURE_MAPPING(DYNAMIC_DUST_POWER, dynamic_dust_power), 138 + FEATURE_MAPPING_VALUE(ECC_RAMS, ecc_rams), 139 + FEATURE_MAPPING_VALUE(FBCDC, fbcdc), 140 + FEATURE_MAPPING_VALUE(FBCDC_ALGORITHM, fbcdc_algorithm), 141 + FEATURE_MAPPING_VALUE(FBCDC_ARCHITECTURE, fbcdc_architecture), 142 + FEATURE_MAPPING_VALUE(FBC_MAX_DEFAULT_DESCRIPTORS, fbc_max_default_descriptors), 143 + FEATURE_MAPPING_VALUE(FBC_MAX_LARGE_DESCRIPTORS, fbc_max_large_descriptors), 144 + FEATURE_MAPPING(FB_CDC_V4, fb_cdc_v4), 145 + 
FEATURE_MAPPING(GPU_MULTICORE_SUPPORT, gpu_multicore_support), 146 + FEATURE_MAPPING(GPU_VIRTUALISATION, gpu_virtualisation), 147 + FEATURE_MAPPING(GS_RTA_SUPPORT, gs_rta_support), 148 + FEATURE_MAPPING(IRQ_PER_OS, irq_per_os), 149 + FEATURE_MAPPING_VALUE(ISP_MAX_TILES_IN_FLIGHT, isp_max_tiles_in_flight), 150 + FEATURE_MAPPING_VALUE(ISP_SAMPLES_PER_PIXEL, isp_samples_per_pixel), 151 + FEATURE_MAPPING(ISP_ZLS_D24_S8_PACKING_OGL_MODE, isp_zls_d24_s8_packing_ogl_mode), 152 + FEATURE_MAPPING_VALUE(LAYOUT_MARS, layout_mars), 153 + FEATURE_MAPPING_VALUE(MAX_PARTITIONS, max_partitions), 154 + FEATURE_MAPPING_VALUE(META, meta), 155 + FEATURE_MAPPING_VALUE(META_COREMEM_SIZE, meta_coremem_size), 156 + FEATURE_MAPPING(MIPS, mips), 157 + FEATURE_MAPPING_VALUE(NUM_CLUSTERS, num_clusters), 158 + FEATURE_MAPPING_VALUE(NUM_ISP_IPP_PIPES, num_isp_ipp_pipes), 159 + FEATURE_MAPPING_VALUE(NUM_OSIDS, num_osids), 160 + FEATURE_MAPPING_VALUE(NUM_RASTER_PIPES, num_raster_pipes), 161 + FEATURE_MAPPING(PBE2_IN_XE, pbe2_in_xe), 162 + FEATURE_MAPPING(PBVNC_COREID_REG, pbvnc_coreid_reg), 163 + FEATURE_MAPPING(PERFBUS, perfbus), 164 + FEATURE_MAPPING(PERF_COUNTER_BATCH, perf_counter_batch), 165 + FEATURE_MAPPING_VALUE(PHYS_BUS_WIDTH, phys_bus_width), 166 + FEATURE_MAPPING(RISCV_FW_PROCESSOR, riscv_fw_processor), 167 + FEATURE_MAPPING(ROGUEXE, roguexe), 168 + FEATURE_MAPPING(S7_TOP_INFRASTRUCTURE, s7_top_infrastructure), 169 + FEATURE_MAPPING(SIMPLE_INTERNAL_PARAMETER_FORMAT, simple_internal_parameter_format), 170 + FEATURE_MAPPING(SIMPLE_INTERNAL_PARAMETER_FORMAT_V2, simple_internal_parameter_format_v2), 171 + FEATURE_MAPPING_VALUE(SIMPLE_PARAMETER_FORMAT_VERSION, simple_parameter_format_version), 172 + FEATURE_MAPPING_VALUE(SLC_BANKS, slc_banks), 173 + FEATURE_MAPPING_VALUE(SLC_CACHE_LINE_SIZE_BITS, slc_cache_line_size_bits), 174 + FEATURE_MAPPING(SLC_SIZE_CONFIGURABLE, slc_size_configurable), 175 + FEATURE_MAPPING_VALUE(SLC_SIZE_IN_KILOBYTES, slc_size_in_kilobytes), 176 + 
FEATURE_MAPPING(SOC_TIMER, soc_timer), 177 + FEATURE_MAPPING(SYS_BUS_SECURE_RESET, sys_bus_secure_reset), 178 + FEATURE_MAPPING(TESSELLATION, tessellation), 179 + FEATURE_MAPPING(TILE_REGION_PROTECTION, tile_region_protection), 180 + FEATURE_MAPPING_VALUE(TILE_SIZE_X, tile_size_x), 181 + FEATURE_MAPPING_VALUE(TILE_SIZE_Y, tile_size_y), 182 + FEATURE_MAPPING(TLA, tla), 183 + FEATURE_MAPPING(TPU_CEM_DATAMASTER_GLOBAL_REGISTERS, tpu_cem_datamaster_global_registers), 184 + FEATURE_MAPPING(TPU_DM_GLOBAL_REGISTERS, tpu_dm_global_registers), 185 + FEATURE_MAPPING(TPU_FILTERING_MODE_CONTROL, tpu_filtering_mode_control), 186 + FEATURE_MAPPING_VALUE(USC_MIN_OUTPUT_REGISTERS_PER_PIX, usc_min_output_registers_per_pix), 187 + FEATURE_MAPPING(VDM_DRAWINDIRECT, vdm_drawindirect), 188 + FEATURE_MAPPING(VDM_OBJECT_LEVEL_LLS, vdm_object_level_lls), 189 + FEATURE_MAPPING_VALUE(VIRTUAL_ADDRESS_SPACE_BITS, virtual_address_space_bits), 190 + FEATURE_MAPPING(WATCHDOG_TIMER, watchdog_timer), 191 + FEATURE_MAPPING(WORKGROUP_PROTECTION, workgroup_protection), 192 + FEATURE_MAPPING_VALUE(XE_ARCHITECTURE, xe_architecture), 193 + FEATURE_MAPPING(XE_MEMORY_HIERARCHY, xe_memory_hierarchy), 194 + FEATURE_MAPPING(XE_TPU2, xe_tpu2), 195 + FEATURE_MAPPING_VALUE(XPU_MAX_REGBANKS_ADDR_WIDTH, xpu_max_regbanks_addr_width), 196 + FEATURE_MAPPING_VALUE(XPU_MAX_SLAVES, xpu_max_slaves), 197 + FEATURE_MAPPING_VALUE(XPU_REGISTER_BROADCAST, xpu_register_broadcast), 198 + FEATURE_MAPPING(XT_TOP_INFRASTRUCTURE, xt_top_infrastructure), 199 + FEATURE_MAPPING(ZLS_SUBTILE, zls_subtile), 200 + }; 201 + 202 + #undef FEATURE_MAPPING_VALUE 203 + #undef FEATURE_MAPPING 204 + 205 + /** 206 + * pvr_device_info_set_features() - Set device features from device information in firmware 207 + * @pvr_dev: Device pointer. 208 + * @features: Pointer to features mask in device information. 209 + * @features_size: Size of features mask, in u64s. 210 + * @feature_param_size: Size of feature parameters, in u64s. 
211 + * 212 + * Returns: 213 + * * 0 on success, or 214 + * * -%EINVAL on malformed stream. 215 + */ 216 + int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features, u32 features_size, 217 + u32 feature_param_size) 218 + { 219 + const u32 mapping_max = ARRAY_SIZE(features_mapping); 220 + const u32 mapping_max_size = (mapping_max + 63) >> 6; 221 + const u32 nr_bits = min(features_size * 64, mapping_max); 222 + const u64 *feature_params = features + features_size; 223 + u32 param_idx = 0; 224 + 225 + BUILD_BUG_ON(ARRAY_SIZE(features_mapping) != PVR_FW_HAS_FEATURE_MAX); 226 + 227 + /* Verify no unsupported values in the bitmask. */ 228 + if (features_size > mapping_max_size) { 229 + drm_warn(from_pvr_device(pvr_dev), "Unsupported features in firmware image"); 230 + } else if (features_size == mapping_max_size && (mapping_max & 63)) { 231 + u64 invalid_mask = ~0ull << (mapping_max & 63); 232 + 233 + if (features[features_size - 1] & invalid_mask) 234 + drm_warn(from_pvr_device(pvr_dev), 235 + "Unsupported features in firmware image"); 236 + } 237 + 238 + for (u32 i = 0; i < nr_bits; i++) { 239 + if (features[i >> 6] & BIT_ULL(i & 63)) { 240 + *(bool *)((u8 *)pvr_dev + features_mapping[i].flag_offset) = true; 241 + 242 + if (features_mapping[i].value_offset) { 243 + if (param_idx >= feature_param_size) 244 + return -EINVAL; 245 + 246 + *(u64 *)((u8 *)pvr_dev + features_mapping[i].value_offset) = 247 + feature_params[param_idx]; 248 + param_idx++; 249 + } 250 + } 251 + } 252 + 253 + return 0; 254 + }
+186
drivers/gpu/drm/imagination/pvr_device_info.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0-only OR MIT */ 2 + /* Copyright (c) 2023 Imagination Technologies Ltd. */ 3 + 4 + #ifndef PVR_DEVICE_INFO_H 5 + #define PVR_DEVICE_INFO_H 6 + 7 + #include <linux/types.h> 8 + 9 + struct pvr_device; 10 + 11 + /* 12 + * struct pvr_device_features - Hardware feature information 13 + */ 14 + struct pvr_device_features { 15 + bool has_axi_acelite; 16 + bool has_cdm_control_stream_format; 17 + bool has_cluster_grouping; 18 + bool has_common_store_size_in_dwords; 19 + bool has_compute; 20 + bool has_compute_morton_capable; 21 + bool has_compute_overlap; 22 + bool has_coreid_per_os; 23 + bool has_dynamic_dust_power; 24 + bool has_ecc_rams; 25 + bool has_fb_cdc_v4; 26 + bool has_fbc_max_default_descriptors; 27 + bool has_fbc_max_large_descriptors; 28 + bool has_fbcdc; 29 + bool has_fbcdc_algorithm; 30 + bool has_fbcdc_architecture; 31 + bool has_gpu_multicore_support; 32 + bool has_gpu_virtualisation; 33 + bool has_gs_rta_support; 34 + bool has_irq_per_os; 35 + bool has_isp_max_tiles_in_flight; 36 + bool has_isp_samples_per_pixel; 37 + bool has_isp_zls_d24_s8_packing_ogl_mode; 38 + bool has_layout_mars; 39 + bool has_max_partitions; 40 + bool has_meta; 41 + bool has_meta_coremem_size; 42 + bool has_mips; 43 + bool has_num_clusters; 44 + bool has_num_isp_ipp_pipes; 45 + bool has_num_osids; 46 + bool has_num_raster_pipes; 47 + bool has_pbe2_in_xe; 48 + bool has_pbvnc_coreid_reg; 49 + bool has_perfbus; 50 + bool has_perf_counter_batch; 51 + bool has_phys_bus_width; 52 + bool has_riscv_fw_processor; 53 + bool has_roguexe; 54 + bool has_s7_top_infrastructure; 55 + bool has_simple_internal_parameter_format; 56 + bool has_simple_internal_parameter_format_v2; 57 + bool has_simple_parameter_format_version; 58 + bool has_slc_banks; 59 + bool has_slc_cache_line_size_bits; 60 + bool has_slc_size_configurable; 61 + bool has_slc_size_in_kilobytes; 62 + bool has_soc_timer; 63 + bool has_sys_bus_secure_reset; 64 + bool has_tessellation; 65 + bool 
has_tile_region_protection; 66 + bool has_tile_size_x; 67 + bool has_tile_size_y; 68 + bool has_tla; 69 + bool has_tpu_cem_datamaster_global_registers; 70 + bool has_tpu_dm_global_registers; 71 + bool has_tpu_filtering_mode_control; 72 + bool has_usc_min_output_registers_per_pix; 73 + bool has_vdm_drawindirect; 74 + bool has_vdm_object_level_lls; 75 + bool has_virtual_address_space_bits; 76 + bool has_watchdog_timer; 77 + bool has_workgroup_protection; 78 + bool has_xe_architecture; 79 + bool has_xe_memory_hierarchy; 80 + bool has_xe_tpu2; 81 + bool has_xpu_max_regbanks_addr_width; 82 + bool has_xpu_max_slaves; 83 + bool has_xpu_register_broadcast; 84 + bool has_xt_top_infrastructure; 85 + bool has_zls_subtile; 86 + 87 + u64 cdm_control_stream_format; 88 + u64 common_store_size_in_dwords; 89 + u64 ecc_rams; 90 + u64 fbc_max_default_descriptors; 91 + u64 fbc_max_large_descriptors; 92 + u64 fbcdc; 93 + u64 fbcdc_algorithm; 94 + u64 fbcdc_architecture; 95 + u64 isp_max_tiles_in_flight; 96 + u64 isp_samples_per_pixel; 97 + u64 layout_mars; 98 + u64 max_partitions; 99 + u64 meta; 100 + u64 meta_coremem_size; 101 + u64 num_clusters; 102 + u64 num_isp_ipp_pipes; 103 + u64 num_osids; 104 + u64 num_raster_pipes; 105 + u64 phys_bus_width; 106 + u64 simple_parameter_format_version; 107 + u64 slc_banks; 108 + u64 slc_cache_line_size_bits; 109 + u64 slc_size_in_kilobytes; 110 + u64 tile_size_x; 111 + u64 tile_size_y; 112 + u64 usc_min_output_registers_per_pix; 113 + u64 virtual_address_space_bits; 114 + u64 xe_architecture; 115 + u64 xpu_max_regbanks_addr_width; 116 + u64 xpu_max_slaves; 117 + u64 xpu_register_broadcast; 118 + }; 119 + 120 + /* 121 + * struct pvr_device_quirks - Hardware quirk information 122 + */ 123 + struct pvr_device_quirks { 124 + bool has_brn44079; 125 + bool has_brn47217; 126 + bool has_brn48492; 127 + bool has_brn48545; 128 + bool has_brn49927; 129 + bool has_brn50767; 130 + bool has_brn51764; 131 + bool has_brn62269; 132 + bool has_brn63142; 133 + bool 
has_brn63553; 134 + bool has_brn66011; 135 + bool has_brn71242; 136 + }; 137 + 138 + /* 139 + * struct pvr_device_enhancements - Hardware enhancement information 140 + */ 141 + struct pvr_device_enhancements { 142 + bool has_ern35421; 143 + bool has_ern38020; 144 + bool has_ern38748; 145 + bool has_ern42064; 146 + bool has_ern42290; 147 + bool has_ern42606; 148 + bool has_ern47025; 149 + bool has_ern57596; 150 + }; 151 + 152 + void pvr_device_info_set_quirks(struct pvr_device *pvr_dev, const u64 *bitmask, 153 + u32 bitmask_len); 154 + void pvr_device_info_set_enhancements(struct pvr_device *pvr_dev, const u64 *bitmask, 155 + u32 bitmask_len); 156 + int pvr_device_info_set_features(struct pvr_device *pvr_dev, const u64 *features, u32 features_size, 157 + u32 feature_param_size); 158 + 159 + /* 160 + * Meta cores 161 + * 162 + * These are the values for the 'meta' feature when the feature is present 163 + * (as per &struct pvr_device_features)/ 164 + */ 165 + #define PVR_META_MTP218 (1) 166 + #define PVR_META_MTP219 (2) 167 + #define PVR_META_LTP218 (3) 168 + #define PVR_META_LTP217 (4) 169 + 170 + enum { 171 + PVR_FEATURE_CDM_USER_MODE_QUEUE, 172 + PVR_FEATURE_CLUSTER_GROUPING, 173 + PVR_FEATURE_COMPUTE_MORTON_CAPABLE, 174 + PVR_FEATURE_FB_CDC_V4, 175 + PVR_FEATURE_GPU_MULTICORE_SUPPORT, 176 + PVR_FEATURE_ISP_ZLS_D24_S8_PACKING_OGL_MODE, 177 + PVR_FEATURE_REQUIRES_FB_CDC_ZLS_SETUP, 178 + PVR_FEATURE_S7_TOP_INFRASTRUCTURE, 179 + PVR_FEATURE_TESSELLATION, 180 + PVR_FEATURE_TPU_DM_GLOBAL_REGISTERS, 181 + PVR_FEATURE_VDM_DRAWINDIRECT, 182 + PVR_FEATURE_VDM_OBJECT_LEVEL_LLS, 183 + PVR_FEATURE_ZLS_SUBTILE, 184 + }; 185 + 186 + #endif /* PVR_DEVICE_INFO_H */
+520 -1
drivers/gpu/drm/imagination/pvr_drv.c
··· 3 3 4 4 #include "pvr_device.h" 5 5 #include "pvr_drv.h" 6 + #include "pvr_rogue_defs.h" 7 + #include "pvr_rogue_fwif_client.h" 8 + #include "pvr_rogue_fwif_shared.h" 6 9 7 10 #include <uapi/drm/pvr_drm.h> 8 11 ··· 90 87 return -ENOTTY; 91 88 } 92 89 90 + static __always_inline u64 91 + pvr_fw_version_packed(u32 major, u32 minor) 92 + { 93 + return ((u64)major << 32) | minor; 94 + } 95 + 96 + static u32 97 + rogue_get_common_store_partition_space_size(struct pvr_device *pvr_dev) 98 + { 99 + u32 max_partitions = 0; 100 + u32 tile_size_x = 0; 101 + u32 tile_size_y = 0; 102 + 103 + PVR_FEATURE_VALUE(pvr_dev, tile_size_x, &tile_size_x); 104 + PVR_FEATURE_VALUE(pvr_dev, tile_size_y, &tile_size_y); 105 + PVR_FEATURE_VALUE(pvr_dev, max_partitions, &max_partitions); 106 + 107 + if (tile_size_x == 16 && tile_size_y == 16) { 108 + u32 usc_min_output_registers_per_pix = 0; 109 + 110 + PVR_FEATURE_VALUE(pvr_dev, usc_min_output_registers_per_pix, 111 + &usc_min_output_registers_per_pix); 112 + 113 + return tile_size_x * tile_size_y * max_partitions * 114 + usc_min_output_registers_per_pix; 115 + } 116 + 117 + return max_partitions * 1024; 118 + } 119 + 120 + static u32 121 + rogue_get_common_store_alloc_region_size(struct pvr_device *pvr_dev) 122 + { 123 + u32 common_store_size_in_dwords = 512 * 4 * 4; 124 + u32 alloc_region_size; 125 + 126 + PVR_FEATURE_VALUE(pvr_dev, common_store_size_in_dwords, &common_store_size_in_dwords); 127 + 128 + alloc_region_size = common_store_size_in_dwords - (256U * 4U) - 129 + rogue_get_common_store_partition_space_size(pvr_dev); 130 + 131 + if (PVR_HAS_QUIRK(pvr_dev, 44079)) { 132 + u32 common_store_split_point = (768U * 4U * 4U); 133 + 134 + return min(common_store_split_point - (256U * 4U), alloc_region_size); 135 + } 136 + 137 + return alloc_region_size; 138 + } 139 + 140 + static inline u32 141 + rogue_get_num_phantoms(struct pvr_device *pvr_dev) 142 + { 143 + u32 num_clusters = 1; 144 + 145 + PVR_FEATURE_VALUE(pvr_dev, num_clusters, 
&num_clusters); 146 + 147 + return ROGUE_REQ_NUM_PHANTOMS(num_clusters); 148 + } 149 + 150 + static inline u32 151 + rogue_get_max_coeffs(struct pvr_device *pvr_dev) 152 + { 153 + u32 max_coeff_additional_portion = ROGUE_MAX_VERTEX_SHARED_REGISTERS; 154 + u32 pending_allocation_shared_regs = 2U * 1024U; 155 + u32 pending_allocation_coeff_regs = 0U; 156 + u32 num_phantoms = rogue_get_num_phantoms(pvr_dev); 157 + u32 tiles_in_flight = 0; 158 + u32 max_coeff_pixel_portion; 159 + 160 + PVR_FEATURE_VALUE(pvr_dev, isp_max_tiles_in_flight, &tiles_in_flight); 161 + max_coeff_pixel_portion = DIV_ROUND_UP(tiles_in_flight, num_phantoms); 162 + max_coeff_pixel_portion *= ROGUE_MAX_PIXEL_SHARED_REGISTERS; 163 + 164 + /* 165 + * Compute tasks on cores with BRN48492 and without compute overlap may lock 166 + * up without two additional lines of coeffs. 167 + */ 168 + if (PVR_HAS_QUIRK(pvr_dev, 48492) && !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) 169 + pending_allocation_coeff_regs = 2U * 1024U; 170 + 171 + if (PVR_HAS_ENHANCEMENT(pvr_dev, 38748)) 172 + pending_allocation_shared_regs = 0; 173 + 174 + if (PVR_HAS_ENHANCEMENT(pvr_dev, 38020)) 175 + max_coeff_additional_portion += ROGUE_MAX_COMPUTE_SHARED_REGISTERS; 176 + 177 + return rogue_get_common_store_alloc_region_size(pvr_dev) + pending_allocation_coeff_regs - 178 + (max_coeff_pixel_portion + max_coeff_additional_portion + 179 + pending_allocation_shared_regs); 180 + } 181 + 182 + static inline u32 183 + rogue_get_cdm_max_local_mem_size_regs(struct pvr_device *pvr_dev) 184 + { 185 + u32 available_coeffs_in_dwords = rogue_get_max_coeffs(pvr_dev); 186 + 187 + if (PVR_HAS_QUIRK(pvr_dev, 48492) && PVR_HAS_FEATURE(pvr_dev, roguexe) && 188 + !PVR_HAS_FEATURE(pvr_dev, compute_overlap)) { 189 + /* Driver must not use the 2 reserved lines. 
*/ 190 + available_coeffs_in_dwords -= ROGUE_CSRM_LINE_SIZE_IN_DWORDS * 2; 191 + } 192 + 193 + /* 194 + * The maximum amount of local memory available to a kernel is the minimum 195 + * of the total number of coefficient registers available and the max common 196 + * store allocation size which can be made by the CDM. 197 + * 198 + * If any coeff lines are reserved for tessellation or pixel then we need to 199 + * subtract those too. 200 + */ 201 + return min(available_coeffs_in_dwords, (u32)ROGUE_MAX_PER_KERNEL_LOCAL_MEM_SIZE_REGS); 202 + } 203 + 204 + /** 205 + * pvr_dev_query_gpu_info_get() 206 + * @pvr_dev: Device pointer. 207 + * @args: [IN] Device query arguments containing a pointer to a userspace 208 + * struct drm_pvr_dev_query_gpu_info. 209 + * 210 + * If the query object pointer is NULL, the size field is updated with the 211 + * expected size of the query object. 212 + * 213 + * Returns: 214 + * * 0 on success, or if size is requested using a NULL pointer, or 215 + * * -%E2BIG if the indicated length of the allocation is less than is 216 + * required to contain the copied data, or 217 + * * -%EFAULT if local memory could not be copied to userspace. 218 + */ 219 + static int 220 + pvr_dev_query_gpu_info_get(struct pvr_device *pvr_dev, 221 + struct drm_pvr_ioctl_dev_query_args *args) 222 + { 223 + struct drm_pvr_dev_query_gpu_info gpu_info = {0}; 224 + int err; 225 + 226 + if (!args->pointer) { 227 + args->size = sizeof(struct drm_pvr_dev_query_gpu_info); 228 + return 0; 229 + } 230 + 231 + gpu_info.gpu_id = 232 + pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id); 233 + gpu_info.num_phantoms = rogue_get_num_phantoms(pvr_dev); 234 + 235 + err = PVR_UOBJ_SET(args->pointer, args->size, gpu_info); 236 + if (err < 0) 237 + return err; 238 + 239 + if (args->size > sizeof(gpu_info)) 240 + args->size = sizeof(gpu_info); 241 + return 0; 242 + } 243 + 244 + /** 245 + * pvr_dev_query_runtime_info_get() 246 + * @pvr_dev: Device pointer. 
247 + * @args: [IN] Device query arguments containing a pointer to a userspace 248 + * struct drm_pvr_dev_query_runtime_info. 249 + * 250 + * If the query object pointer is NULL, the size field is updated with the 251 + * expected size of the query object. 252 + * 253 + * Returns: 254 + * * 0 on success, or if size is requested using a NULL pointer, or 255 + * * -%E2BIG if the indicated length of the allocation is less than is 256 + * required to contain the copied data, or 257 + * * -%EFAULT if local memory could not be copied to userspace. 258 + */ 259 + static int 260 + pvr_dev_query_runtime_info_get(struct pvr_device *pvr_dev, 261 + struct drm_pvr_ioctl_dev_query_args *args) 262 + { 263 + struct drm_pvr_dev_query_runtime_info runtime_info = {0}; 264 + int err; 265 + 266 + if (!args->pointer) { 267 + args->size = sizeof(struct drm_pvr_dev_query_runtime_info); 268 + return 0; 269 + } 270 + 271 + runtime_info.free_list_min_pages = 0; /* FIXME */ 272 + runtime_info.free_list_max_pages = 273 + ROGUE_PM_MAX_FREELIST_SIZE / ROGUE_PM_PAGE_SIZE; 274 + runtime_info.common_store_alloc_region_size = 275 + rogue_get_common_store_alloc_region_size(pvr_dev); 276 + runtime_info.common_store_partition_space_size = 277 + rogue_get_common_store_partition_space_size(pvr_dev); 278 + runtime_info.max_coeffs = rogue_get_max_coeffs(pvr_dev); 279 + runtime_info.cdm_max_local_mem_size_regs = 280 + rogue_get_cdm_max_local_mem_size_regs(pvr_dev); 281 + 282 + err = PVR_UOBJ_SET(args->pointer, args->size, runtime_info); 283 + if (err < 0) 284 + return err; 285 + 286 + if (args->size > sizeof(runtime_info)) 287 + args->size = sizeof(runtime_info); 288 + return 0; 289 + } 290 + 291 + /** 292 + * pvr_dev_query_quirks_get() - Unpack array of quirks at the address given 293 + * in a struct drm_pvr_dev_query_quirks, or gets the amount of space required 294 + * for it. 295 + * @pvr_dev: Device pointer. 
296 + * @args: [IN] Device query arguments containing a pointer to a userspace 297 + * struct drm_pvr_dev_query_query_quirks. 298 + * 299 + * If the query object pointer is NULL, the size field is updated with the 300 + * expected size of the query object. 301 + * If the userspace pointer in the query object is NULL, or the count is 302 + * short, no data is copied. 303 + * The count field will be updated to that copied, or if either pointer is 304 + * NULL, that which would have been copied. 305 + * The size field in the query object will be updated to the size copied. 306 + * 307 + * Returns: 308 + * * 0 on success, or if size/count is requested using a NULL pointer, or 309 + * * -%EINVAL if args contained non-zero reserved fields, or 310 + * * -%E2BIG if the indicated length of the allocation is less than is 311 + * required to contain the copied data, or 312 + * * -%EFAULT if local memory could not be copied to userspace. 313 + */ 314 + static int 315 + pvr_dev_query_quirks_get(struct pvr_device *pvr_dev, 316 + struct drm_pvr_ioctl_dev_query_args *args) 317 + { 318 + /* 319 + * @FIXME - hardcoding of numbers here is intended as an 320 + * intermediate step so the UAPI can be fixed, but requires a 321 + * a refactor in the future to store them in a more appropriate 322 + * location 323 + */ 324 + static const u32 umd_quirks_musthave[] = { 325 + 47217, 326 + 49927, 327 + 62269, 328 + }; 329 + static const u32 umd_quirks[] = { 330 + 48545, 331 + 51764, 332 + }; 333 + struct drm_pvr_dev_query_quirks query; 334 + u32 out[ARRAY_SIZE(umd_quirks_musthave) + ARRAY_SIZE(umd_quirks)]; 335 + size_t out_musthave_count = 0; 336 + size_t out_count = 0; 337 + int err; 338 + 339 + if (!args->pointer) { 340 + args->size = sizeof(struct drm_pvr_dev_query_quirks); 341 + return 0; 342 + } 343 + 344 + err = PVR_UOBJ_GET(query, args->size, args->pointer); 345 + 346 + if (err < 0) 347 + return err; 348 + if (query._padding_c) 349 + return -EINVAL; 350 + 351 + for (int i = 0; i < 
ARRAY_SIZE(umd_quirks_musthave); i++) { 352 + if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks_musthave[i])) { 353 + out[out_count++] = umd_quirks_musthave[i]; 354 + out_musthave_count++; 355 + } 356 + } 357 + 358 + for (int i = 0; i < ARRAY_SIZE(umd_quirks); i++) { 359 + if (pvr_device_has_uapi_quirk(pvr_dev, umd_quirks[i])) 360 + out[out_count++] = umd_quirks[i]; 361 + } 362 + 363 + if (!query.quirks) 364 + goto copy_out; 365 + if (query.count < out_count) 366 + return -E2BIG; 367 + 368 + if (copy_to_user(u64_to_user_ptr(query.quirks), out, 369 + out_count * sizeof(u32))) { 370 + return -EFAULT; 371 + } 372 + 373 + query.musthave_count = out_musthave_count; 374 + 375 + copy_out: 376 + query.count = out_count; 377 + err = PVR_UOBJ_SET(args->pointer, args->size, query); 378 + if (err < 0) 379 + return err; 380 + 381 + args->size = sizeof(query); 382 + return 0; 383 + } 384 + 385 + /** 386 + * pvr_dev_query_enhancements_get() - Unpack array of enhancements at the 387 + * address given in a struct drm_pvr_dev_query_enhancements, or gets the amount 388 + * of space required for it. 389 + * @pvr_dev: Device pointer. 390 + * @args: [IN] Device query arguments containing a pointer to a userspace 391 + * struct drm_pvr_dev_query_enhancements. 392 + * 393 + * If the query object pointer is NULL, the size field is updated with the 394 + * expected size of the query object. 395 + * If the userspace pointer in the query object is NULL, or the count is 396 + * short, no data is copied. 397 + * The count field will be updated to that copied, or if either pointer is 398 + * NULL, that which would have been copied. 399 + * The size field in the query object will be updated to the size copied. 
400 + * 401 + * Returns: 402 + * * 0 on success, or if size/count is requested using a NULL pointer, or 403 + * * -%EINVAL if args contained non-zero reserved fields, or 404 + * * -%E2BIG if the indicated length of the allocation is less than is 405 + * required to contain the copied data, or 406 + * * -%EFAULT if local memory could not be copied to userspace. 407 + */ 408 + static int 409 + pvr_dev_query_enhancements_get(struct pvr_device *pvr_dev, 410 + struct drm_pvr_ioctl_dev_query_args *args) 411 + { 412 + /* 413 + * @FIXME - hardcoding of numbers here is intended as an 414 + * intermediate step so the UAPI can be fixed, but requires a 415 + * a refactor in the future to store them in a more appropriate 416 + * location 417 + */ 418 + const u32 umd_enhancements[] = { 419 + 35421, 420 + 42064, 421 + }; 422 + struct drm_pvr_dev_query_enhancements query; 423 + u32 out[ARRAY_SIZE(umd_enhancements)]; 424 + size_t out_idx = 0; 425 + int err; 426 + 427 + if (!args->pointer) { 428 + args->size = sizeof(struct drm_pvr_dev_query_enhancements); 429 + return 0; 430 + } 431 + 432 + err = PVR_UOBJ_GET(query, args->size, args->pointer); 433 + 434 + if (err < 0) 435 + return err; 436 + if (query._padding_a) 437 + return -EINVAL; 438 + if (query._padding_c) 439 + return -EINVAL; 440 + 441 + for (int i = 0; i < ARRAY_SIZE(umd_enhancements); i++) { 442 + if (pvr_device_has_uapi_enhancement(pvr_dev, umd_enhancements[i])) 443 + out[out_idx++] = umd_enhancements[i]; 444 + } 445 + 446 + if (!query.enhancements) 447 + goto copy_out; 448 + if (query.count < out_idx) 449 + return -E2BIG; 450 + 451 + if (copy_to_user(u64_to_user_ptr(query.enhancements), out, 452 + out_idx * sizeof(u32))) { 453 + return -EFAULT; 454 + } 455 + 456 + copy_out: 457 + query.count = out_idx; 458 + err = PVR_UOBJ_SET(args->pointer, args->size, query); 459 + if (err < 0) 460 + return err; 461 + 462 + args->size = sizeof(query); 463 + return 0; 464 + } 465 + 93 466 /** 94 467 * pvr_ioctl_dev_query() - IOCTL to 
copy information about a device 95 468 * @drm_dev: [IN] DRM device. ··· 490 111 pvr_ioctl_dev_query(struct drm_device *drm_dev, void *raw_args, 491 112 struct drm_file *file) 492 113 { 493 - return -ENOTTY; 114 + struct pvr_device *pvr_dev = to_pvr_device(drm_dev); 115 + struct drm_pvr_ioctl_dev_query_args *args = raw_args; 116 + int idx; 117 + int ret = -EINVAL; 118 + 119 + if (!drm_dev_enter(drm_dev, &idx)) 120 + return -EIO; 121 + 122 + switch ((enum drm_pvr_dev_query)args->type) { 123 + case DRM_PVR_DEV_QUERY_GPU_INFO_GET: 124 + ret = pvr_dev_query_gpu_info_get(pvr_dev, args); 125 + break; 126 + 127 + case DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET: 128 + ret = pvr_dev_query_runtime_info_get(pvr_dev, args); 129 + break; 130 + 131 + case DRM_PVR_DEV_QUERY_QUIRKS_GET: 132 + ret = pvr_dev_query_quirks_get(pvr_dev, args); 133 + break; 134 + 135 + case DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET: 136 + ret = pvr_dev_query_enhancements_get(pvr_dev, args); 137 + break; 138 + 139 + case DRM_PVR_DEV_QUERY_HEAP_INFO_GET: 140 + return -EINVAL; 141 + 142 + case DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET: 143 + return -EINVAL; 144 + } 145 + 146 + drm_dev_exit(idx); 147 + 148 + return ret; 494 149 } 495 150 496 151 /** ··· 760 347 struct drm_file *file) 761 348 { 762 349 return -ENOTTY; 350 + } 351 + 352 + int 353 + pvr_get_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, void *out) 354 + { 355 + if (usr_stride < min_stride) 356 + return -EINVAL; 357 + 358 + return copy_struct_from_user(out, obj_size, u64_to_user_ptr(usr_ptr), usr_stride); 359 + } 360 + 361 + int 362 + pvr_set_uobj(u64 usr_ptr, u32 usr_stride, u32 min_stride, u32 obj_size, const void *in) 363 + { 364 + if (usr_stride < min_stride) 365 + return -EINVAL; 366 + 367 + if (copy_to_user(u64_to_user_ptr(usr_ptr), in, min_t(u32, usr_stride, obj_size))) 368 + return -EFAULT; 369 + 370 + if (usr_stride > obj_size && 371 + clear_user(u64_to_user_ptr(usr_ptr + obj_size), usr_stride - obj_size)) { 372 + return -EFAULT; 373 + 
} 374 + 375 + return 0; 376 + } 377 + 378 + int 379 + pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, void **out) 380 + { 381 + int ret = 0; 382 + void *out_alloc; 383 + 384 + if (in->stride < min_stride) 385 + return -EINVAL; 386 + 387 + if (!in->count) 388 + return 0; 389 + 390 + out_alloc = kvmalloc_array(in->count, obj_size, GFP_KERNEL); 391 + if (!out_alloc) 392 + return -ENOMEM; 393 + 394 + if (obj_size == in->stride) { 395 + if (copy_from_user(out_alloc, u64_to_user_ptr(in->array), 396 + (unsigned long)obj_size * in->count)) 397 + ret = -EFAULT; 398 + } else { 399 + void __user *in_ptr = u64_to_user_ptr(in->array); 400 + void *out_ptr = out_alloc; 401 + 402 + for (u32 i = 0; i < in->count; i++) { 403 + ret = copy_struct_from_user(out_ptr, obj_size, in_ptr, in->stride); 404 + if (ret) 405 + break; 406 + 407 + out_ptr += obj_size; 408 + in_ptr += in->stride; 409 + } 410 + } 411 + 412 + if (ret) { 413 + kvfree(out_alloc); 414 + return ret; 415 + } 416 + 417 + *out = out_alloc; 418 + return 0; 419 + } 420 + 421 + int 422 + pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size, 423 + const void *in) 424 + { 425 + if (out->stride < min_stride) 426 + return -EINVAL; 427 + 428 + if (!out->count) 429 + return 0; 430 + 431 + if (obj_size == out->stride) { 432 + if (copy_to_user(u64_to_user_ptr(out->array), in, 433 + (unsigned long)obj_size * out->count)) 434 + return -EFAULT; 435 + } else { 436 + u32 cpy_elem_size = min_t(u32, out->stride, obj_size); 437 + void __user *out_ptr = u64_to_user_ptr(out->array); 438 + const void *in_ptr = in; 439 + 440 + for (u32 i = 0; i < out->count; i++) { 441 + if (copy_to_user(out_ptr, in_ptr, cpy_elem_size)) 442 + return -EFAULT; 443 + 444 + out_ptr += obj_size; 445 + in_ptr += out->stride; 446 + } 447 + 448 + if (out->stride > obj_size && 449 + clear_user(u64_to_user_ptr(out->array + obj_size), 450 + out->stride - obj_size)) { 451 + return -EFAULT; 452 + } 453 + } 
454 + 455 + return 0; 763 456 } 764 457 765 458 #define DRM_PVR_IOCTL(_name, _func, _flags) \
+107
drivers/gpu/drm/imagination/pvr_drv.h
··· 19 19 #define PVR_DRIVER_MINOR 0 20 20 #define PVR_DRIVER_PATCHLEVEL 0 21 21 22 + int pvr_get_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 obj_size, void *out); 23 + int pvr_set_uobj(u64 usr_ptr, u32 usr_size, u32 min_size, u32 obj_size, const void *in); 24 + int pvr_get_uobj_array(const struct drm_pvr_obj_array *in, u32 min_stride, u32 obj_size, 25 + void **out); 26 + int pvr_set_uobj_array(const struct drm_pvr_obj_array *out, u32 min_stride, u32 obj_size, 27 + const void *in); 28 + 29 + #define PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) \ 30 + (offsetof(_typename, _last_mandatory_field) + \ 31 + sizeof(((_typename *)NULL)->_last_mandatory_field)) 32 + 33 + /* NOLINTBEGIN(bugprone-macro-parentheses) */ 34 + #define PVR_UOBJ_DECL(_typename, _last_mandatory_field) \ 35 + , _typename : PVR_UOBJ_MIN_SIZE_INTERNAL(_typename, _last_mandatory_field) 36 + /* NOLINTEND(bugprone-macro-parentheses) */ 37 + 38 + /** 39 + * DOC: PVR user objects. 40 + * 41 + * Macros used to aid copying structured and array data to and from 42 + * userspace. Objects can differ in size, provided the minimum size 43 + * allowed is specified (using the last mandatory field in the struct). 44 + * All types used with PVR_UOBJ_GET/SET macros must be listed here under 45 + * PVR_UOBJ_MIN_SIZE, with the last mandatory struct field specified. 46 + */ 47 + 48 + /** 49 + * PVR_UOBJ_MIN_SIZE() - Fetch the minimum copy size of a compatible type object. 50 + * @_obj_name: The name of the object. Cannot be a typename - this is deduced. 51 + * 52 + * This cannot fail. Using the macro with an incompatible type will result in a 53 + * compiler error. 54 + * 55 + * To add compatibility for a type, list it within the macro in an orderly 56 + * fashion. The second argument is the name of the last mandatory field of the 57 + * struct type, which is used to calculate the size. See also PVR_UOBJ_DECL(). 58 + * 59 + * Return: The minimum copy size. 
60 + */ 61 + #define PVR_UOBJ_MIN_SIZE(_obj_name) _Generic(_obj_name \ 62 + PVR_UOBJ_DECL(struct drm_pvr_job, hwrt) \ 63 + PVR_UOBJ_DECL(struct drm_pvr_sync_op, value) \ 64 + PVR_UOBJ_DECL(struct drm_pvr_dev_query_gpu_info, num_phantoms) \ 65 + PVR_UOBJ_DECL(struct drm_pvr_dev_query_runtime_info, cdm_max_local_mem_size_regs) \ 66 + PVR_UOBJ_DECL(struct drm_pvr_dev_query_quirks, _padding_c) \ 67 + PVR_UOBJ_DECL(struct drm_pvr_dev_query_enhancements, _padding_c) \ 68 + PVR_UOBJ_DECL(struct drm_pvr_heap, page_size_log2) \ 69 + PVR_UOBJ_DECL(struct drm_pvr_dev_query_heap_info, heaps) \ 70 + PVR_UOBJ_DECL(struct drm_pvr_static_data_area, offset) \ 71 + PVR_UOBJ_DECL(struct drm_pvr_dev_query_static_data_areas, static_data_areas) \ 72 + ) 73 + 74 + /** 75 + * PVR_UOBJ_GET() - Copies from _src_usr_ptr to &_dest_obj. 76 + * @_dest_obj: The destination container object in kernel space. 77 + * @_usr_size: The size of the source container in user space. 78 + * @_src_usr_ptr: __u64 raw pointer to the source container in user space. 79 + * 80 + * Return: Error code. See pvr_get_uobj(). 81 + */ 82 + #define PVR_UOBJ_GET(_dest_obj, _usr_size, _src_usr_ptr) \ 83 + pvr_get_uobj(_src_usr_ptr, _usr_size, \ 84 + PVR_UOBJ_MIN_SIZE(_dest_obj), \ 85 + sizeof(_dest_obj), &(_dest_obj)) 86 + 87 + /** 88 + * PVR_UOBJ_SET() - Copies from &_src_obj to _dest_usr_ptr. 89 + * @_dest_usr_ptr: __u64 raw pointer to the destination container in user space. 90 + * @_usr_size: The size of the destination container in user space. 91 + * @_src_obj: The source container object in kernel space. 92 + * 93 + * Return: Error code. See pvr_set_uobj(). 94 + */ 95 + #define PVR_UOBJ_SET(_dest_usr_ptr, _usr_size, _src_obj) \ 96 + pvr_set_uobj(_dest_usr_ptr, _usr_size, \ 97 + PVR_UOBJ_MIN_SIZE(_src_obj), \ 98 + sizeof(_src_obj), &(_src_obj)) 99 + 100 + /** 101 + * PVR_UOBJ_GET_ARRAY() - Copies from @_src_drm_pvr_obj_array.array to 102 + * alloced memory and returns a pointer in _dest_array. 
103 + * @_dest_array: The destination C array object in kernel space. 104 + * @_src_drm_pvr_obj_array: The &struct drm_pvr_obj_array containing a __u64 raw 105 + * pointer to the source C array in user space and the size of each array 106 + * element in user space (the 'stride'). 107 + * 108 + * Return: Error code. See pvr_get_uobj_array(). 109 + */ 110 + #define PVR_UOBJ_GET_ARRAY(_dest_array, _src_drm_pvr_obj_array) \ 111 + pvr_get_uobj_array(_src_drm_pvr_obj_array, \ 112 + PVR_UOBJ_MIN_SIZE((_dest_array)[0]), \ 113 + sizeof((_dest_array)[0]), (void **)&(_dest_array)) 114 + 115 + /** 116 + * PVR_UOBJ_SET_ARRAY() - Copies from _src_array to @_dest_drm_pvr_obj_array.array. 117 + * @_dest_drm_pvr_obj_array: The &struct drm_pvr_obj_array containing a __u64 raw 118 + * pointer to the destination C array in user space and the size of each array 119 + * element in user space (the 'stride'). 120 + * @_src_array: The source C array object in kernel space. 121 + * 122 + * Return: Error code. See pvr_set_uobj_array(). 123 + */ 124 + #define PVR_UOBJ_SET_ARRAY(_dest_drm_pvr_obj_array, _src_array) \ 125 + pvr_set_uobj_array(_dest_drm_pvr_obj_array, \ 126 + PVR_UOBJ_MIN_SIZE((_src_array)[0]), \ 127 + sizeof((_src_array)[0]), _src_array) 128 + 22 129 #endif /* PVR_DRV_H */
+145
drivers/gpu/drm/imagination/pvr_fw.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-only OR MIT 2 + /* Copyright (c) 2023 Imagination Technologies Ltd. */ 3 + 4 + #include "pvr_device.h" 5 + #include "pvr_device_info.h" 6 + #include "pvr_fw.h" 7 + 8 + #include <drm/drm_drv.h> 9 + #include <linux/firmware.h> 10 + #include <linux/sizes.h> 11 + 12 + #define FW_MAX_SUPPORTED_MAJOR_VERSION 1 13 + 14 + /** 15 + * pvr_fw_validate() - Parse firmware header and check compatibility 16 + * @pvr_dev: Device pointer. 17 + * 18 + * Returns: 19 + * * 0 on success, or 20 + * * -EINVAL if firmware is incompatible. 21 + */ 22 + static int 23 + pvr_fw_validate(struct pvr_device *pvr_dev) 24 + { 25 + struct drm_device *drm_dev = from_pvr_device(pvr_dev); 26 + const struct firmware *firmware = pvr_dev->fw_dev.firmware; 27 + const struct pvr_fw_layout_entry *layout_entries; 28 + const struct pvr_fw_info_header *header; 29 + const u8 *fw = firmware->data; 30 + u32 fw_offset = firmware->size - SZ_4K; 31 + u32 layout_table_size; 32 + u32 entry; 33 + 34 + if (firmware->size < SZ_4K || (firmware->size % FW_BLOCK_SIZE)) 35 + return -EINVAL; 36 + 37 + header = (const struct pvr_fw_info_header *)&fw[fw_offset]; 38 + 39 + if (header->info_version != PVR_FW_INFO_VERSION) { 40 + drm_err(drm_dev, "Unsupported fw info version %u\n", 41 + header->info_version); 42 + return -EINVAL; 43 + } 44 + 45 + if (header->header_len != sizeof(struct pvr_fw_info_header) || 46 + header->layout_entry_size != sizeof(struct pvr_fw_layout_entry) || 47 + header->layout_entry_num > PVR_FW_INFO_MAX_NUM_ENTRIES) { 48 + drm_err(drm_dev, "FW info format mismatch\n"); 49 + return -EINVAL; 50 + } 51 + 52 + if (!(header->flags & PVR_FW_FLAGS_OPEN_SOURCE) || 53 + header->fw_version_major > FW_MAX_SUPPORTED_MAJOR_VERSION || 54 + header->fw_version_major == 0) { 55 + drm_err(drm_dev, "Unsupported FW version %u.%u (build: %u%s)\n", 56 + header->fw_version_major, header->fw_version_minor, 57 + header->fw_version_build, 58 + (header->flags & PVR_FW_FLAGS_OPEN_SOURCE) ? 
" OS" : ""); 59 + return -EINVAL; 60 + } 61 + 62 + if (pvr_gpu_id_to_packed_bvnc(&pvr_dev->gpu_id) != header->bvnc) { 63 + struct pvr_gpu_id fw_gpu_id; 64 + 65 + packed_bvnc_to_pvr_gpu_id(header->bvnc, &fw_gpu_id); 66 + drm_err(drm_dev, "FW built for incorrect GPU ID %i.%i.%i.%i (expected %i.%i.%i.%i)\n", 67 + fw_gpu_id.b, fw_gpu_id.v, fw_gpu_id.n, fw_gpu_id.c, 68 + pvr_dev->gpu_id.b, pvr_dev->gpu_id.v, pvr_dev->gpu_id.n, pvr_dev->gpu_id.c); 69 + return -EINVAL; 70 + } 71 + 72 + fw_offset += header->header_len; 73 + layout_table_size = 74 + header->layout_entry_size * header->layout_entry_num; 75 + if ((fw_offset + layout_table_size) > firmware->size) 76 + return -EINVAL; 77 + 78 + layout_entries = (const struct pvr_fw_layout_entry *)&fw[fw_offset]; 79 + for (entry = 0; entry < header->layout_entry_num; entry++) { 80 + u32 start_addr = layout_entries[entry].base_addr; 81 + u32 end_addr = start_addr + layout_entries[entry].alloc_size; 82 + 83 + if (start_addr >= end_addr) 84 + return -EINVAL; 85 + } 86 + 87 + fw_offset = (firmware->size - SZ_4K) - header->device_info_size; 88 + 89 + drm_info(drm_dev, "FW version v%u.%u (build %u OS)\n", header->fw_version_major, 90 + header->fw_version_minor, header->fw_version_build); 91 + 92 + pvr_dev->fw_version.major = header->fw_version_major; 93 + pvr_dev->fw_version.minor = header->fw_version_minor; 94 + 95 + pvr_dev->fw_dev.header = header; 96 + pvr_dev->fw_dev.layout_entries = layout_entries; 97 + 98 + return 0; 99 + } 100 + 101 + static int 102 + pvr_fw_get_device_info(struct pvr_device *pvr_dev) 103 + { 104 + const struct firmware *firmware = pvr_dev->fw_dev.firmware; 105 + struct pvr_fw_device_info_header *header; 106 + const u8 *fw = firmware->data; 107 + const u64 *dev_info; 108 + u32 fw_offset; 109 + 110 + fw_offset = (firmware->size - SZ_4K) - pvr_dev->fw_dev.header->device_info_size; 111 + 112 + header = (struct pvr_fw_device_info_header *)&fw[fw_offset]; 113 + dev_info = (u64 *)(header + 1); 114 + 115 + 
pvr_device_info_set_quirks(pvr_dev, dev_info, header->brn_mask_size); 116 + dev_info += header->brn_mask_size; 117 + 118 + pvr_device_info_set_enhancements(pvr_dev, dev_info, header->ern_mask_size); 119 + dev_info += header->ern_mask_size; 120 + 121 + return pvr_device_info_set_features(pvr_dev, dev_info, header->feature_mask_size, 122 + header->feature_param_size); 123 + } 124 + 125 + /** 126 + * pvr_fw_validate_init_device_info() - Validate firmware and initialise device information 127 + * @pvr_dev: Target PowerVR device. 128 + * 129 + * This function must be called before querying device information. 130 + * 131 + * Returns: 132 + * * 0 on success, or 133 + * * -%EINVAL if firmware validation fails. 134 + */ 135 + int 136 + pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev) 137 + { 138 + int err; 139 + 140 + err = pvr_fw_validate(pvr_dev); 141 + if (err) 142 + return err; 143 + 144 + return pvr_fw_get_device_info(pvr_dev); 145 + }
+34
drivers/gpu/drm/imagination/pvr_fw.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_FW_H
#define PVR_FW_H

#include "pvr_fw_info.h"

#include <linux/types.h>

/* Forward declarations from "pvr_device.h". */
struct pvr_device;
struct pvr_file;

/**
 * struct pvr_fw_device - Firmware state tracked per PowerVR device
 */
struct pvr_fw_device {
	/** @firmware: Handle to the firmware loaded into the device. */
	const struct firmware *firmware;

	/**
	 * @header: Pointer to firmware header. Points into @firmware's data;
	 *          set by pvr_fw_validate_init_device_info().
	 */
	const struct pvr_fw_info_header *header;

	/**
	 * @layout_entries: Pointer to firmware layout. Points into
	 *                  @firmware's data; @header->layout_entry_num
	 *                  entries. Set alongside @header.
	 */
	const struct pvr_fw_layout_entry *layout_entries;

	/**
	 * @processor_type: FW processor type for this device. Must be one of
	 * %PVR_FW_PROCESSOR_TYPE_*.
	 */
	u16 processor_type;
};

/* Validates the loaded firmware image and populates device info from it. */
int pvr_fw_validate_init_device_info(struct pvr_device *pvr_dev);

#endif /* PVR_FW_H */
+135
drivers/gpu/drm/imagination/pvr_fw_info.h
/* SPDX-License-Identifier: GPL-2.0-only OR MIT */
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#ifndef PVR_FW_INFO_H
#define PVR_FW_INFO_H

#include <linux/bits.h>
#include <linux/sizes.h>
#include <linux/types.h>

/*
 * Firmware binary block unit in bytes.
 * Raw data stored in FW binary will be aligned to this size.
 */
#define FW_BLOCK_SIZE SZ_4K

/* Maximum number of entries in firmware layout table. */
#define PVR_FW_INFO_MAX_NUM_ENTRIES 8

/* Identifies which part of the firmware image a layout entry describes. */
enum pvr_fw_section_id {
	META_CODE = 0,
	META_PRIVATE_DATA,
	META_COREMEM_CODE,
	META_COREMEM_DATA,
	MIPS_CODE,
	MIPS_EXCEPTIONS_CODE,
	MIPS_BOOT_CODE,
	MIPS_PRIVATE_DATA,
	MIPS_BOOT_DATA,
	MIPS_STACK,
	RISCV_UNCACHED_CODE,
	RISCV_CACHED_CODE,
	RISCV_PRIVATE_DATA,
	RISCV_COREMEM_CODE,
	RISCV_COREMEM_DATA,
};

/* Broad classification of a firmware section: code vs data, coremem or not. */
enum pvr_fw_section_type {
	NONE = 0,
	FW_CODE,
	FW_DATA,
	FW_COREMEM_CODE,
	FW_COREMEM_DATA,
};

/*
 * FW binary format with FW info attached:
 *
 *       Contents        Offset
 * +-----------------+
 * |                 | 0
 * |                 |
 * | Original binary |
 * | file            |
 * | (.ldr/.elf)     |
 * |                 |
 * |                 |
 * +-----------------+
 * | Device info     | FILE_SIZE - 4K - device_info_size
 * +-----------------+
 * | FW info header  | FILE_SIZE - 4K
 * +-----------------+
 * |                 |
 * | FW layout table |
 * |                 |
 * +-----------------+
 *                     FILE_SIZE
 */

/* Version of the wrapper format described above; bump on any layout change. */
#define PVR_FW_INFO_VERSION 3

/* Set in pvr_fw_info_header.flags for images carrying open-source firmware. */
#define PVR_FW_FLAGS_OPEN_SOURCE BIT(0)

/** struct pvr_fw_info_header - Firmware header */
struct pvr_fw_info_header {
	/** @info_version: FW info header version. */
	u32 info_version;
	/** @header_len: Header length. */
	u32 header_len;
	/** @layout_entry_num: Number of entries in the layout table. */
	u32 layout_entry_num;
	/** @layout_entry_size: Size of an entry in the layout table. */
	u32 layout_entry_size;
	/** @bvnc: GPU ID supported by firmware. */
	aligned_u64 bvnc;
	/** @fw_page_size: Page size of processor on which firmware executes. */
	u32 fw_page_size;
	/** @flags: Compatibility flags. */
	u32 flags;
	/** @fw_version_major: Firmware major version number. */
	u16 fw_version_major;
	/** @fw_version_minor: Firmware minor version number. */
	u16 fw_version_minor;
	/** @fw_version_build: Firmware build number. */
	u32 fw_version_build;
	/** @device_info_size: Size of device info structure. */
	u32 device_info_size;
	/** @padding: Padding. */
	u32 padding;
};

/**
 * struct pvr_fw_layout_entry - Entry in firmware layout table, describing a
 * section of the firmware image
 *
 * NOTE(review): the enum-typed members make the on-image entry size
 * compiler-dependent in principle; the loader checks layout_entry_size
 * against sizeof() at validation time, which catches any mismatch.
 */
struct pvr_fw_layout_entry {
	/** @id: Section ID. */
	enum pvr_fw_section_id id;
	/** @type: Section type. */
	enum pvr_fw_section_type type;
	/** @base_addr: Base address of section in FW address space. */
	u32 base_addr;
	/** @max_size: Maximum size of section, in bytes. */
	u32 max_size;
	/** @alloc_size: Allocation size of section, in bytes. */
	u32 alloc_size;
	/** @alloc_offset: Allocation offset of section. */
	u32 alloc_offset;
};

/**
 * struct pvr_fw_device_info_header - Device information header.
 *
 * All sizes are in u64 elements, not bytes. The BRN mask, ERN mask, feature
 * mask and feature parameter arrays follow this header back to back.
 */
struct pvr_fw_device_info_header {
	/** @brn_mask_size: BRN mask size (in u64s). */
	u64 brn_mask_size;
	/** @ern_mask_size: ERN mask size (in u64s). */
	u64 ern_mask_size;
	/** @feature_mask_size: Feature mask size (in u64s). */
	u64 feature_mask_size;
	/** @feature_param_size: Feature parameter size (in u64s). */
	u64 feature_param_size;
};

#endif /* PVR_FW_INFO_H */